Dataset schema (one row per source file; string ranges are value lengths, numeric ranges are min/max):

  repo_name         string    lengths 5 to 92
  path              string    lengths 4 to 221
  copies            string    19 classes
  size              string    lengths 4 to 6
  content           string    lengths 766 to 896k
  license           string    15 classes
  hash              int64     -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean         float64   6.51 to 99.9
  line_max          int64     32 to 997
  alpha_frac        float64   0.25 to 0.96
  autogenerated     bool      1 class
  ratio             float64   1.5 to 13.6
  config_test       bool      2 classes
  has_no_keywords   bool      2 classes
  few_assignments   bool      1 class
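Example (not part of the dump): a minimal sketch of consuming rows with this schema, assuming the data is published as a streaming Hugging Face dataset; the dataset path "user/python-code" is a hypothetical placeholder.

    from datasets import load_dataset

    ds = load_dataset("user/python-code", split="train", streaming=True)  # hypothetical path
    for row in ds:
        # each row carries repo/path provenance plus per-file statistics
        if not row["autogenerated"] and float(row["alpha_frac"]) > 0.5:
            print(row["repo_name"], row["path"], row["size"], row["license"])
        break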
----------------------------------------------------------------------
repo_name: bfollinprm/Nquintessence
path: cosmoslik/cosmoslik_plugins/likelihoods/wmap/wmap.py
copies: 1    size: 2594
content:
from numpy import zeros
from cosmoslik import Likelihood, SubprocessExtension
import os

class wmap(Likelihood):
    """
    ===============
    WMAP Likelihood
    ===============

    - Written by WMAP team (see `<http://lambda.gsfc.nasa.gov/>`_)
    - CosmoSlik module by Marius Millea
    - Updated July 1, 2012

    Description
    ===========
    This module wraps the official WMAP likelihood code.
    Some minor modifications were made to allow:

    - Choosing the WMAP data directory at runtime
    - Choosing the lmin/lmax at runtime

    Install Notes
    =============
    To build this module run::

        ./cosmoslik.py --build likelihoods.wmap

    The Makefile for this module reads the following flags from ``Makefile.inc``:

    - ``$(CFITSIO)``
    - ``$(LAPACK)``
    - ``$(F2PYFLAGS)``

    Models
    ======
    The WMAP module requires a `Model` which provides the following:

    - ``cl_TT``
    - ``cl_TE``
    - ``cl_EE``
    - ``cl_BB``

    Extra-galactic foregrounds are ignored.

    Parameters
    ==========
    This module reads the following parameters from the ini file:

    [wmap].data_dir
    ---------------
    The path to the wmap/data directory.

    [wmap].use
    ----------
    A subset of ``['TT','TE','EE','BB']`` corresponding to which likelihood terms to use.

    [wmap].TT.lrange
    ----------------
    The TT range in ell to use in the likelihood

    [wmap].TE.lrange
    ----------------
    The TE range in ell to use in the likelihood
    """

    def __init__(self, datadir, use=['TT','TE','EE','BB'],
                 ttmin=2, ttmax=1200, temin=2, temax=800):
        self.use = use
        if not os.path.exists(datadir):
            raise Exception("The WMAP data directory you specified does not exist: '%s'" % datadir)
        self.pywmap = SubprocessExtension('pywmap', globals())
        self.pywmap.wmapinit(ttmin, ttmax, temin, temax, os.path.normpath(datadir) + '/')

    def __call__(self, cmb):
        cltt, clte, clee, clbb = [zeros(1202) for _ in range(4)]
        for cl, x in zip([cltt, clte, clee, clbb], ['TT', 'TE', 'EE', 'BB']):
            if x in self.use:
                m = cmb['cl_%s' % x]
                s = slice(0, min(len(m), len(cl)))
                cl[s] = m[s]
        liketerms = self.pywmap.wmaplnlike(cltt=cltt[2:], clte=clte[2:],
                                           clee=clee[2:], clbb=clbb[2:])
        return sum(liketerms)
license: mit    hash: -6,767,131,946,119,144,000    line_mean: 24.683168    line_max: 125
alpha_frac: 0.515806    autogenerated: false    ratio: 3.786861    config_test: false
has_no_keywords: false    few_assignments: false
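Example (not part of the dump): a minimal usage sketch for the class above; the data directory and the spectra dict are illustrative, and only the cl_TT/cl_TE/cl_EE/cl_BB keys read by __call__ are assumed.

    from numpy import arange

    lnl = wmap(datadir='wmap/data/', use=['TT', 'TE'])  # hypothetical path
    cmb = {'cl_%s' % x: arange(1202, dtype=float) for x in ['TT', 'TE', 'EE', 'BB']}
    print(lnl(cmb))  # sum of the WMAP likelihood terms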
----------------------------------------------------------------------
repo_name: pbanaszkiewicz/amy
path: amy/extrequests/filters.py
copies: 1    size: 7324
content:
import re

from django.db.models import Q
from django.forms import widgets
import django_filters

from extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest
from workshops.fields import Select2Widget
from workshops.filters import (
    AllCountriesFilter,
    AMYFilterSet,
    ContinentFilter,
    ForeignKeyAllValuesFilter,
    NamesOrderingFilter,
    StateFilterSet,
)
from workshops.models import Curriculum, Person, TrainingRequest, WorkshopRequest


# ------------------------------------------------------------
# TrainingRequest related filter and filter methods
# ------------------------------------------------------------


class TrainingRequestFilter(AMYFilterSet):
    search = django_filters.CharFilter(
        label="Name or Email",
        method="filter_by_person",
    )

    group_name = django_filters.CharFilter(
        field_name="group_name", lookup_expr="icontains", label="Group"
    )

    state = django_filters.ChoiceFilter(
        label="State",
        choices=(("no_d", "Pending or accepted"),) + TrainingRequest.STATE_CHOICES,
        method="filter_training_requests_by_state",
    )

    matched = django_filters.ChoiceFilter(
        label="Is Matched?",
        choices=(
            ("", "Unknown"),
            ("u", "Unmatched"),
            ("p", "Matched trainee, unmatched training"),
            ("t", "Matched trainee and training"),
        ),
        method="filter_matched",
    )

    nonnull_manual_score = django_filters.BooleanFilter(
        label="Manual score applied",
        method="filter_non_null_manual_score",
        widget=widgets.CheckboxInput,
    )

    affiliation = django_filters.CharFilter(
        method="filter_affiliation",
    )

    location = django_filters.CharFilter(lookup_expr="icontains")

    order_by = NamesOrderingFilter(
        fields=(
            "created_at",
            "score_total",
        ),
    )

    class Meta:
        model = TrainingRequest
        fields = [
            "search",
            "group_name",
            "state",
            "matched",
            "affiliation",
            "location",
        ]

    def filter_matched(self, queryset, name, choice):
        if choice == "":
            return queryset
        elif choice == "u":  # unmatched
            return queryset.filter(person=None)
        elif choice == "p":  # matched trainee, unmatched training
            return (
                queryset.filter(person__isnull=False)
                .exclude(
                    person__task__role__name="learner",
                    person__task__event__tags__name="TTT",
                )
                .distinct()
            )
        else:  # choice == 't' <==> matched trainee and training
            return queryset.filter(
                person__task__role__name="learner",
                person__task__event__tags__name="TTT",
            ).distinct()

    def filter_by_person(self, queryset, name, value):
        if value == "":
            return queryset
        else:
            # 'Harry Potter' -> ['Harry', 'Potter']
            tokens = re.split(r"\s+", value)
            # Each token must match email address or github username or
            # personal, or family name.
            for token in tokens:
                queryset = queryset.filter(
                    Q(personal__icontains=token)
                    | Q(middle__icontains=token)
                    | Q(family__icontains=token)
                    | Q(email__icontains=token)
                    | Q(person__personal__icontains=token)
                    | Q(person__middle__icontains=token)
                    | Q(person__family__icontains=token)
                    | Q(person__email__icontains=token)
                )
            return queryset

    def filter_affiliation(self, queryset, name, affiliation):
        if affiliation == "":
            return queryset
        else:
            q = Q(affiliation__icontains=affiliation) | Q(
                person__affiliation__icontains=affiliation
            )
            return queryset.filter(q).distinct()

    def filter_training_requests_by_state(self, queryset, name, choice):
        if choice == "no_d":
            return queryset.exclude(state="d")
        else:
            return queryset.filter(state=choice)

    def filter_non_null_manual_score(self, queryset, name, manual_score):
        if manual_score:
            return queryset.filter(score_manual__isnull=False)
        return queryset


# ------------------------------------------------------------
# WorkshopRequest related filter and filter methods
# ------------------------------------------------------------


class WorkshopRequestFilter(AMYFilterSet, StateFilterSet):
    assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
    country = AllCountriesFilter(widget=Select2Widget)
    continent = ContinentFilter(widget=Select2Widget, label="Continent")
    requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
        label="Requested workshop types",
        queryset=Curriculum.objects.all(),
        widget=widgets.CheckboxSelectMultiple(),
    )

    order_by = django_filters.OrderingFilter(
        fields=("created_at",),
    )

    class Meta:
        model = WorkshopRequest
        fields = [
            "state",
            "assigned_to",
            "requested_workshop_types",
            "country",
        ]


# ------------------------------------------------------------
# WorkshopInquiryRequest related filter and filter methods
# ------------------------------------------------------------


class WorkshopInquiryFilter(AMYFilterSet, StateFilterSet):
    assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
    country = AllCountriesFilter(widget=Select2Widget)
    continent = ContinentFilter(widget=Select2Widget, label="Continent")
    requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
        label="Requested workshop types",
        queryset=Curriculum.objects.all(),
        widget=widgets.CheckboxSelectMultiple(),
    )

    order_by = django_filters.OrderingFilter(
        fields=("created_at",),
    )

    class Meta:
        model = WorkshopInquiryRequest
        fields = [
            "state",
            "assigned_to",
            "requested_workshop_types",
            "country",
        ]


# ------------------------------------------------------------
# SelfOrganisedSubmission related filter and filter methods
# ------------------------------------------------------------


class SelfOrganisedSubmissionFilter(AMYFilterSet, StateFilterSet):
    assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
    country = AllCountriesFilter(widget=Select2Widget)
    continent = ContinentFilter(widget=Select2Widget, label="Continent")
    workshop_types = django_filters.ModelMultipleChoiceFilter(
        label="Requested workshop types",
        queryset=Curriculum.objects.all(),
        widget=widgets.CheckboxSelectMultiple(),
    )

    order_by = django_filters.OrderingFilter(
        fields=("created_at",),
    )

    class Meta:
        model = SelfOrganisedSubmission
        fields = [
            "state",
            "assigned_to",
            "workshop_types",
            "workshop_format",
        ]
license: mit    hash: -316,208,959,969,467,600    line_mean: 31.264317    line_max: 83
alpha_frac: 0.560213    autogenerated: false    ratio: 4.5775    config_test: false
has_no_keywords: false    few_assignments: false
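Example (not part of the dump): a minimal sketch of driving the FilterSet above from Django view code; the filter data is illustrative.

    from extrequests.filters import TrainingRequestFilter
    from workshops.models import TrainingRequest

    data = {"search": "potter", "state": "no_d"}  # e.g. parsed from request.GET
    f = TrainingRequestFilter(data, queryset=TrainingRequest.objects.all())
    for req in f.qs:  # filter_by_person and the state filter applied
        print(req.pk)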
----------------------------------------------------------------------
repo_name: jmcanterafonseca/fiware-cygnus
path: test/acceptance/tools/ckan_utils.py
copies: 1    size: 14123
content:
# -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of fiware-cygnus (FI-WARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'

from tools import general_utils, http_utils

# general constants
EMPTY = u''
WITHOUT = u'without'

# url, headers and payload constants
HEADER_AUTHORIZATION = u'authorization'
HEADER_CONTENT_TYPE = u'Content-Type'
HEADER_APPLICATION = u'application/json'
VERSION = u'ckan_version'
VERSION_VALUE_DEFAULT = u'2.0'
HOST = u'host'
HOST_VALUE_DEFAULT = u'127.0.0.1'
PORT = u'port'
PORT_VALUE_DEFAULT = u'80'
AUTHORIZATION = u'authorization'
VERIFY_VERSION = u'verify_version'
FALSE_VALUE = u'false'
ORION_URL = u'orion_url'
ORION_URL_DEFAULT = u'http://localhost:1026'
SSL = u'ssl'
RETRIES_DATASET_SEARCH = u'retries_dataset_search'
DELAY_TO_RETRY = u'delay_to_retry'
PATH_VERSION_CKAN = u'api/util/status'
PATH_API_CREATE = u'api/3/action'
PATH_PACKAGE_SHOW = u'package_show?id='
PATH_DSTORE_SEARCH_SQL = u'datastore_search_sql?sql='
ORGANIZATION_LIST = u'organization_list'
ORGANIZATION_CREATE = u'organization_create'
PACKAGE_CREATE = u'package_create'
RESOURCE_CREATE = u'resource_create'
DATASTORE_CREATE = u'datastore_create'
PACKAGE_SHOW = u'package_show'
DATASTORE_SEARCH_SQL = u'datastore_search_sql'
RESULT = u'result'
RECORDS = u'records'
NAME = u'name'
OWNER_ORG = u'owner_org'
ID = u'id'
TYPE = u'type'
RESOURCES = u'resources'
URL_EXAMPLE = u'http://foo.bar/newresource'
URL = u'url'
PACKAGE_ID = u'package_id'
RESOURCE_ID = u'resource_id'
FIELD = u'fields'
FORCE = u'force'
RECVTIME = u'recvTime'
TIMESTAMP = u'timestamp'
TRUE = u'true'


class Ckan:

    def __init__(self, **kwargs):
        """
        constructor
        :param ckan_version: ckan version (OPTIONAL)
        :param ckan_verify_version: determine whether the version is verified or not (True or False) (OPTIONAL)
        :param authorization: API KEY (authorization) used in ckan requests (OPTIONAL)
        :param host: ckan host (MANDATORY)
        :param port: ckan port (MANDATORY)
        :param orion_url: Orion URL used to compose the resource URL with the convenience operation URL to query it (OPTIONAL)
        :param ssl: enable SSL for secure Http transportation; 'true' or 'false' (OPTIONAL)
        :param capacity: capacity of the channel (OPTIONAL)
        :param channel_transaction_capacity: amount of bytes that can be sent per transaction (OPTIONAL)
        :param retries_number: number of retries when getting values (OPTIONAL)
        :param delay_to_retry: time to delay each retry (OPTIONAL)
        endpoint_url: endpoint url used in ckan requests
        """
        self.version = kwargs.get(VERSION, VERSION_VALUE_DEFAULT)
        self.ckan_verify_version = kwargs.get(VERIFY_VERSION, FALSE_VALUE)
        self.authorization = kwargs.get(AUTHORIZATION, EMPTY)
        self.host = kwargs.get(HOST, HOST_VALUE_DEFAULT)
        self.port = kwargs.get(PORT, PORT_VALUE_DEFAULT)
        self.orion_url = kwargs.get(ORION_URL, ORION_URL_DEFAULT)
        self.ssl = kwargs.get(SSL, FALSE_VALUE)
        self.capacity = kwargs.get("capacity", "1000")
        self.transaction_capacity = kwargs.get("transaction_capacity", "100")
        self.retries_number = kwargs.get(RETRIES_DATASET_SEARCH, 15)
        self.retry_delay = kwargs.get(DELAY_TO_RETRY, 10)
        if self.ssl.lower() == "true":
            self.endpoint = "https://"
        if self.ssl.lower() == "false":
            self.endpoint = "http://"
        self.endpoint = self.endpoint + self.host + ":" + self.port

    def __create_url(self, operation, element=EMPTY):
        """
        create the url for different operations
        :param operation: operation type (dataset, etc)
        :return: request url
        """
        if operation == VERSION:
            value = "%s/%s" % (self.endpoint, PATH_VERSION_CKAN)
        if operation == ORGANIZATION_CREATE or operation == PACKAGE_CREATE or operation == RESOURCE_CREATE \
                or operation == DATASTORE_CREATE or operation == ORGANIZATION_LIST:
            value = "%s/%s/%s" % (self.endpoint, PATH_API_CREATE, operation)  # organization Name
        if operation == PACKAGE_SHOW:
            value = "%s/%s/%s%s" % (self.endpoint, PATH_API_CREATE, PATH_PACKAGE_SHOW, element)  # datasetName
        if operation == DATASTORE_SEARCH_SQL:
            value = "%s/%s/%s%s" % (self.endpoint, PATH_API_CREATE, PATH_DSTORE_SEARCH_SQL, element)  # sql
        return value

    def __create_headers(self):
        """
        create headers for different requests
        :return header dict
        """
        return {HEADER_AUTHORIZATION: self.authorization, HEADER_CONTENT_TYPE: HEADER_APPLICATION}

    def __create_datastore_in_resource(self, resource_id, fields):
        """
        create a datastore in a resource
        :param resource_id: resource id
        :param fields: field in datastore
        """
        payload = general_utils.convert_dict_to_str({RESOURCE_ID: resource_id,
                                                     FIELD: fields,
                                                     FORCE: TRUE}, general_utils.JSON)
        resp = http_utils.request(http_utils.POST, url=self.__create_url(DATASTORE_CREATE),
                                  headers=self.__create_headers(), data=payload)
        http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp,
                                      "ERROR - Creating datastore in resource id: %s" % (resource_id))

    # ------------------------------ public methods ----------------------------------------

    def verify_version(self):
        """
        Verify that ckan is installed and that the version is the expected one; the default version is 2.0
        """
        if self.ckan_verify_version.lower() == "true":
            resp = http_utils.request(http_utils.GET, url=self.__create_url(VERSION),
                                      headers=self.__create_headers())
            body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
            assert self.version == str(body_dict[VERSION]), \
                "Wrong ckan version verified: %s. Expected: %s. \n\nBody content: %s" \
                % (str(body_dict[VERSION]), str(self.version), str(resp.text))
        return True

    def verify_if_organization_exist(self, name):
        """
        Verify whether the organization exists
        :param name: organization name
        :return: True if the organization exists, False otherwise
        """
        resp = http_utils.request(http_utils.GET, url=self.__create_url(ORGANIZATION_LIST, name),
                                  headers=self.__create_headers())
        http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp,
                                      "ERROR - list of the names of the site's organizations...")
        body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
        for i in range(len(body_dict[RESULT])):
            if body_dict[RESULT][i] == name:
                return True
        return False

    def create_organization(self, name):
        """
        Create a new organization if it does not exist
        :param name: organization name
        """
        self.organization = name
        if not (self.verify_if_organization_exist(name)):
            payload = general_utils.convert_dict_to_str({NAME: name}, general_utils.JSON)
            resp = http_utils.request(http_utils.POST, url=self.__create_url(ORGANIZATION_CREATE),
                                      headers=self.__create_headers(), data=payload)
            http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp,
                                          "ERROR - creating organization: %s ..." % (name))
            return True
        return False

    def get_organization(self):
        """
        get organization name
        :return: organization name
        """
        return self.organization

    def verify_if_dataset_exist(self, name):
        """
        Verify whether the dataset exists
        :param name: dataset name
        :return: the dataset id if the dataset exists, False otherwise
        """
        resp = http_utils.request(http_utils.GET, url=self.__create_url(PACKAGE_SHOW, name),
                                  headers=self.__create_headers())
        if resp.status_code == http_utils.status_codes[http_utils.OK]:
            body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
            self.dataset_id = body_dict[RESULT][ID]
            return self.dataset_id
        return False

    def create_dataset(self, name):
        """
        Create a new dataset if it does not exist
        :param name: dataset name
        """
        self.dataset = name
        if not (self.verify_if_dataset_exist(name)):
            payload = general_utils.convert_dict_to_str({NAME: self.dataset,
                                                         OWNER_ORG: self.organization}, general_utils.JSON)
            resp = http_utils.request(http_utils.POST, url=self.__create_url(PACKAGE_CREATE),
                                      headers=self.__create_headers(), data=payload)
            http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp,
                                          "ERROR - creating dataset: %s ..." % (name))
            body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
            self.dataset_id = body_dict[RESULT][ID]
            return body_dict[RESULT][ID]
        return False

    def get_dataset(self):
        """
        get dataset name and dataset id
        :return: dataset name and dataset id
        """
        return self.dataset, self.dataset_id

    def verify_if_resource_exist(self, name, dataset_name):
        """
        Verify whether the resource exists in a dataset
        :param name: resource name
        :param dataset_name: dataset name
        :return: the resource id if the resource exists, False otherwise
        """
        resp = http_utils.request(http_utils.GET, url=self.__create_url(PACKAGE_SHOW, dataset_name),
                                  headers=self.__create_headers())
        if resp.status_code == http_utils.status_codes[http_utils.OK]:
            body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
            for i in range(len(body_dict[RESULT][RESOURCES])):
                if body_dict[RESULT][RESOURCES][i][NAME] == name:
                    self.resource_id = body_dict[RESULT][RESOURCES][i][ID]
                    return self.resource_id
        return False

    def generate_field_datastore_to_resource(self, attributes_number, attributes_name, attribute_type, metadata_type):
        """
        generate fields to datastore request
        :return: fields list
        """
        field = []
        field.append({ID: RECVTIME, TYPE: TIMESTAMP})
        for i in range(0, int(attributes_number)):
            if attribute_type != WITHOUT:
                field.append({ID: attributes_name + "_" + str(i), TYPE: attribute_type})
            if metadata_type != WITHOUT:
                field.append({ID: attributes_name + "_" + str(i) + "_md", TYPE: metadata_type})
        return field

    def create_resource(self, name, dataset_name, fields=[]):
        """
        Create a new resource with its datastore if it does not exist
        :param name: resource name
        :param dataset_name: dataset name
        :param fields: fields for the datastore
        """
        self.resource = name
        if not (self.verify_if_resource_exist(name, dataset_name)):
            payload = general_utils.convert_dict_to_str({NAME: self.resource,
                                                         URL: URL_EXAMPLE,
                                                         PACKAGE_ID: self.dataset_id}, general_utils.JSON)
            resp = http_utils.request(http_utils.POST, url=self.__create_url(RESOURCE_CREATE),
                                      headers=self.__create_headers(), data=payload)
            http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp,
                                          "ERROR - creating resource: %s ..." % (name))
            body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
            self.resource_id = body_dict[RESULT][ID]
            self.__create_datastore_in_resource(self.resource_id, fields)
            return self.resource_id
        return False

    def get_resource(self):
        """
        get resource name and resource id
        :return: resource name and resource id
        """
        return self.resource, self.resource_id

    def datastore_search_last_sql(self, rows, resource_name, dataset_name):
        """
        get the last records in a resource
        :param rows: number of rows to return
        :param resource_name: resource name
        :param dataset_name: dataset name
        :return: response with the record dict, or False if the resource does not exist
        """
        resource_id = self.verify_if_resource_exist(resource_name, dataset_name)
        if resource_id != False:
            sql = 'SELECT * from "' + resource_id + '" ORDER BY 1 DESC LIMIT ' + str(rows)
            resp = http_utils.request(http_utils.POST, url=self.__create_url(DATASTORE_SEARCH_SQL, sql),
                                      headers=self.__create_headers(), data=EMPTY)
            http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp,
                                          "ERROR - searching in resource: %s ..." % (resource_name))
            return resp
        return resource_id
license: agpl-3.0    hash: -1,284,196,080,977,096,200    line_mean: 46.864407    line_max: 174
alpha_frac: 0.610552    autogenerated: false    ratio: 3.881253    config_test: false
has_no_keywords: false    few_assignments: false
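Example (not part of the dump): a minimal sketch chaining the helper's public methods in the order its state requires (organization before dataset before resource); the host, names and API key are placeholders.

    ckan = Ckan(host='localhost', port='80', ssl='false', authorization='my-api-key')
    ckan.verify_version()
    ckan.create_organization('test_org')
    ckan.create_dataset('test_dataset')
    fields = ckan.generate_field_datastore_to_resource(2, 'temperature', 'float', 'json')
    ckan.create_resource('test_resource', 'test_dataset', fields=fields)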
----------------------------------------------------------------------
repo_name: alphagov/notifications-delivery
path: tests/clients/test_aws_ses.py
copies: 1    size: 1062
content:
from moto import mock_ses

from notifications_delivery.clients.email.aws_ses import (
    AwsSesClient, AwsSesClientException)


@mock_ses
def test_send_email(ses_client):
    aws_ses_client = AwsSesClient(region='eu-west-1')
    source = "[email protected]"
    to_address = "[email protected]"
    subject = "Email subject"
    body = "Email body"
    # All source email addresses have to be verified before you
    # can send on behalf of them.
    ses_client.verify_email_identity(EmailAddress=source)
    message_id = aws_ses_client.send_email(source, to_address, subject, body)
    assert message_id


@mock_ses
def test_send_email_not_verified(ses_client):
    aws_ses_client = AwsSesClient(region='eu-west-1')
    source = "[email protected]"
    to_address = "[email protected]"
    subject = "Email subject"
    body = "Email body"
    try:
        message_id = aws_ses_client.send_email(source, to_address, subject, body)
    except AwsSesClientException as e:
        assert 'Did not have authority to send from email [email protected]' in str(e)
license: mit    hash: 3,366,847,243,641,234,400    line_mean: 35.62069    line_max: 94
alpha_frac: 0.704331    autogenerated: false    ratio: 3.267692    config_test: false
has_no_keywords: false    few_assignments: false
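Example (not part of the dump): the tests above receive a ses_client fixture that this file does not define; a plausible pytest sketch, assuming the project builds it on boto3.

    import boto3
    import pytest

    @pytest.fixture
    def ses_client():
        # moto's @mock_ses on the test intercepts calls made by this client
        return boto3.client('ses', region_name='eu-west-1')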
----------------------------------------------------------------------
repo_name: cocrawler/cocrawler
path: cocrawler/fetcher.py
copies: 1    size: 10249
content:
'''
async fetching of urls.

Assumes robots checks have already been done.

Success returns response object and response bytes (which were already
read in order to shake out all potential network-related exceptions.)

Failure returns enough details for the caller to do something smart:
503, other 5xx, DNS fail, connect timeout, error between connect and
full response, proxy failure. Plus an errorstring good enough for logging.
'''

import time
import traceback
from collections import namedtuple
import ssl
import urllib
import asyncio
import logging

import aiohttp

from . import stats
from . import config
from . import content
from .urls import URL

LOGGER = logging.getLogger(__name__)

# these errors get printed deep in aiohttp but they also bubble up
aiohttp_errors = {
    'SSL handshake failed',
    'SSL error errno:1 reason: CERTIFICATE_VERIFY_FAILED',
    'SSL handshake failed on verifying the certificate',
    'Fatal error on transport TCPTransport',
    'Fatal error on SSL transport',
    'SSL error errno:1 reason: UNKNOWN_PROTOCOL',
    'Future exception was never retrieved',
    'Unclosed connection',
    'SSL error errno:1 reason: TLSV1_UNRECOGNIZED_NAME',
    'SSL error errno:1 reason: SSLV3_ALERT_HANDSHAKE_FAILURE',
    'SSL error errno:1 reason: TLSV1_ALERT_INTERNAL_ERROR',
}


class AsyncioSSLFilter(logging.Filter):
    def filter(self, record):
        stats.stats_sum('filter examined a {} {} log line'.format(record.name, record.levelname), 1)
        if record.name == 'asyncio' and record.levelname == 'ERROR':
            msg = record.getMessage()
            for ae in aiohttp_errors:
                if msg.startswith(ae):
                    stats.stats_sum('filter suppressed a asyncio ERROR log line', 1)
                    return False
        return True


def establish_filters():
    f = AsyncioSSLFilter()
    logging.getLogger('asyncio').addFilter(f)


# XXX should be a policy plugin
# XXX cookie handling -- can be per-get -- make per-domain jar
def apply_url_policies(url, crawler):
    headers = {}
    headers['User-Agent'] = crawler.ua

    if crawler.prevent_compression:
        headers['Accept-Encoding'] = 'identity'
    else:
        headers['Accept-Encoding'] = content.get_accept_encoding()

    if crawler.upgrade_insecure_requests:
        headers['Upgrade-Insecure-Requests'] = '1'

    proxy, prefetch_dns = global_policies()

    get_kwargs = {'headers': headers, 'proxy': proxy}
    return prefetch_dns, get_kwargs


def global_policies():
    proxy = config.read('Fetcher', 'ProxyAll')
    prefetch_dns = not proxy or config.read('GeoIP', 'ProxyGeoIP')
    return proxy, prefetch_dns


FetcherResponse = namedtuple('FetcherResponse', ['response', 'body_bytes', 'ip', 'req_headers',
                                                 't_first_byte', 't_last_byte', 'is_truncated',
                                                 'last_exception'])


async def fetch(url, session, allow_redirects=None, max_redirects=None,
                stats_prefix='', max_page_size=-1, get_kwargs={}):
    last_exception = None
    is_truncated = False
    response = None

    try:
        t0 = time.time()
        last_exception = None
        body_bytes = b''
        blocks = []
        left = max_page_size
        ip = None

        with stats.coroutine_state(stats_prefix+'fetcher fetching'):
            with stats.record_latency(stats_prefix+'fetcher fetching', url=url.url):
                response = await session.get(url.url,
                                             allow_redirects=allow_redirects,
                                             max_redirects=max_redirects,
                                             **get_kwargs)
                t_first_byte = '{:.3f}'.format(time.time() - t0)

                if 'proxy' not in get_kwargs and response.connection:
                    # this is racy, often the connection is already None unless the crawler is busy
                    addr = response.connection.transport.get_extra_info('peername')
                    if addr:
                        stats.stats_sum(stats_prefix+'fetch ip from connection', 1)
                        ip = [addr[0]]  # ipv4 or ipv6

                while left > 0:
                    # reading stream directly to dodge decompression and limit size.
                    # this means that aiohttp tracing on_response_chunk_receive doesn't work
                    block = await response.content.read(left)
                    if not block:
                        body_bytes = b''.join(blocks)
                        break
                    blocks.append(block)
                    left -= len(block)
                else:
                    body_bytes = b''.join(blocks)

                if not response.content.at_eof():
                    stats.stats_sum(stats_prefix+'fetch truncated length', 1)
                    response.close()  # this does interrupt the network transfer
                    is_truncated = 'length'

                t_last_byte = '{:.3f}'.format(time.time() - t0)
    except asyncio.TimeoutError:
        stats.stats_sum(stats_prefix+'fetch timeout', 1)
        last_exception = 'TimeoutError'
        body_bytes = b''.join(blocks)
        if len(body_bytes):
            # these body_bytes are currently dropped because last_exception is set
            is_truncated = 'time'
            stats.stats_sum(stats_prefix+'fetch timeout body bytes found', 1)
            stats.stats_sum(stats_prefix+'fetch timeout body bytes found bytes', len(body_bytes))
    except (aiohttp.ClientError) as e:
        # ClientError is a catchall for a bunch of things
        # e.g. DNS errors, '400' errors for http parser errors
        # ClientConnectorCertificateError for an SSL cert that doesn't match hostname
        # ClientConnectorSSLError see https://bugs.python.org/issue27970 for python not handling missing intermediate certs
        # ClientConnectorError(None, None) caused by robots redir to DNS fail
        # ServerDisconnectedError(None,) caused by servers that return 0 bytes for robots.txt fetches
        # TooManyRedirects("0, message=''",) caused by too many robots.txt redirs
        stats.stats_sum(stats_prefix+'fetch ClientError', 1)
        detailed_name = str(type(e).__name__)
        last_exception = 'ClientError: ' + detailed_name + ': ' + str(e)
        body_bytes = b''.join(blocks)
        if len(body_bytes):
            # these body_bytes are currently dropped because last_exception is set
            is_truncated = 'disconnect'
            stats.stats_sum(stats_prefix+'fetch ClientError body bytes found', 1)
            stats.stats_sum(stats_prefix+'fetch ClientError body bytes found bytes', len(body_bytes))
    except ssl.CertificateError as e:
        # many ssl errors raise and have tracebacks printed deep in python, fixed in 3.8
        stats.stats_sum(stats_prefix+'fetch SSL CertificateError', 1)
        last_exception = 'CertificateError: ' + str(e)
    except ValueError as e:
        # no A records found -- raised by our dns code
        # aiohttp raises:
        # ValueError Location: https:/// 'Host could not be detected' -- robots fetch
        # ValueError Location: http:// /URL should be absolute/ -- robots fetch
        # ValueError 'Can redirect only to http or https' -- robots fetch -- looked OK to curl!
        stats.stats_sum(stats_prefix+'fetch other error - ValueError', 1)
        last_exception = 'ValueError: ' + str(e)
    except AttributeError as e:
        stats.stats_sum(stats_prefix+'fetch other error - AttributeError', 1)
        last_exception = 'AttributeError: ' + str(e)
    except RuntimeError as e:
        stats.stats_sum(stats_prefix+'fetch other error - RuntimeError', 1)
        last_exception = 'RuntimeError: ' + str(e)
    except asyncio.CancelledError:
        raise
    except Exception as e:
        last_exception = 'Exception: ' + str(e)
        stats.stats_sum(stats_prefix+'fetch surprising error', 1)
        LOGGER.info('Saw surprising exception in fetcher working on %s:\n%s', url.url, last_exception)
        traceback.print_exc()

    # if redirs are allowed the url must be set to the final url
    if response and str(response.url) != url.url:
        if allow_redirects:
            url = URL(str(response.url))
        else:
            # TODO: this fires for quoting: {{%20data.src%20}} comes out %7B%7B%20data.src%20%7D%7D
            LOGGER.error('Surprised that I fetched %s and got %s', url.url, str(response.url))

    if last_exception is not None:
        if body_bytes:
            LOGGER.info('we failed working on %s, the last exception is %s, dropped %d body bytes',
                        url.url, last_exception, len(body_bytes))
        else:
            LOGGER.info('we failed working on %s, the last exception is %s', url.url, last_exception)
        return FetcherResponse(None, None, None, None, None, None, False, last_exception)

    fr = FetcherResponse(response, body_bytes, ip, response.request_info.headers,
                         t_first_byte, t_last_byte, is_truncated, None)

    if response.status >= 500:
        LOGGER.debug('server returned http status %d', response.status)

    stats.stats_sum(stats_prefix+'fetch bytes', len(body_bytes) + len(response.raw_headers))
    stats.stats_sum(stats_prefix+'fetch URLs', 1)
    stats.stats_sum(stats_prefix+'fetch http code=' + str(response.status), 1)

    # checks after fetch:
    # hsts header?
    # if ssl, check strict-transport-security header, remember max-age=foo part., other stuff like includeSubDomains
    # did we receive cookies? was the security bit set?

    return fr


def upgrade_scheme(url):
    '''
    Upgrade crawled scheme to https, if reasonable. This helps to reduce
    MITM attacks against the crawler.

    https://chromium.googlesource.com/chromium/src/net/+/master/http/transport_security_state_static.json

    Alternately, the return headers from a site might have strict-transport-security set ... a bit more
    dangerous as we'd have to respect the timeout to avoid permanently learning something that's broken

    TODO: use HTTPSEverwhere? would have to have a fallback if https failed, which it occasionally will
    '''
    return url
license: apache-2.0    hash: -2,570,987,281,301,282,000    line_mean: 40.662602    line_max: 141
alpha_frac: 0.630891    autogenerated: false    ratio: 4.181559    config_test: false
has_no_keywords: false    few_assignments: false
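Example (not part of the dump): a minimal driver sketch for fetch(), assuming cocrawler's URL class and an aiohttp session; the target URL and size cap are illustrative.

    import asyncio
    import aiohttp
    from cocrawler.urls import URL
    from cocrawler.fetcher import fetch

    async def main():
        async with aiohttp.ClientSession() as session:
            fr = await fetch(URL('http://example.com/'), session,
                             allow_redirects=True, max_redirects=5, max_page_size=1000000)
            print(fr.last_exception or fr.response.status, len(fr.body_bytes or b''))

    asyncio.run(main())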
----------------------------------------------------------------------
repo_name: lukaszkoczwara/presence-analyzer-lkoczwara
path: src/presence_analyzer/helpers.py
copies: 1    size: 1777
content:
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""

from json import dumps
from functools import wraps

from flask import Response


def jsonify(function):
    """
    Creates a response with the JSON representation of wrapped function result.
    """
    @wraps(function)
    def inner(*args, **kwargs):
        return Response(dumps(function(*args, **kwargs)),
                        mimetype='application/json')
    return inner


def group_by_weekday(items):
    """
    Groups presence entries by weekday.
    """
    result = {i: [] for i in range(7)}
    for date in items:
        start = items[date]['start']
        end = items[date]['end']
        result[date.weekday()].append(interval(start, end))
    return result


def seconds_since_midnight(time):
    """
    Calculates amount of seconds since midnight.
    """
    return time.hour * 3600 + time.minute * 60 + time.second


def interval(start, end):
    """
    Calculates interval in seconds between two datetime.time objects.
    """
    return seconds_since_midnight(end) - seconds_since_midnight(start)


def mean(items):
    """
    Calculates arithmetic mean. Returns zero for empty lists.
    """
    return float(sum(items)) / len(items) if len(items) > 0 else 0


def group_start_end_times_by_weekday(items):
    """
    Groups start and end times in sec. by weekday.
    """
    result = {i: {'start': [], 'end': []} for i in range(7)}
    for date, start_end in items.iteritems():
        start = start_end['start']
        end = start_end['end']
        result[date.weekday()]['start'].append(seconds_since_midnight(start))
        result[date.weekday()]['end'].append(seconds_since_midnight(end))
    return result
license: mit    hash: -492,233,173,606,348,740    line_mean: 24.924242    line_max: 79
alpha_frac: 0.601013    autogenerated: false    ratio: 3.993258    config_test: false
has_no_keywords: false    few_assignments: false
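Example (not part of the dump): a minimal sketch of the jsonify decorator on a Flask view; the app and route are illustrative.

    from flask import Flask
    from presence_analyzer.helpers import jsonify

    app = Flask(__name__)

    @app.route('/api/v1/mean_time')
    @jsonify
    def mean_time_view():
        return {'weekday': 0, 'mean': 123.4}  # serialized by dumps() in the wrapper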
----------------------------------------------------------------------
repo_name: CompSci17/Survey-System
path: survey_system_files/results.py
copies: 1    size: 11810
content:
from .models import Answers, RadioResults, SelectResults, ImportanceOrderResults, CheckboxResults
from chartit import DataPool, Chart


class Results( ):

    def render_results( self, questions, survey ):
        """
        Sorts out logic behind how we present our answers.

        @param questions QuerySet Questions we're working with
        @param survey Object The survey we're rendering results for

        @return Returns a tuple of answers to be utilised in the view.
                Text/Textarea are of the form:
                    ( input_type, list_of_answers, survey_object )
                Every other input is of the form:
                    ( input_type, chart_object, survey_object )
        """
        # A list to hold our output tuples
        output = []

        for question in questions:
            # For every question in the QuerySet, we're going to check and process
            # it dependent on input type
            if question.input_type == 'text':
                # get question's results
                results = self.get_results( question )
                combined_results = []
                for result in results:
                    # For every answer we have, put it inside a list
                    combined_results.append( str( result.text ) )
                # Add our input type, list and primary key to our output list
                output.append( ( "text", combined_results, question.pk ) )
            elif question.input_type == 'textarea':
                # get question's results
                results = self.get_results( question )
                combined_results = []
                for result in results:
                    # For every answer we have, put it inside a list
                    combined_results.append( str( result.text ) )
                # Add our input type, list and primary key to our output list
                output.append( ( "textarea", combined_results, question.pk ) )
            elif question.input_type == 'radio':
                # Get all the options offered by the question
                options = self.get_choices( question.choices )
                # Dictionary for counting the occurrences of a selection
                counter = {}
                # Get our question's results
                answers = self.get_results( question )
                for option in options:
                    # For every option, add it to our dictionary; starting with 0
                    counter.update( { option.strip().replace( ",", "" ): 0 } )
                for answer in answers:
                    # For every answer, increment the answer in the dictionary
                    counter[ str( answer.text ).strip().replace( ",", "" ) ] += 1
                for option in options:
                    # Check if the count for this question already exists
                    existence_check = RadioResults.objects.filter(
                        survey__exact = survey,
                        question__exact = question,
                        answer__exact = option.strip().replace( ",", "" ) )
                    if existence_check.exists( ):
                        # If it exists, pass in the primary key
                        result = RadioResults(
                            pk = existence_check[0].pk,
                            survey = survey,
                            question = question,
                            answer = option.strip().replace( ",", "" ),
                            answer_count = counter[ str( option ).strip().replace( ",", "" ) ] )
                    else:
                        # If it doesn't exist, leave out the primary key
                        result = RadioResults(
                            survey = survey,
                            question = question,
                            answer = option.strip().replace( ",", "" ),
                            answer_count = counter[ str( option ).strip().replace( ",", "" ) ] )
                    # Save our set of results
                    result.save()
                # Get our chart object for the list
                piechart = self.radio_pie_chart( question )
                # Add our input type, chart object and primary key to our output list
                output.append( ( "radio", piechart, question.pk ) )
            elif question.input_type == 'select':
                # Get all the options offered by the question
                options = self.get_choices( question.choices )
                # Dictionary for counting the occurrences of a selection
                counter = {}
                # Get our question's results
                answers = self.get_results( question )
                for option in options:
                    # For every option, add it to our dictionary; starting with 0
                    counter.update( { option.strip().replace( ",", "" ): 0 } )
                for answer in answers:
                    # For every answer, increment the answer in the dictionary
                    counter[ str( answer.text ).strip().replace( ",", "" ) ] += 1
                for option in options:
                    # Check if the count for this question already exists
                    existence_check = SelectResults.objects.filter(
                        survey__exact = survey,
                        question__exact = question,
                        answer__exact = option.strip().replace( ",", "" ) )
                    if existence_check.exists( ):
                        # If it exists, pass in the primary key
                        result = SelectResults(
                            pk = existence_check[0].pk,
                            survey = survey,
                            question = question,
                            answer = option.strip().replace( ",", "" ),
                            answer_count = counter[ str( option ).strip().replace( ",", "" ) ] )
                    else:
                        # If it doesn't exist, leave out the primary key
                        result = SelectResults(
                            survey = survey,
                            question = question,
                            answer = option.strip().replace( ",", "" ),
                            answer_count = counter[ str( option ).strip().replace( ",", "" ) ] )
                    # Save our set of results
                    result.save()
                # Get our chart object for the list
                piechart = self.select_pie_chart( question )
                # Add our input type, chart object and primary key to our output list
                output.append( ( "select", piechart, question.pk ) )
            elif question.input_type == 'checkbox':
                # Get all the question's answers
                answers = self.get_results( question )
                # We'll use this to keep track of the answer count
                counter = {}
                # Get all the question's options/choices
                options = self.get_choices( question.choices )
                for option in options:
                    # initialise each option in the counter with 0
                    counter.update( { option.strip(): 0 } )
                for answer in answers:
                    # Get a list of all the answers
                    delimited_answers = answer.text.split( "," )
                    for indiv_answer in delimited_answers:
                        # For every answer, increment it in the counter
                        counter[ indiv_answer.strip() ] += 1
                for option in counter:
                    # Check if the question already has a count going in the database
                    existence_check = CheckboxResults.objects.filter(
                        survey__exact = survey,
                        question__exact = question,
                        answer__exact = option.strip() )
                    if existence_check.exists():
                        # If it exists, just update it
                        result = CheckboxResults(
                            pk = existence_check[0].pk,
                            survey = survey,
                            question = question,
                            answer = option,
                            answer_count = counter[ option.strip() ] )
                    else:
                        # If it doesn't exist, create it
                        result = CheckboxResults(
                            survey = survey,
                            question = question,
                            answer = option,
                            answer_count = counter[ option.strip() ] )
                    # Save the result in the model
                    result.save()
                # Create new bar chart
                bar_chart = self.checkbox_bar_chart( question )
                # Append the checkbox details to the returned output
                output.append( ( "checkbox", bar_chart, question.pk ) )
            elif question.input_type == 'order':
                # Get all the question's options
                options = self.get_choices( question.choices )
                # Get the number of options
                number_of_options = len( options )
                # We'll use this to keep track of the answer count
                counter = {}
                for integer_counter in range( 1, number_of_options + 1 ):
                    # Initialise dict using integers with their own dictionaries
                    counter.update( { integer_counter: { } } )
                    for option in options:
                        # For every option, initialise the above integer's dicts with the option's counter at 0
                        counter[ integer_counter ].update( { str( option ).strip().replace( ",", "" ): 0 } )
                # Get the question's answers
                answers = self.get_results( question )
                for answer in answers:
                    # For every answer, split it at every comma
                    split_answers = answer.text.split( "," )
                    for i, result in enumerate( split_answers ):
                        # Increment the choice's counter by 1
                        counter[ i + 1 ][ result.strip().replace( ",", "" ) ] += 1
                for position in counter:
                    for option in counter[ position ]:
                        existence_check = ImportanceOrderResults.objects.filter(
                            survey__exact = survey,
                            question__exact = question,
                            answer__exact = option.strip().replace( ",", "" ),
                            answer_position__exact = position )
                        if existence_check.exists():
                            result = ImportanceOrderResults(
                                pk = existence_check[0].pk,
                                survey = survey,
                                question = question,
                                answer = option.strip().replace( ",", "" ),
                                answer_position = position,
                                answer_count = counter[ position ][ str( option ).strip().replace( ",", "" ) ] )
                        else:
                            result = ImportanceOrderResults(
                                survey = survey,
                                question = question,
                                answer = option.strip().replace( ",", "" ),
                                answer_position = position,
                                answer_count = counter[ position ][ str( option ).strip().replace( ",", "" ) ] )
                        result.save()
                output.append( ( "order_of_importance", counter, str( question.pk ) ) )

        return output

    def get_choices( self, choices ):
        """
        Get all the choices/options for a question, delimiting them by comma.

        @param choices String String of choices from the question model

        @return A list of choices/options
        """
        CHOICES = []
        # Delimit our choices
        choices_delimited = choices.split( ',' )
        for choice in choices_delimited:
            # For every choice, append the value to a list
            CHOICES.append( str( choice ) )
        # Return a list of choices/options
        return CHOICES

    def get_results( self, question ):
        """
        Get all the answers for a question

        @return QuerySet with all the answers for a question
        """
        answers = Answers.objects.filter( question__exact = question )
        return answers

    def radio_pie_chart( request, question ):
        """
        @return Piechart object for radio results
        """
        ds = DataPool(
            series=[{'options': {
                         'source': RadioResults.objects.filter( question__exact = question )},
                     'terms': [
                         'answer',
                         'answer_count']}
                    ])

        chart = Chart(
            datasource = ds,
            series_options = [{'options': {
                                   'type': 'pie',
                                   'stacking': False},
                               'terms': {
                                   'answer': [
                                       'answer_count']
                               }}],
            chart_options = {
                'title': {
                    'text': question.text } } )

        return chart

    def select_pie_chart( request, question ):
        """
        @return Piechart object for select results
        """
        ds = DataPool(
            series=[{'options': {
                         'source': SelectResults.objects.filter( question__exact = question )},
                     'terms': [
                         'answer',
                         'answer_count']}
                    ])

        chart = Chart(
            datasource = ds,
            series_options = [{'options': {
                                   'type': 'pie',
                                   'stacking': False},
                               'terms': {
                                   'answer': [
                                       'answer_count']
                               }}],
            chart_options = {
                'title': {
                    'text': question.text } } )

        return chart

    def checkbox_bar_chart( request, question ):
        """
        @return Barchart for checkbox results
        """
        ds = DataPool(
            series=[{'options': {
                         'source': CheckboxResults.objects.filter( question__exact = question ) },
                     'terms': [
                         'answer',
                         'answer_count']}
                    ])

        chart = Chart(
            datasource = ds,
            series_options = [{'options': {
                                   'type': 'column',
                                   'stacking': True},
                               'terms': {
                                   'answer': [
                                       'answer_count']
                               }}],
            chart_options = {
                'title': {
                    'text': question.text },
                'xAxis': {
                    'title': {
                        'text': 'Answers' } } } )

        return chart
license: mit    hash: -1,745,348,226,818,002,700    line_mean: 27.807317    line_max: 97
alpha_frac: 0.590686    autogenerated: false    ratio: 3.710336    config_test: false
has_no_keywords: false    few_assignments: false
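Example (not part of the dump): a small illustration of the comma-delimited choices handling that drives the many .strip() calls above; the choices string is illustrative.

    r = Results()
    print(r.get_choices("Yes, No, Maybe"))  # ['Yes', ' No', ' Maybe'] -- leading spaces survive the split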
----------------------------------------------------------------------
repo_name: luckielordie/conan
path: conans/model/info.py
copies: 1    size: 13408
content:
import os

from conans.client.build.cppstd_flags import cppstd_default
from conans.errors import ConanException
from conans.model.env_info import EnvValues
from conans.model.options import OptionsValues
from conans.model.ref import PackageReference
from conans.model.values import Values
from conans.paths import CONANINFO
from conans.util.config_parser import ConfigParser
from conans.util.files import load
from conans.util.sha import sha1


class RequirementInfo(object):
    def __init__(self, value_str, indirect=False):
        """ parse the input into fields name, version... """
        ref = PackageReference.loads(value_str)
        self.package = ref
        self.full_name = ref.conan.name
        self.full_version = ref.conan.version
        self.full_user = ref.conan.user
        self.full_channel = ref.conan.channel
        self.full_package_id = ref.package_id

        # sha values
        if indirect:
            self.unrelated_mode()
        else:
            self.semver()

    def dumps(self):
        if not self.name:
            return ""
        result = ["%s/%s" % (self.name, self.version)]
        if self.user or self.channel:
            result.append("@%s/%s" % (self.user, self.channel))
        if self.package_id:
            result.append(":%s" % self.package_id)
        return "".join(result)

    @property
    def sha(self):
        return "/".join([str(n) for n in [self.name, self.version, self.user, self.channel,
                                          self.package_id]])

    def unrelated_mode(self):
        self.name = self.version = self.user = self.channel = self.package_id = None

    def semver_mode(self):
        self.name = self.full_name
        self.version = self.full_version.stable()
        self.user = self.channel = self.package_id = None

    semver = semver_mode

    def full_version_mode(self):
        self.name = self.full_name
        self.version = self.full_version
        self.user = self.channel = self.package_id = None

    def patch_mode(self):
        self.name = self.full_name
        self.version = self.full_version.patch()
        self.user = self.channel = self.package_id = None

    def base_mode(self):
        self.name = self.full_name
        self.version = self.full_version.base
        self.user = self.channel = self.package_id = None

    def minor_mode(self):
        self.name = self.full_name
        self.version = self.full_version.minor()
        self.user = self.channel = self.package_id = None

    def major_mode(self):
        self.name = self.full_name
        self.version = self.full_version.major()
        self.user = self.channel = self.package_id = None

    def full_recipe_mode(self):
        self.name = self.full_name
        self.version = self.full_version
        self.user = self.full_user
        self.channel = self.full_channel
        self.package_id = None

    def full_package_mode(self):
        self.name = self.full_name
        self.version = self.full_version
        self.user = self.full_user
        self.channel = self.full_channel
        self.package_id = self.full_package_id


class RequirementsInfo(object):
    def __init__(self, requires):
        # {PackageReference: RequirementInfo}
        self._data = {r: RequirementInfo(str(r)) for r in requires}

    def copy(self):
        return RequirementsInfo(self._data.keys())

    def clear(self):
        self._data = {}

    def remove(self, *args):
        for name in args:
            del self._data[self._get_key(name)]

    def add(self, indirect_reqs):
        """ necessary to propagate from upstream the real
        package requirements
        """
        for r in indirect_reqs:
            self._data[r] = RequirementInfo(str(r), indirect=True)

    def refs(self):
        """ used for updating downstream requirements with this
        """
        return list(self._data.keys())

    def _get_key(self, item):
        for reference in self._data:
            if reference.conan.name == item:
                return reference
        raise ConanException("No requirement matching for %s" % (item))

    def __getitem__(self, item):
        """get by package name
        Necessary to access from conaninfo
        self.requires["Boost"].version = "2.X"
        """
        return self._data[self._get_key(item)]

    @property
    def pkg_names(self):
        return [r.conan.name for r in self._data.keys()]

    @property
    def sha(self):
        result = []
        # Remove requirements without a name, i.e. indirect transitive requirements
        data = {k: v for k, v in self._data.items() if v.name}
        for key in sorted(data):
            result.append(data[key].sha)
        return sha1('\n'.join(result).encode())

    def dumps(self):
        result = []
        for ref in sorted(self._data):
            dumped = self._data[ref].dumps()
            if dumped:
                result.append(dumped)
        return "\n".join(result)

    def unrelated_mode(self):
        self.clear()

    def semver_mode(self):
        for r in self._data.values():
            r.semver_mode()

    def patch_mode(self):
        for r in self._data.values():
            r.patch_mode()

    def minor_mode(self):
        for r in self._data.values():
            r.minor_mode()

    def major_mode(self):
        for r in self._data.values():
            r.major_mode()

    def base_mode(self):
        for r in self._data.values():
            r.base_mode()

    def full_version_mode(self):
        for r in self._data.values():
            r.full_version_mode()

    def full_recipe_mode(self):
        for r in self._data.values():
            r.full_recipe_mode()

    def full_package_mode(self):
        for r in self._data.values():
            r.full_package_mode()


class RequirementsList(list):
    @staticmethod
    def loads(text):
        return RequirementsList.deserialize(text.splitlines())

    def dumps(self):
        return "\n".join(self.serialize())

    def serialize(self):
        return [str(r) for r in sorted(self)]

    @staticmethod
    def deserialize(data):
        return RequirementsList([PackageReference.loads(line) for line in data])


class ConanInfo(object):

    def copy(self):
        """ Useful for build_id implementation
        """
        result = ConanInfo()
        result.settings = self.settings.copy()
        result.options = self.options.copy()
        result.requires = self.requires.copy()
        return result

    @staticmethod
    def create(settings, options, requires, indirect_requires):
        result = ConanInfo()
        result.full_settings = settings
        result.settings = settings.copy()
        result.full_options = options
        result.options = options.copy()
        result.options.clear_indirect()
        result.full_requires = RequirementsList(requires)
        result.requires = RequirementsInfo(requires)
        result.requires.add(indirect_requires)
        result.full_requires.extend(indirect_requires)
        result.recipe_hash = None
        result.env_values = EnvValues()
        result.vs_toolset_compatible()
        result.discard_build_settings()
        result.default_std_matching()
        return result

    @staticmethod
    def loads(text):
        parser = ConfigParser(text, ["settings", "full_settings", "options", "full_options",
                                     "requires", "full_requires", "scope", "recipe_hash", "env"],
                              raise_unexpected_field=False)
        result = ConanInfo()
        result.settings = Values.loads(parser.settings)
        result.full_settings = Values.loads(parser.full_settings)
        result.options = OptionsValues.loads(parser.options)
        result.full_options = OptionsValues.loads(parser.full_options)
        result.full_requires = RequirementsList.loads(parser.full_requires)
        result.requires = RequirementsInfo(result.full_requires)
        result.recipe_hash = parser.recipe_hash or None

        # TODO: Missing handling parsing of requires, but not necessary now
        result.env_values = EnvValues.loads(parser.env)
        return result

    def dumps(self):
        def indent(text):
            if not text:
                return ""
            return '\n'.join("    " + line for line in text.splitlines())
        result = list()

        result.append("[settings]")
        result.append(indent(self.settings.dumps()))
        result.append("\n[requires]")
        result.append(indent(self.requires.dumps()))
        result.append("\n[options]")
        result.append(indent(self.options.dumps()))
        result.append("\n[full_settings]")
        result.append(indent(self.full_settings.dumps()))
        result.append("\n[full_requires]")
        result.append(indent(self.full_requires.dumps()))
        result.append("\n[full_options]")
        result.append(indent(self.full_options.dumps()))
        result.append("\n[recipe_hash]\n%s" % indent(self.recipe_hash))
        result.append("\n[env]")
        result.append(indent(self.env_values.dumps()))

        return '\n'.join(result) + "\n"

    def __eq__(self, other):
        """ currently just for testing purposes
        """
        return self.dumps() == other.dumps()

    def __ne__(self, other):
        return not self.__eq__(other)

    @staticmethod
    def load_file(conan_info_path):
        """ load from file
        """
        try:
            config_text = load(conan_info_path)
        except IOError:
            raise ConanException("Does not exist %s" % conan_info_path)
        else:
            return ConanInfo.loads(config_text)

    @staticmethod
    def load_from_package(package_folder):
        info_path = os.path.join(package_folder, CONANINFO)
        return ConanInfo.load_file(info_path)

    def package_id(self):
        """ The package_id of a conans is the sha1 of its specific requirements,
        options and settings
        """
        computed_id = getattr(self, "_package_id", None)
        if computed_id:
            return computed_id
        result = []
        result.append(self.settings.sha)
        # Only options of non-dev requirements that are still in requires are valid
        self.options.filter_used(self.requires.pkg_names)
        result.append(self.options.sha)
        result.append(self.requires.sha)
        self._package_id = sha1('\n'.join(result).encode())
        return self._package_id

    def serialize_min(self):
        """ This info will be shown in search results.
        """
        conan_info_json = {"settings": dict(self.settings.serialize()),
                           "options": dict(self.options.serialize()["options"]),
                           "full_requires": self.full_requires.serialize(),
                           "recipe_hash": self.recipe_hash}
        return conan_info_json

    def header_only(self):
        self.settings.clear()
        self.options.clear()
        self.requires.unrelated_mode()

    def vs_toolset_compatible(self):
        """Default behaviour: the same package for toolset v140 with compiler=Visual Studio 15
        as when using Visual Studio 14"""
        if self.full_settings.compiler != "Visual Studio":
            return

        toolsets_versions = {
            "v141": "15",
            "v140": "14",
            "v120": "12",
            "v110": "11",
            "v100": "10",
            "v90": "9",
            "v80": "8"}

        toolset = str(self.full_settings.compiler.toolset)
        version = toolsets_versions.get(toolset)
        if version is not None:
            self.settings.compiler.version = version
            del self.settings.compiler.toolset

    def vs_toolset_incompatible(self):
        """Will generate different packages for v140 and visual 15 than for visual 14"""
        if self.full_settings.compiler != "Visual Studio":
            return
        self.settings.compiler.version = self.full_settings.compiler.version
        self.settings.compiler.toolset = self.full_settings.compiler.toolset

    def discard_build_settings(self):
        # When os is defined, os_build is irrelevant for the consumer.
        # only when os_build is alone (installers, etc) it has to be present in the package_id
        if self.full_settings.os and self.full_settings.os_build:
            del self.settings.os_build
        if self.full_settings.arch and self.full_settings.arch_build:
            del self.settings.arch_build

    def include_build_settings(self):
        self.settings.os_build = self.full_settings.os_build
        self.settings.arch_build = self.full_settings.arch_build

    def default_std_matching(self):
        """ If we are building with gcc 7, and we specify -s cppstd=gnu14, it's the default,
        so the same as specifying None; packages are the same
        """
        if self.full_settings.cppstd and \
                self.full_settings.compiler and \
                self.full_settings.compiler.version:
            default = cppstd_default(str(self.full_settings.compiler),
                                     str(self.full_settings.compiler.version))
            if default == str(self.full_settings.cppstd):
                self.settings.cppstd = None

    def default_std_non_matching(self):
        if self.full_settings.cppstd:
            self.settings.cppstd = self.full_settings.cppstd
license: mit    hash: -8,335,431,246,790,530,000    line_mean: 32.7733    line_max: 95
alpha_frac: 0.598747    autogenerated: false    ratio: 4.008371    config_test: false
has_no_keywords: false    few_assignments: false
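Example (not part of the dump): a minimal sketch of the version-mode methods on RequirementInfo, assuming the usual name/version@user/channel:package_id reference form.

    from conans.model.info import RequirementInfo

    ri = RequirementInfo("Boost/1.69.0@conan/stable:123456")
    ri.semver_mode()        # keep name plus the semver-reduced version only
    print(ri.dumps())
    ri.full_package_mode()  # everything, including the package_id
    print(ri.dumps())       # -> "Boost/1.69.0@conan/stable:123456"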
----------------------------------------------------------------------
repo_name: HewlettPackard/oneview-ansible
path: library/oneview_network_set_facts.py
copies: 1    size: 4474
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: oneview_network_set_facts
short_description: Retrieve facts about the OneView Network Sets
description:
    - Retrieve facts about the Network Sets from OneView.
version_added: "2.4"
requirements:
    - hpeOneView >= 5.4.0
author:
    - Felipe Bulsoni (@fgbulsoni)
    - Thiago Miotto (@tmiotto)
    - Adriane Cardozo (@adriane-cardozo)
options:
    name:
      description:
        - Network Set name.
    options:
      description:
        - "List with options to gather facts about Network Set.
          Option allowed: C(withoutEthernet).
          The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."

extends_documentation_fragment:
    - oneview
    - oneview.factsparams
'''

EXAMPLES = '''
- name: Gather facts about all Network Sets
  oneview_network_set_facts:
    hostname: 172.16.101.48
    username: administrator
    password: my_password
    api_version: 1200
  no_log: true
  delegate_to: localhost

- debug: var=network_sets

- name: Gather paginated, filtered, and sorted facts about Network Sets
  oneview_network_set_facts:
    hostname: 172.16.101.48
    username: administrator
    password: my_password
    api_version: 1200
    params:
      start: 0
      count: 3
      sort: 'name:descending'
      filter: name='netset001'
  no_log: true
  delegate_to: localhost

- debug: var=network_sets

- name: Gather facts about all Network Sets, excluding Ethernet networks
  oneview_network_set_facts:
    hostname: 172.16.101.48
    username: administrator
    password: my_password
    api_version: 1200
    options:
      - withoutEthernet
  no_log: true
  delegate_to: localhost

- debug: var=network_sets

- name: Gather facts about a Network Set by name
  oneview_network_set_facts:
    hostname: 172.16.101.48
    username: administrator
    password: my_password
    api_version: 1200
    name: Name of the Network Set
  no_log: true
  delegate_to: localhost

- debug: var=network_sets

- name: Gather facts about a Network Set by name, excluding Ethernet networks
  oneview_network_set_facts:
    hostname: 172.16.101.48
    username: administrator
    password: my_password
    api_version: 1200
    name: Name of the Network Set
    options:
      - withoutEthernet
  no_log: true
  delegate_to: localhost

- debug: var=network_sets
'''

RETURN = '''
network_sets:
    description: Has all the OneView facts about the Network Sets.
    returned: Always, but can be empty.
    type: dict
'''

from ansible.module_utils.oneview import OneViewModule


class NetworkSetFactsModule(OneViewModule):
    argument_spec = dict(
        name=dict(type='str'),
        options=dict(type='list'),
        params=dict(type='dict'),
    )

    def __init__(self):
        super(NetworkSetFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.set_resource_object(self.oneview_client.network_sets)

    def execute_module(self):
        name = self.module.params.get('name')

        if 'withoutEthernet' in self.options:
            filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
            network_sets = self.resource_client.get_all_without_ethernet(filter=filter_by_name)
        elif name:
            network_sets = self.resource_client.get_by('name', name)
        else:
            network_sets = self.resource_client.get_all(**self.facts_params)

        return dict(changed=False, ansible_facts=dict(network_sets=network_sets))


def main():
    NetworkSetFactsModule().run()


if __name__ == '__main__':
    main()
license: apache-2.0    hash: -7,717,356,519,994,723,000    line_mean: 25.790419    line_max: 104
alpha_frac: 0.673894    autogenerated: false    ratio: 3.753356    config_test: false
has_no_keywords: false    few_assignments: false
----------------------------------------------------------------------
repo_name: anntzer/scikit-learn
path: sklearn/utils/_estimator_html_repr.py
copies: 1    size: 9497
content:
from contextlib import closing from contextlib import suppress from io import StringIO from string import Template import uuid import html from sklearn import config_context class _VisualBlock: """HTML Representation of Estimator Parameters ---------- kind : {'serial', 'parallel', 'single'} kind of HTML block estimators : list of estimators or `_VisualBlock`s or a single estimator If kind != 'single', then `estimators` is a list of estimators. If kind == 'single', then `estimators` is a single estimator. names : list of str, default=None If kind != 'single', then `names` corresponds to estimators. If kind == 'single', then `names` is a single string corresponding to the single estimator. name_details : list of str, str, or None, default=None If kind != 'single', then `name_details` corresponds to `names`. If kind == 'single', then `name_details` is a single string corresponding to the single estimator. dash_wrapped : bool, default=True If true, wrapped HTML element will be wrapped with a dashed border. Only active when kind != 'single'. """ def __init__(self, kind, estimators, *, names=None, name_details=None, dash_wrapped=True): self.kind = kind self.estimators = estimators self.dash_wrapped = dash_wrapped if self.kind in ('parallel', 'serial'): if names is None: names = (None, ) * len(estimators) if name_details is None: name_details = (None, ) * len(estimators) self.names = names self.name_details = name_details def _sk_visual_block_(self): return self def _write_label_html(out, name, name_details, outer_class="sk-label-container", inner_class="sk-label", checked=False): """Write labeled html with or without a dropdown with named details""" out.write(f'<div class="{outer_class}">' f'<div class="{inner_class} sk-toggleable">') name = html.escape(name) if name_details is not None: checked_str = 'checked' if checked else '' est_id = uuid.uuid4() out.write(f'<input class="sk-toggleable__control sk-hidden--visually" ' f'id="{est_id}" type="checkbox" {checked_str}>' f'<label class="sk-toggleable__label" for="{est_id}">' f'{name}</label>' f'<div class="sk-toggleable__content"><pre>{name_details}' f'</pre></div>') else: out.write(f'<label>{name}</label>') out.write('</div></div>') # outer_class inner_class def _get_visual_block(estimator): """Generate information about how to display an estimator. """ with suppress(AttributeError): return estimator._sk_visual_block_() if isinstance(estimator, str): return _VisualBlock('single', estimator, names=estimator, name_details=estimator) elif estimator is None: return _VisualBlock('single', estimator, names='None', name_details='None') # check if estimator looks like a meta estimator wraps estimators if hasattr(estimator, 'get_params'): estimators = [] for key, value in estimator.get_params().items(): # Only look at the estimators in the first layer if '__' not in key and hasattr(value, 'get_params'): estimators.append(value) if len(estimators): return _VisualBlock('parallel', estimators, names=None) return _VisualBlock('single', estimator, names=estimator.__class__.__name__, name_details=str(estimator)) def _write_estimator_html(out, estimator, estimator_label, estimator_label_details, first_call=False): """Write estimator to html in serial, parallel, or by itself (single). 
""" if first_call: est_block = _get_visual_block(estimator) else: with config_context(print_changed_only=True): est_block = _get_visual_block(estimator) if est_block.kind in ('serial', 'parallel'): dashed_wrapped = first_call or est_block.dash_wrapped dash_cls = " sk-dashed-wrapped" if dashed_wrapped else "" out.write(f'<div class="sk-item{dash_cls}">') if estimator_label: _write_label_html(out, estimator_label, estimator_label_details) kind = est_block.kind out.write(f'<div class="sk-{kind}">') est_infos = zip(est_block.estimators, est_block.names, est_block.name_details) for est, name, name_details in est_infos: if kind == 'serial': _write_estimator_html(out, est, name, name_details) else: # parallel out.write('<div class="sk-parallel-item">') # wrap element in a serial visualblock serial_block = _VisualBlock('serial', [est], dash_wrapped=False) _write_estimator_html(out, serial_block, name, name_details) out.write('</div>') # sk-parallel-item out.write('</div></div>') elif est_block.kind == 'single': _write_label_html(out, est_block.names, est_block.name_details, outer_class="sk-item", inner_class="sk-estimator", checked=first_call) _STYLE = """ #$id { color: black; background-color: white; } #$id pre{ padding: 0; } #$id div.sk-toggleable { background-color: white; } #$id label.sk-toggleable__label { cursor: pointer; display: block; width: 100%; margin-bottom: 0; padding: 0.2em 0.3em; box-sizing: border-box; text-align: center; } #$id div.sk-toggleable__content { max-height: 0; max-width: 0; overflow: hidden; text-align: left; background-color: #f0f8ff; } #$id div.sk-toggleable__content pre { margin: 0.2em; color: black; border-radius: 0.25em; background-color: #f0f8ff; } #$id input.sk-toggleable__control:checked~div.sk-toggleable__content { max-height: 200px; max-width: 100%; overflow: auto; } #$id div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label { background-color: #d4ebff; } #$id div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label { background-color: #d4ebff; } #$id input.sk-hidden--visually { border: 0; clip: rect(1px 1px 1px 1px); clip: rect(1px, 1px, 1px, 1px); height: 1px; margin: -1px; overflow: hidden; padding: 0; position: absolute; width: 1px; } #$id div.sk-estimator { font-family: monospace; background-color: #f0f8ff; margin: 0.25em 0.25em; border: 1px dotted black; border-radius: 0.25em; box-sizing: border-box; } #$id div.sk-estimator:hover { background-color: #d4ebff; } #$id div.sk-parallel-item::after { content: ""; width: 100%; border-bottom: 1px solid gray; flex-grow: 1; } #$id div.sk-label:hover label.sk-toggleable__label { background-color: #d4ebff; } #$id div.sk-serial::before { content: ""; position: absolute; border-left: 1px solid gray; box-sizing: border-box; top: 2em; bottom: 0; left: 50%; } #$id div.sk-serial { display: flex; flex-direction: column; align-items: center; background-color: white; } #$id div.sk-item { z-index: 1; } #$id div.sk-parallel { display: flex; align-items: stretch; justify-content: center; background-color: white; } #$id div.sk-parallel-item { display: flex; flex-direction: column; position: relative; background-color: white; } #$id div.sk-parallel-item:first-child::after { align-self: flex-end; width: 50%; } #$id div.sk-parallel-item:last-child::after { align-self: flex-start; width: 50%; } #$id div.sk-parallel-item:only-child::after { width: 0; } #$id div.sk-dashed-wrapped { border: 1px dashed gray; margin: 0.2em; box-sizing: border-box; padding-bottom: 0.1em; background-color: 
white; position: relative; }
#$id div.sk-label label { font-family: monospace; font-weight: bold; background-color: white; display: inline-block; line-height: 1.2em; }
#$id div.sk-label-container { position: relative; z-index: 2; text-align: center; }
#$id div.sk-container { display: inline-block; position: relative; }
""".replace('  ', '').replace('\n', '')  # noqa


def estimator_html_repr(estimator):
    """Build an HTML representation of an estimator.

    Read more in the :ref:`User Guide <visualizing_composite_estimators>`.

    Parameters
    ----------
    estimator : estimator object
        The estimator to visualize.

    Returns
    -------
    html: str
        HTML representation of estimator.
    """
    with closing(StringIO()) as out:
        container_id = "sk-" + str(uuid.uuid4())
        style_template = Template(_STYLE)
        style_with_id = style_template.substitute(id=container_id)
        out.write(f'<style>{style_with_id}</style>'
                  f'<div id="{container_id}" class="sk-top-container">'
                  '<div class="sk-container">')
        _write_estimator_html(out, estimator, estimator.__class__.__name__,
                              str(estimator), first_call=True)
        out.write('</div></div>')

        html_output = out.getvalue()
        return html_output
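# Hedged usage sketch (added for illustration, not part of the upstream module):
# renders a small Pipeline as an HTML diagram and writes it to disk so it can be
# opened in a browser. Pipeline, StandardScaler and LogisticRegression are the
# standard scikit-learn estimators; the output file name here is arbitrary.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    pipe = Pipeline([('scale', StandardScaler()),
                     ('clf', LogisticRegression())])
    # estimator_html_repr() returns one self-contained HTML string
    with open('pipeline_diagram.html', 'w') as f:
        f.write(estimator_html_repr(pipe))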
bsd-3-clause
-6,215,935,959,145,116,000
28.77116
87
0.605349
false
3.611027
false
false
false
henry0312/LightGBM
python-package/lightgbm/basic.py
1
150238
# coding: utf-8 """Wrapper for C API of LightGBM.""" import ctypes import json import os import warnings from collections import OrderedDict from copy import deepcopy from functools import wraps from logging import Logger from tempfile import NamedTemporaryFile from typing import Any, Dict, List, Set, Union import numpy as np import scipy.sparse from .compat import PANDAS_INSTALLED, concat, dt_DataTable, is_dtype_sparse, pd_DataFrame, pd_Series from .libpath import find_lib_path class _DummyLogger: def info(self, msg): print(msg) def warning(self, msg): warnings.warn(msg, stacklevel=3) _LOGGER = _DummyLogger() def register_logger(logger): """Register custom logger. Parameters ---------- logger : logging.Logger Custom logger. """ if not isinstance(logger, Logger): raise TypeError("Logger should inherit logging.Logger class") global _LOGGER _LOGGER = logger def _normalize_native_string(func): """Join log messages from native library which come by chunks.""" msg_normalized = [] @wraps(func) def wrapper(msg): nonlocal msg_normalized if msg.strip() == '': msg = ''.join(msg_normalized) msg_normalized = [] return func(msg) else: msg_normalized.append(msg) return wrapper def _log_info(msg): _LOGGER.info(msg) def _log_warning(msg): _LOGGER.warning(msg) @_normalize_native_string def _log_native(msg): _LOGGER.info(msg) def _log_callback(msg): """Redirect logs from native library into Python.""" _log_native(str(msg.decode('utf-8'))) def _load_lib(): """Load LightGBM library.""" lib_path = find_lib_path() if len(lib_path) == 0: return None lib = ctypes.cdll.LoadLibrary(lib_path[0]) lib.LGBM_GetLastError.restype = ctypes.c_char_p callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p) lib.callback = callback(_log_callback) if lib.LGBM_RegisterLogCallback(lib.callback) != 0: raise LightGBMError(lib.LGBM_GetLastError().decode('utf-8')) return lib _LIB = _load_lib() NUMERIC_TYPES = (int, float, bool) def _safe_call(ret): """Check the return value from C API call. Parameters ---------- ret : int The return value from C API calls. 
""" if ret != 0: raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8')) def is_numeric(obj): """Check whether object is a number or not, include numpy number, etc.""" try: float(obj) return True except (TypeError, ValueError): # TypeError: obj is not a string or a number # ValueError: invalid literal return False def is_numpy_1d_array(data): """Check whether data is a numpy 1-D array.""" return isinstance(data, np.ndarray) and len(data.shape) == 1 def is_numpy_column_array(data): """Check whether data is a column numpy array.""" if not isinstance(data, np.ndarray): return False shape = data.shape return len(shape) == 2 and shape[1] == 1 def cast_numpy_1d_array_to_dtype(array, dtype): """Cast numpy 1d array to given dtype.""" if array.dtype == dtype: return array return array.astype(dtype=dtype, copy=False) def is_1d_list(data): """Check whether data is a 1-D list.""" return isinstance(data, list) and (not data or is_numeric(data[0])) def list_to_1d_numpy(data, dtype=np.float32, name='list'): """Convert data to numpy 1-D array.""" if is_numpy_1d_array(data): return cast_numpy_1d_array_to_dtype(data, dtype) elif is_numpy_column_array(data): _log_warning('Converting column-vector to 1d array') array = data.ravel() return cast_numpy_1d_array_to_dtype(array, dtype) elif is_1d_list(data): return np.array(data, dtype=dtype, copy=False) elif isinstance(data, pd_Series): if _get_bad_pandas_dtypes([data.dtypes]): raise ValueError('Series.dtypes must be int, float or bool') return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well else: raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n" "It should be list, numpy 1-D array or pandas Series") def cfloat32_array_to_numpy(cptr, length): """Convert a ctypes float pointer array to a numpy array.""" if isinstance(cptr, ctypes.POINTER(ctypes.c_float)): return np.ctypeslib.as_array(cptr, shape=(length,)).copy() else: raise RuntimeError('Expected float pointer') def cfloat64_array_to_numpy(cptr, length): """Convert a ctypes double pointer array to a numpy array.""" if isinstance(cptr, ctypes.POINTER(ctypes.c_double)): return np.ctypeslib.as_array(cptr, shape=(length,)).copy() else: raise RuntimeError('Expected double pointer') def cint32_array_to_numpy(cptr, length): """Convert a ctypes int pointer array to a numpy array.""" if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)): return np.ctypeslib.as_array(cptr, shape=(length,)).copy() else: raise RuntimeError('Expected int32 pointer') def cint64_array_to_numpy(cptr, length): """Convert a ctypes int pointer array to a numpy array.""" if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)): return np.ctypeslib.as_array(cptr, shape=(length,)).copy() else: raise RuntimeError('Expected int64 pointer') def c_str(string): """Convert a Python string to C string.""" return ctypes.c_char_p(string.encode('utf-8')) def c_array(ctype, values): """Convert a Python array to C array.""" return (ctype * len(values))(*values) def json_default_with_numpy(obj): """Convert numpy classes to JSON serializable objects.""" if isinstance(obj, (np.integer, np.floating, np.bool_)): return obj.item() elif isinstance(obj, np.ndarray): return obj.tolist() else: return obj def param_dict_to_str(data): """Convert Python dictionary to string, which is passed to C API.""" if data is None or not data: return "" pairs = [] for key, val in data.items(): if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val): def to_string(x): if isinstance(x, list): return f"[{','.join(map(str, x))}]" 
else: return str(x) pairs.append(f"{key}={','.join(map(to_string, val))}") elif isinstance(val, (str, NUMERIC_TYPES)) or is_numeric(val): pairs.append(f"{key}={val}") elif val is not None: raise TypeError(f'Unknown type of parameter:{key}, got:{type(val).__name__}') return ' '.join(pairs) class _TempFile: def __enter__(self): with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f: self.name = f.name return self def __exit__(self, exc_type, exc_val, exc_tb): if os.path.isfile(self.name): os.remove(self.name) def readlines(self): with open(self.name, "r+") as f: ret = f.readlines() return ret def writelines(self, lines): with open(self.name, "w+") as f: f.writelines(lines) class LightGBMError(Exception): """Error thrown by LightGBM.""" pass # DeprecationWarning is not shown by default, so let's create our own with higher level class LGBMDeprecationWarning(UserWarning): """Custom deprecation warning.""" pass class _ConfigAliases: aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt", "subsample_for_bin"}, "boosting": {"boosting", "boosting_type", "boost"}, "categorical_feature": {"categorical_feature", "cat_feature", "categorical_column", "cat_column"}, "data_random_seed": {"data_random_seed", "data_seed"}, "early_stopping_round": {"early_stopping_round", "early_stopping_rounds", "early_stopping", "n_iter_no_change"}, "enable_bundle": {"enable_bundle", "is_enable_bundle", "bundle"}, "eval_at": {"eval_at", "ndcg_eval_at", "ndcg_at", "map_eval_at", "map_at"}, "group_column": {"group_column", "group", "group_id", "query_column", "query", "query_id"}, "header": {"header", "has_header"}, "ignore_column": {"ignore_column", "ignore_feature", "blacklist"}, "is_enable_sparse": {"is_enable_sparse", "is_sparse", "enable_sparse", "sparse"}, "label_column": {"label_column", "label"}, "local_listen_port": {"local_listen_port", "local_port", "port"}, "machines": {"machines", "workers", "nodes"}, "metric": {"metric", "metrics", "metric_types"}, "num_class": {"num_class", "num_classes"}, "num_iterations": {"num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees", "num_round", "num_rounds", "num_boost_round", "n_estimators"}, "num_machines": {"num_machines", "num_machine"}, "num_threads": {"num_threads", "num_thread", "nthread", "nthreads", "n_jobs"}, "objective": {"objective", "objective_type", "app", "application"}, "pre_partition": {"pre_partition", "is_pre_partition"}, "tree_learner": {"tree_learner", "tree", "tree_type", "tree_learner_type"}, "two_round": {"two_round", "two_round_loading", "use_two_round_loading"}, "verbosity": {"verbosity", "verbose"}, "weight_column": {"weight_column", "weight"}} @classmethod def get(cls, *args): ret = set() for i in args: ret |= cls.aliases.get(i, {i}) return ret def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_value: Any) -> Dict[str, Any]: """Get a single parameter value, accounting for aliases. Parameters ---------- main_param_name : str Name of the main parameter to get a value for. One of the keys of ``_ConfigAliases``. params : dict Dictionary of LightGBM parameters. default_value : Any Default value to use for the parameter, if none is found in ``params``. Returns ------- params : dict A ``params`` dict with exactly one value for ``main_param_name``, and all aliases ``main_param_name`` removed. If both ``main_param_name`` and one or more aliases for it are found, the value of ``main_param_name`` will be preferred. 
""" # avoid side effects on passed-in parameters params = deepcopy(params) # find a value, and remove other aliases with .pop() # prefer the value of 'main_param_name' if it exists, otherwise search the aliases found_value = None if main_param_name in params.keys(): found_value = params[main_param_name] for param in _ConfigAliases.get(main_param_name): val = params.pop(param, None) if found_value is None and val is not None: found_value = val if found_value is not None: params[main_param_name] = found_value else: params[main_param_name] = default_value return params MAX_INT32 = (1 << 31) - 1 """Macro definition of data type in C API of LightGBM""" C_API_DTYPE_FLOAT32 = 0 C_API_DTYPE_FLOAT64 = 1 C_API_DTYPE_INT32 = 2 C_API_DTYPE_INT64 = 3 """Matrix is row major in Python""" C_API_IS_ROW_MAJOR = 1 """Macro definition of prediction type in C API of LightGBM""" C_API_PREDICT_NORMAL = 0 C_API_PREDICT_RAW_SCORE = 1 C_API_PREDICT_LEAF_INDEX = 2 C_API_PREDICT_CONTRIB = 3 """Macro definition of sparse matrix type""" C_API_MATRIX_TYPE_CSR = 0 C_API_MATRIX_TYPE_CSC = 1 """Macro definition of feature importance type""" C_API_FEATURE_IMPORTANCE_SPLIT = 0 C_API_FEATURE_IMPORTANCE_GAIN = 1 """Data type of data field""" FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32, "weight": C_API_DTYPE_FLOAT32, "init_score": C_API_DTYPE_FLOAT64, "group": C_API_DTYPE_INT32} """String name to int feature importance type mapper""" FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT, "gain": C_API_FEATURE_IMPORTANCE_GAIN} def convert_from_sliced_object(data): """Fix the memory of multi-dimensional sliced object.""" if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray): if not data.flags.c_contiguous: _log_warning("Usage of np.ndarray subset (sliced data) is not recommended " "due to it will double the peak memory cost in LightGBM.") return np.copy(data) return data def c_float_array(data): """Get pointer of float numpy array / list.""" if is_1d_list(data): data = np.array(data, copy=False) if is_numpy_1d_array(data): data = convert_from_sliced_object(data) assert data.flags.c_contiguous if data.dtype == np.float32: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) type_data = C_API_DTYPE_FLOAT32 elif data.dtype == np.float64: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) type_data = C_API_DTYPE_FLOAT64 else: raise TypeError(f"Expected np.float32 or np.float64, met type({data.dtype})") else: raise TypeError(f"Unknown type({type(data).__name__})") return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed def c_int_array(data): """Get pointer of int numpy array / list.""" if is_1d_list(data): data = np.array(data, copy=False) if is_numpy_1d_array(data): data = convert_from_sliced_object(data) assert data.flags.c_contiguous if data.dtype == np.int32: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) type_data = C_API_DTYPE_INT32 elif data.dtype == np.int64: ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)) type_data = C_API_DTYPE_INT64 else: raise TypeError(f"Expected np.int32 or np.int64, met type({data.dtype})") else: raise TypeError(f"Unknown type({type(data).__name__})") return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed def _get_bad_pandas_dtypes(dtypes): pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64': 'int', 'uint8': 'int', 'uint16': 'int', 'uint32': 'int', 'uint64': 'int', 'bool': 'int', 'float16': 'float', 'float32': 'float', 
'float64': 'float'} bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper and (not is_dtype_sparse(dtype) or dtype.subtype.name not in pandas_dtype_mapper))] return bad_indices def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical): if isinstance(data, pd_DataFrame): if len(data.shape) != 2 or data.shape[0] < 1: raise ValueError('Input data must be 2 dimensional and non empty.') if feature_name == 'auto' or feature_name is None: data = data.rename(columns=str) cat_cols = list(data.select_dtypes(include=['category']).columns) cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered] if pandas_categorical is None: # train dataset pandas_categorical = [list(data[col].cat.categories) for col in cat_cols] else: if len(cat_cols) != len(pandas_categorical): raise ValueError('train and valid dataset categorical_feature do not match.') for col, category in zip(cat_cols, pandas_categorical): if list(data[col].cat.categories) != list(category): data[col] = data[col].cat.set_categories(category) if len(cat_cols): # cat_cols is list data = data.copy() # not alter origin DataFrame data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan}) if categorical_feature is not None: if feature_name is None: feature_name = list(data.columns) if categorical_feature == 'auto': # use cat cols from DataFrame categorical_feature = cat_cols_not_ordered else: # use cat cols specified by user categorical_feature = list(categorical_feature) if feature_name == 'auto': feature_name = list(data.columns) bad_indices = _get_bad_pandas_dtypes(data.dtypes) if bad_indices: bad_index_cols_str = ', '.join(data.columns[bad_indices]) raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n" "Did not expect the data types in the following fields: " f"{bad_index_cols_str}") data = data.values if data.dtype != np.float32 and data.dtype != np.float64: data = data.astype(np.float32) else: if feature_name == 'auto': feature_name = None if categorical_feature == 'auto': categorical_feature = None return data, feature_name, categorical_feature, pandas_categorical def _label_from_pandas(label): if isinstance(label, pd_DataFrame): if len(label.columns) > 1: raise ValueError('DataFrame for label cannot have multiple columns') if _get_bad_pandas_dtypes(label.dtypes): raise ValueError('DataFrame.dtypes for label must be int, float or bool') label = np.ravel(label.values.astype(np.float32, copy=False)) return label def _dump_pandas_categorical(pandas_categorical, file_name=None): categorical_json = json.dumps(pandas_categorical, default=json_default_with_numpy) pandas_str = f'\npandas_categorical:{categorical_json}\n' if file_name is not None: with open(file_name, 'a') as f: f.write(pandas_str) return pandas_str def _load_pandas_categorical(file_name=None, model_str=None): pandas_key = 'pandas_categorical:' offset = -len(pandas_key) if file_name is not None: max_offset = -os.path.getsize(file_name) with open(file_name, 'rb') as f: while True: if offset < max_offset: offset = max_offset f.seek(offset, os.SEEK_END) lines = f.readlines() if len(lines) >= 2: break offset *= 2 last_line = lines[-1].decode('utf-8').strip() if not last_line.startswith(pandas_key): last_line = lines[-2].decode('utf-8').strip() elif model_str is not None: idx = model_str.rfind('\n', 0, offset) last_line = model_str[idx:].strip() if last_line.startswith(pandas_key): return json.loads(last_line[len(pandas_key):]) else: return None class 
_InnerPredictor:
    """_InnerPredictor of LightGBM.

    Not exposed to user.
    Used only for prediction, usually used for continued training.

    .. note::

        Can be converted from Booster, but cannot be converted to Booster.
    """

    def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
        """Initialize the _InnerPredictor.

        Parameters
        ----------
        model_file : string or None, optional (default=None)
            Path to the model file.
        booster_handle : object or None, optional (default=None)
            Handle of Booster.
        pred_parameter: dict or None, optional (default=None)
            Other parameters for the prediction.
        """
        self.handle = ctypes.c_void_p()
        self.__is_manage_handle = True
        if model_file is not None:
            """Prediction task"""
            out_num_iterations = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
                c_str(model_file),
                ctypes.byref(out_num_iterations),
                ctypes.byref(self.handle)))
            out_num_class = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.num_class = out_num_class.value
            self.num_total_iteration = out_num_iterations.value
            self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
        elif booster_handle is not None:
            self.__is_manage_handle = False
            self.handle = booster_handle
            out_num_class = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.num_class = out_num_class.value
            self.num_total_iteration = self.current_iteration()
            self.pandas_categorical = None
        else:
            raise TypeError('Need model_file or booster_handle to create a predictor')

        pred_parameter = {} if pred_parameter is None else pred_parameter
        self.pred_parameter = param_dict_to_str(pred_parameter)

    def __del__(self):
        try:
            if self.__is_manage_handle:
                _safe_call(_LIB.LGBM_BoosterFree(self.handle))
        except AttributeError:
            pass

    def __getstate__(self):
        this = self.__dict__.copy()
        this.pop('handle', None)
        return this

    def predict(self, data, start_iteration=0, num_iteration=-1,
                raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
                is_reshape=True):
        """Predict logic.

        Parameters
        ----------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
            Data source for prediction.
            When data type is string, it represents the path of txt file.
        start_iteration : int, optional (default=0)
            Start index of the iteration to predict.
        num_iteration : int, optional (default=-1)
            Iteration used for prediction.
        raw_score : bool, optional (default=False)
            Whether to predict raw scores.
        pred_leaf : bool, optional (default=False)
            Whether to predict leaf index.
        pred_contrib : bool, optional (default=False)
            Whether to predict feature contributions.
        data_has_header : bool, optional (default=False)
            Whether data has header.
            Used only for txt data.
        is_reshape : bool, optional (default=True)
            Whether to reshape to (nrow, ncol).

        Returns
        -------
        result : numpy array, scipy.sparse or list of scipy.sparse
            Prediction result.
            Can be sparse or a list of sparse objects (each element represents
            predictions for one class) for feature contributions (when ``pred_contrib=True``).
""" if isinstance(data, Dataset): raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead") data = _data_from_pandas(data, None, None, self.pandas_categorical)[0] predict_type = C_API_PREDICT_NORMAL if raw_score: predict_type = C_API_PREDICT_RAW_SCORE if pred_leaf: predict_type = C_API_PREDICT_LEAF_INDEX if pred_contrib: predict_type = C_API_PREDICT_CONTRIB int_data_has_header = 1 if data_has_header else 0 if isinstance(data, str): with _TempFile() as f: _safe_call(_LIB.LGBM_BoosterPredictForFile( self.handle, c_str(data), ctypes.c_int(int_data_has_header), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(self.pred_parameter), c_str(f.name))) lines = f.readlines() nrow = len(lines) preds = [float(token) for line in lines for token in line.split('\t')] preds = np.array(preds, dtype=np.float64, copy=False) elif isinstance(data, scipy.sparse.csr_matrix): preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type) elif isinstance(data, scipy.sparse.csc_matrix): preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type) elif isinstance(data, np.ndarray): preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type) elif isinstance(data, list): try: data = np.array(data) except BaseException: raise ValueError('Cannot convert data list to numpy array.') preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type) elif isinstance(data, dt_DataTable): preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type) else: try: _log_warning('Converting data to scipy sparse matrix.') csr = scipy.sparse.csr_matrix(data) except BaseException: raise TypeError(f'Cannot predict data for type {type(data).__name__}') preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type) if pred_leaf: preds = preds.astype(np.int32) is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list) if is_reshape and not is_sparse and preds.size != nrow: if preds.size % nrow == 0: preds = preds.reshape(nrow, -1) else: raise ValueError(f'Length of predict result ({preds.size}) cannot be divide nrow ({nrow})') return preds def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type): """Get size of prediction result.""" if nrow > MAX_INT32: raise LightGBMError('LightGBM cannot perform prediction for data' f'with number of rows greater than MAX_INT32 ({MAX_INT32}).\n' 'You can split your data into chunks' 'and then concatenate predictions for them') n_preds = ctypes.c_int64(0) _safe_call(_LIB.LGBM_BoosterCalcNumPredict( self.handle, ctypes.c_int(nrow), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.byref(n_preds))) return n_preds.value def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type): """Predict for a 2-D numpy matrix.""" if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray or list must be 2 dimensional') def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None): if mat.dtype == np.float32 or mat.dtype == np.float64: data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False) else: # change non-float data to float data, need to copy data = np.array(mat.reshape(mat.size), dtype=np.float32) ptr_data, type_ptr_data, _ = c_float_array(data) n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type) if preds is None: preds = 
np.zeros(n_preds, dtype=np.float64) elif len(preds.shape) != 1 or len(preds) != n_preds: raise ValueError("Wrong length of pre-allocated predict array") out_num_preds = ctypes.c_int64(0) _safe_call(_LIB.LGBM_BoosterPredictForMat( self.handle, ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int32(mat.shape[0]), ctypes.c_int32(mat.shape[1]), ctypes.c_int(C_API_IS_ROW_MAJOR), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(self.pred_parameter), ctypes.byref(out_num_preds), preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if n_preds != out_num_preds.value: raise ValueError("Wrong length for predict results") return preds, mat.shape[0] nrow = mat.shape[0] if nrow > MAX_INT32: sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32) # __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])] n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum() preds = np.zeros(sum(n_preds), dtype=np.float64) for chunk, (start_idx_pred, end_idx_pred) in zip(np.array_split(mat, sections), zip(n_preds_sections, n_preds_sections[1:])): # avoid memory consumption by arrays concatenation operations inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred]) return preds, nrow else: return inner_predict(mat, start_iteration, num_iteration, predict_type) def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data, indptr_type, data_type, is_csr=True): # create numpy array from output arrays data_indices_len = out_shape[0] indptr_len = out_shape[1] if indptr_type == C_API_DTYPE_INT32: out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len) elif indptr_type == C_API_DTYPE_INT64: out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len) else: raise TypeError("Expected int32 or int64 type for indptr") if data_type == C_API_DTYPE_FLOAT32: out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len) elif data_type == C_API_DTYPE_FLOAT64: out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len) else: raise TypeError("Expected float32 or float64 type for data") out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len) # break up indptr based on number of rows (note more than one matrix in multiclass case) per_class_indptr_shape = cs.indptr.shape[0] # for CSC there is extra column added if not is_csr: per_class_indptr_shape += 1 out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape) # reformat output into a csr or csc matrix or list of csr or csc matrices cs_output_matrices = [] offset = 0 for cs_indptr in out_indptr_arrays: matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1] cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len] cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len] offset += matrix_indptr_len # same shape as input csr or csc matrix except extra column for expected value cs_shape = [cs.shape[0], cs.shape[1] + 1] # note: make sure we copy data as it will be deallocated next if is_csr: cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape)) else: cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape)) # free the temporary native indptr, indices, and data 
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data, ctypes.c_int(indptr_type), ctypes.c_int(data_type))) if len(cs_output_matrices) == 1: return cs_output_matrices[0] return cs_output_matrices def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type): """Predict for a CSR data.""" def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None): nrow = len(csr.indptr) - 1 n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type) if preds is None: preds = np.zeros(n_preds, dtype=np.float64) elif len(preds.shape) != 1 or len(preds) != n_preds: raise ValueError("Wrong length of pre-allocated predict array") out_num_preds = ctypes.c_int64(0) ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr) ptr_data, type_ptr_data, _ = c_float_array(csr.data) assert csr.shape[1] <= MAX_INT32 csr_indices = csr.indices.astype(np.int32, copy=False) _safe_call(_LIB.LGBM_BoosterPredictForCSR( self.handle, ptr_indptr, ctypes.c_int(type_ptr_indptr), csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csr.indptr)), ctypes.c_int64(len(csr.data)), ctypes.c_int64(csr.shape[1]), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(self.pred_parameter), ctypes.byref(out_num_preds), preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if n_preds != out_num_preds.value: raise ValueError("Wrong length for predict results") return preds, nrow def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type): ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr) ptr_data, type_ptr_data, _ = c_float_array(csr.data) csr_indices = csr.indices.astype(np.int32, copy=False) matrix_type = C_API_MATRIX_TYPE_CSR if type_ptr_indptr == C_API_DTYPE_INT32: out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)() else: out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)() out_ptr_indices = ctypes.POINTER(ctypes.c_int32)() if type_ptr_data == C_API_DTYPE_FLOAT32: out_ptr_data = ctypes.POINTER(ctypes.c_float)() else: out_ptr_data = ctypes.POINTER(ctypes.c_double)() out_shape = np.zeros(2, dtype=np.int64) _safe_call(_LIB.LGBM_BoosterPredictSparseOutput( self.handle, ptr_indptr, ctypes.c_int(type_ptr_indptr), csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csr.indptr)), ctypes.c_int64(len(csr.data)), ctypes.c_int64(csr.shape[1]), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(self.pred_parameter), ctypes.c_int(matrix_type), out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)), ctypes.byref(out_ptr_indptr), ctypes.byref(out_ptr_indices), ctypes.byref(out_ptr_data))) matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data, type_ptr_indptr, type_ptr_data, is_csr=True) nrow = len(csr.indptr) - 1 return matrices, nrow if predict_type == C_API_PREDICT_CONTRIB: return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type) nrow = len(csr.indptr) - 1 if nrow > MAX_INT32: sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow] # __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)] n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum() preds = 
np.zeros(sum(n_preds), dtype=np.float64) for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip(zip(sections, sections[1:]), zip(n_preds_sections, n_preds_sections[1:])): # avoid memory consumption by arrays concatenation operations inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred]) return preds, nrow else: return inner_predict(csr, start_iteration, num_iteration, predict_type) def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type): """Predict for a CSC data.""" def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type): ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr) ptr_data, type_ptr_data, _ = c_float_array(csc.data) csc_indices = csc.indices.astype(np.int32, copy=False) matrix_type = C_API_MATRIX_TYPE_CSC if type_ptr_indptr == C_API_DTYPE_INT32: out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)() else: out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)() out_ptr_indices = ctypes.POINTER(ctypes.c_int32)() if type_ptr_data == C_API_DTYPE_FLOAT32: out_ptr_data = ctypes.POINTER(ctypes.c_float)() else: out_ptr_data = ctypes.POINTER(ctypes.c_double)() out_shape = np.zeros(2, dtype=np.int64) _safe_call(_LIB.LGBM_BoosterPredictSparseOutput( self.handle, ptr_indptr, ctypes.c_int(type_ptr_indptr), csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(self.pred_parameter), ctypes.c_int(matrix_type), out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)), ctypes.byref(out_ptr_indptr), ctypes.byref(out_ptr_indices), ctypes.byref(out_ptr_data))) matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data, type_ptr_indptr, type_ptr_data, is_csr=False) nrow = csc.shape[0] return matrices, nrow nrow = csc.shape[0] if nrow > MAX_INT32: return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type) if predict_type == C_API_PREDICT_CONTRIB: return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type) n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type) preds = np.zeros(n_preds, dtype=np.float64) out_num_preds = ctypes.c_int64(0) ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr) ptr_data, type_ptr_data, _ = c_float_array(csc.data) assert csc.shape[0] <= MAX_INT32 csc_indices = csc.indices.astype(np.int32, copy=False) _safe_call(_LIB.LGBM_BoosterPredictForCSC( self.handle, ptr_indptr, ctypes.c_int(type_ptr_indptr), csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), ctypes.c_int(predict_type), ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), c_str(self.pred_parameter), ctypes.byref(out_num_preds), preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if n_preds != out_num_preds.value: raise ValueError("Wrong length for predict results") return preds, nrow def current_iteration(self): """Get the index of the current iteration. Returns ------- cur_iter : int The index of the current iteration. 
""" out_cur_iter = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetCurrentIteration( self.handle, ctypes.byref(out_cur_iter))) return out_cur_iter.value class Dataset: """Dataset in LightGBM.""" def __init__(self, data, label=None, reference=None, weight=None, group=None, init_score=None, silent=False, feature_name='auto', categorical_feature='auto', params=None, free_raw_data=True): """Initialize Dataset. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays Data source of Dataset. If string, it represents the path to txt file. label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None) Label of the data. reference : Dataset or None, optional (default=None) If this is Dataset for validation, training data should be used as reference. weight : list, numpy 1-D array, pandas Series or None, optional (default=None) Weight for each instance. group : list, numpy 1-D array, pandas Series or None, optional (default=None) Group/query data. Only used in the learning-to-rank task. sum(group) = n_samples. For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups, where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc. init_score : list, numpy 1-D array, pandas Series or None, optional (default=None) Init score for Dataset. silent : bool, optional (default=False) Whether to print messages during construction. feature_name : list of strings or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of strings or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. params : dict or None, optional (default=None) Other parameters for Dataset. free_raw_data : bool, optional (default=True) If True, raw data is freed after constructing inner Dataset. """ self.handle = None self.data = data self.label = label self.reference = reference self.weight = weight self.group = group self.init_score = init_score self.silent = silent self.feature_name = feature_name self.categorical_feature = categorical_feature self.params = deepcopy(params) self.free_raw_data = free_raw_data self.used_indices = None self.need_slice = True self._predictor = None self.pandas_categorical = None self.params_back_up = None self.feature_penalty = None self.monotone_constraints = None self.version = 0 def __del__(self): try: self._free_handle() except AttributeError: pass def get_params(self): """Get the used parameters in the Dataset. Returns ------- params : dict or None The used parameters in this Dataset object. 
""" if self.params is not None: # no min_data, nthreads and verbose in this function dataset_params = _ConfigAliases.get("bin_construct_sample_cnt", "categorical_feature", "data_random_seed", "enable_bundle", "feature_pre_filter", "forcedbins_filename", "group_column", "header", "ignore_column", "is_enable_sparse", "label_column", "linear_tree", "max_bin", "max_bin_by_feature", "min_data_in_bin", "pre_partition", "two_round", "use_missing", "weight_column", "zero_as_missing") return {k: v for k, v in self.params.items() if k in dataset_params} def _free_handle(self): if self.handle is not None: _safe_call(_LIB.LGBM_DatasetFree(self.handle)) self.handle = None self.need_slice = True if self.used_indices is not None: self.data = None return self def _set_init_score_by_predictor(self, predictor, data, used_indices=None): data_has_header = False if isinstance(data, str): # check data has header or not data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header")) num_data = self.num_data() if predictor is not None: init_score = predictor.predict(data, raw_score=True, data_has_header=data_has_header, is_reshape=False) if used_indices is not None: assert not self.need_slice if isinstance(data, str): sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32) assert num_data == len(used_indices) for i in range(len(used_indices)): for j in range(predictor.num_class): sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j] init_score = sub_init_score if predictor.num_class > 1: # need to regroup init_score new_init_score = np.zeros(init_score.size, dtype=np.float32) for i in range(num_data): for j in range(predictor.num_class): new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j] init_score = new_init_score elif self.init_score is not None: init_score = np.zeros(self.init_score.shape, dtype=np.float32) else: return self self.set_init_score(init_score) def _lazy_init(self, data, label=None, reference=None, weight=None, group=None, init_score=None, predictor=None, silent=False, feature_name='auto', categorical_feature='auto', params=None): if data is None: self.handle = None return self if reference is not None: self.pandas_categorical = reference.pandas_categorical categorical_feature = reference.categorical_feature data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data, feature_name, categorical_feature, self.pandas_categorical) label = _label_from_pandas(label) # process for args params = {} if params is None else params args_names = (getattr(self.__class__, '_lazy_init') .__code__ .co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount]) for key, _ in params.items(): if key in args_names: _log_warning(f'{key} keyword has been found in `params` and will be ignored.\n' f'Please use {key} argument of the Dataset constructor to pass this parameter.') # user can set verbose with params, it has higher priority if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent: params["verbose"] = -1 # get categorical features if categorical_feature is not None: categorical_indices = set() feature_dict = {} if feature_name is not None: feature_dict = {name: i for i, name in enumerate(feature_name)} for name in categorical_feature: if isinstance(name, str) and name in feature_dict: categorical_indices.add(feature_dict[name]) elif isinstance(name, int): categorical_indices.add(name) else: raise 
TypeError(f"Wrong type({type(name).__name__}) or unknown name({name}) in categorical_feature") if categorical_indices: for cat_alias in _ConfigAliases.get("categorical_feature"): if cat_alias in params: _log_warning(f'{cat_alias} in param dict is overridden.') params.pop(cat_alias, None) params['categorical_column'] = sorted(categorical_indices) params_str = param_dict_to_str(params) self.params = params # process for reference dataset ref_dataset = None if isinstance(reference, Dataset): ref_dataset = reference.construct().handle elif reference is not None: raise TypeError('Reference dataset should be None or dataset instance') # start construct data if isinstance(data, str): self.handle = ctypes.c_void_p() _safe_call(_LIB.LGBM_DatasetCreateFromFile( c_str(data), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) elif isinstance(data, scipy.sparse.csr_matrix): self.__init_from_csr(data, params_str, ref_dataset) elif isinstance(data, scipy.sparse.csc_matrix): self.__init_from_csc(data, params_str, ref_dataset) elif isinstance(data, np.ndarray): self.__init_from_np2d(data, params_str, ref_dataset) elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data): self.__init_from_list_np2d(data, params_str, ref_dataset) elif isinstance(data, dt_DataTable): self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset) else: try: csr = scipy.sparse.csr_matrix(data) self.__init_from_csr(csr, params_str, ref_dataset) except BaseException: raise TypeError(f'Cannot initialize Dataset from {type(data).__name__}') if label is not None: self.set_label(label) if self.get_label() is None: raise ValueError("Label should not be None") if weight is not None: self.set_weight(weight) if group is not None: self.set_group(group) if isinstance(predictor, _InnerPredictor): if self._predictor is None and init_score is not None: _log_warning("The init_score will be overridden by the prediction of init_model.") self._set_init_score_by_predictor(predictor, data) elif init_score is not None: self.set_init_score(init_score) elif predictor is not None: raise TypeError(f'Wrong predictor type {type(predictor).__name__}') # set feature names return self.set_feature_name(feature_name) def __init_from_np2d(self, mat, params_str, ref_dataset): """Initialize data from a 2-D numpy matrix.""" if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray must be 2 dimensional') self.handle = ctypes.c_void_p() if mat.dtype == np.float32 or mat.dtype == np.float64: data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False) else: # change non-float data to float data, need to copy data = np.array(mat.reshape(mat.size), dtype=np.float32) ptr_data, type_ptr_data, _ = c_float_array(data) _safe_call(_LIB.LGBM_DatasetCreateFromMat( ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int32(mat.shape[0]), ctypes.c_int32(mat.shape[1]), ctypes.c_int(C_API_IS_ROW_MAJOR), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) return self def __init_from_list_np2d(self, mats, params_str, ref_dataset): """Initialize data from a list of 2-D numpy matrices.""" ncol = mats[0].shape[1] nrow = np.zeros((len(mats),), np.int32) if mats[0].dtype == np.float64: ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))() else: ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))() holders = [] type_ptr_data = None for i, mat in enumerate(mats): if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray must be 2 dimensional') if mat.shape[1] != ncol: raise ValueError('Input arrays must have same 
number of columns') nrow[i] = mat.shape[0] if mat.dtype == np.float32 or mat.dtype == np.float64: mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False) else: # change non-float data to float data, need to copy mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32) chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i]) if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data: raise ValueError('Input chunks must have same type') ptr_data[i] = chunk_ptr_data type_ptr_data = chunk_type_ptr_data holders.append(holder) self.handle = ctypes.c_void_p() _safe_call(_LIB.LGBM_DatasetCreateFromMats( ctypes.c_int32(len(mats)), ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))), ctypes.c_int(type_ptr_data), nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ctypes.c_int32(ncol), ctypes.c_int(C_API_IS_ROW_MAJOR), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) return self def __init_from_csr(self, csr, params_str, ref_dataset): """Initialize data from a CSR matrix.""" if len(csr.indices) != len(csr.data): raise ValueError(f'Length mismatch: {len(csr.indices)} vs {len(csr.data)}') self.handle = ctypes.c_void_p() ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr) ptr_data, type_ptr_data, _ = c_float_array(csr.data) assert csr.shape[1] <= MAX_INT32 csr_indices = csr.indices.astype(np.int32, copy=False) _safe_call(_LIB.LGBM_DatasetCreateFromCSR( ptr_indptr, ctypes.c_int(type_ptr_indptr), csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csr.indptr)), ctypes.c_int64(len(csr.data)), ctypes.c_int64(csr.shape[1]), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) return self def __init_from_csc(self, csc, params_str, ref_dataset): """Initialize data from a CSC matrix.""" if len(csc.indices) != len(csc.data): raise ValueError(f'Length mismatch: {len(csc.indices)} vs {len(csc.data)}') self.handle = ctypes.c_void_p() ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr) ptr_data, type_ptr_data, _ = c_float_array(csc.data) assert csc.shape[0] <= MAX_INT32 csc_indices = csc.indices.astype(np.int32, copy=False) _safe_call(_LIB.LGBM_DatasetCreateFromCSC( ptr_indptr, ctypes.c_int(type_ptr_indptr), csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), c_str(params_str), ref_dataset, ctypes.byref(self.handle))) return self def construct(self): """Lazy init. Returns ------- self : Dataset Constructed Dataset object. 
""" if self.handle is None: if self.reference is not None: reference_params = self.reference.get_params() if self.get_params() != reference_params: _log_warning('Overriding the parameters from Reference Dataset.') self._update_params(reference_params) if self.used_indices is None: # create valid self._lazy_init(self.data, label=self.label, reference=self.reference, weight=self.weight, group=self.group, init_score=self.init_score, predictor=self._predictor, silent=self.silent, feature_name=self.feature_name, params=self.params) else: # construct subset used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices') assert used_indices.flags.c_contiguous if self.reference.group is not None: group_info = np.array(self.reference.group).astype(np.int32, copy=False) _, self.group = np.unique(np.repeat(range(len(group_info)), repeats=group_info)[self.used_indices], return_counts=True) self.handle = ctypes.c_void_p() params_str = param_dict_to_str(self.params) _safe_call(_LIB.LGBM_DatasetGetSubset( self.reference.construct().handle, used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ctypes.c_int32(used_indices.shape[0]), c_str(params_str), ctypes.byref(self.handle))) if not self.free_raw_data: self.get_data() if self.group is not None: self.set_group(self.group) if self.get_label() is None: raise ValueError("Label should not be None.") if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor: self.get_data() self._set_init_score_by_predictor(self._predictor, self.data, used_indices) else: # create train self._lazy_init(self.data, label=self.label, weight=self.weight, group=self.group, init_score=self.init_score, predictor=self._predictor, silent=self.silent, feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=self.params) if self.free_raw_data: self.data = None return self def create_valid(self, data, label=None, weight=None, group=None, init_score=None, silent=False, params=None): """Create validation data align with current Dataset. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays Data source of Dataset. If string, it represents the path to txt file. label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None) Label of the data. weight : list, numpy 1-D array, pandas Series or None, optional (default=None) Weight for each instance. group : list, numpy 1-D array, pandas Series or None, optional (default=None) Group/query data. Only used in the learning-to-rank task. sum(group) = n_samples. For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups, where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc. init_score : list, numpy 1-D array, pandas Series or None, optional (default=None) Init score for Dataset. silent : bool, optional (default=False) Whether to print messages during construction. params : dict or None, optional (default=None) Other parameters for validation Dataset. Returns ------- valid : Dataset Validation Dataset with reference to self. 
""" ret = Dataset(data, label=label, reference=self, weight=weight, group=group, init_score=init_score, silent=silent, params=params, free_raw_data=self.free_raw_data) ret._predictor = self._predictor ret.pandas_categorical = self.pandas_categorical return ret def subset(self, used_indices, params=None): """Get subset of current Dataset. Parameters ---------- used_indices : list of int Indices used to create the subset. params : dict or None, optional (default=None) These parameters will be passed to Dataset constructor. Returns ------- subset : Dataset Subset of the current Dataset. """ if params is None: params = self.params ret = Dataset(None, reference=self, feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=params, free_raw_data=self.free_raw_data) ret._predictor = self._predictor ret.pandas_categorical = self.pandas_categorical ret.used_indices = sorted(used_indices) return ret def save_binary(self, filename): """Save Dataset to a binary file. .. note:: Please note that `init_score` is not saved in binary file. If you need it, please set it again after loading Dataset. Parameters ---------- filename : string Name of the output file. Returns ------- self : Dataset Returns self. """ _safe_call(_LIB.LGBM_DatasetSaveBinary( self.construct().handle, c_str(filename))) return self def _update_params(self, params): if not params: return self params = deepcopy(params) def update(): if not self.params: self.params = params else: self.params_back_up = deepcopy(self.params) self.params.update(params) if self.handle is None: update() elif params is not None: ret = _LIB.LGBM_DatasetUpdateParamChecking( c_str(param_dict_to_str(self.params)), c_str(param_dict_to_str(params))) if ret != 0: # could be updated if data is not freed if self.data is not None: update() self._free_handle() else: raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8')) return self def _reverse_update_params(self): if self.handle is None: self.params = deepcopy(self.params_back_up) self.params_back_up = None return self def set_field(self, field_name, data): """Set property into the Dataset. Parameters ---------- field_name : string The field name of the information. data : list, numpy 1-D array, pandas Series or None The array of data to be set. Returns ------- self : Dataset Dataset with set property. """ if self.handle is None: raise Exception(f"Cannot set {field_name} before construct dataset") if data is None: # set to None _safe_call(_LIB.LGBM_DatasetSetField( self.handle, c_str(field_name), None, ctypes.c_int(0), ctypes.c_int(FIELD_TYPE_MAPPER[field_name]))) return self dtype = np.float32 if field_name == 'group': dtype = np.int32 elif field_name == 'init_score': dtype = np.float64 data = list_to_1d_numpy(data, dtype, name=field_name) if data.dtype == np.float32 or data.dtype == np.float64: ptr_data, type_data, _ = c_float_array(data) elif data.dtype == np.int32: ptr_data, type_data, _ = c_int_array(data) else: raise TypeError(f"Expected np.float32/64 or np.int32, met type({data.dtype})") if type_data != FIELD_TYPE_MAPPER[field_name]: raise TypeError("Input type error for set_field") _safe_call(_LIB.LGBM_DatasetSetField( self.handle, c_str(field_name), ptr_data, ctypes.c_int(len(data)), ctypes.c_int(type_data))) self.version += 1 return self def get_field(self, field_name): """Get property from the Dataset. Parameters ---------- field_name : string The field name of the information. Returns ------- info : numpy array A numpy array with information from the Dataset. 
""" if self.handle is None: raise Exception(f"Cannot get {field_name} before construct Dataset") tmp_out_len = ctypes.c_int(0) out_type = ctypes.c_int(0) ret = ctypes.POINTER(ctypes.c_void_p)() _safe_call(_LIB.LGBM_DatasetGetField( self.handle, c_str(field_name), ctypes.byref(tmp_out_len), ctypes.byref(ret), ctypes.byref(out_type))) if out_type.value != FIELD_TYPE_MAPPER[field_name]: raise TypeError("Return type error for get_field") if tmp_out_len.value == 0: return None if out_type.value == C_API_DTYPE_INT32: return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value) elif out_type.value == C_API_DTYPE_FLOAT32: return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value) elif out_type.value == C_API_DTYPE_FLOAT64: return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value) else: raise TypeError("Unknown type") def set_categorical_feature(self, categorical_feature): """Set categorical features. Parameters ---------- categorical_feature : list of int or strings Names or indices of categorical features. Returns ------- self : Dataset Dataset with set categorical features. """ if self.categorical_feature == categorical_feature: return self if self.data is not None: if self.categorical_feature is None: self.categorical_feature = categorical_feature return self._free_handle() elif categorical_feature == 'auto': _log_warning('Using categorical_feature in Dataset.') return self else: _log_warning('categorical_feature in Dataset is overridden.\n' f'New categorical_feature is {sorted(list(categorical_feature))}') self.categorical_feature = categorical_feature return self._free_handle() else: raise LightGBMError("Cannot set categorical feature after freed raw data, " "set free_raw_data=False when construct Dataset to avoid this.") def _set_predictor(self, predictor): """Set predictor for continued training. It is not recommended for user to call this function. Please use init_model argument in engine.train() or engine.cv() instead. """ if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()): return self if self.handle is None: self._predictor = predictor elif self.data is not None: self._predictor = predictor self._set_init_score_by_predictor(self._predictor, self.data) elif self.used_indices is not None and self.reference is not None and self.reference.data is not None: self._predictor = predictor self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices) else: raise LightGBMError("Cannot set predictor after freed raw data, " "set free_raw_data=False when construct Dataset to avoid this.") return self def set_reference(self, reference): """Set reference Dataset. Parameters ---------- reference : Dataset Reference that is used as a template to construct the current Dataset. Returns ------- self : Dataset Dataset with set reference. 
""" self.set_categorical_feature(reference.categorical_feature) \ .set_feature_name(reference.feature_name) \ ._set_predictor(reference._predictor) # we're done if self and reference share a common upstrem reference if self.get_ref_chain().intersection(reference.get_ref_chain()): return self if self.data is not None: self.reference = reference return self._free_handle() else: raise LightGBMError("Cannot set reference after freed raw data, " "set free_raw_data=False when construct Dataset to avoid this.") def set_feature_name(self, feature_name): """Set feature name. Parameters ---------- feature_name : list of strings Feature names. Returns ------- self : Dataset Dataset with set feature name. """ if feature_name != 'auto': self.feature_name = feature_name if self.handle is not None and feature_name is not None and feature_name != 'auto': if len(feature_name) != self.num_feature(): raise ValueError(f"Length of feature_name({len(feature_name)}) and num_feature({self.num_feature()}) don't match") c_feature_name = [c_str(name) for name in feature_name] _safe_call(_LIB.LGBM_DatasetSetFeatureNames( self.handle, c_array(ctypes.c_char_p, c_feature_name), ctypes.c_int(len(feature_name)))) return self def set_label(self, label): """Set label of Dataset. Parameters ---------- label : list, numpy 1-D array, pandas Series / one-column DataFrame or None The label information to be set into Dataset. Returns ------- self : Dataset Dataset with set label. """ self.label = label if self.handle is not None: label = list_to_1d_numpy(_label_from_pandas(label), name='label') self.set_field('label', label) self.label = self.get_field('label') # original values can be modified at cpp side return self def set_weight(self, weight): """Set weight of each instance. Parameters ---------- weight : list, numpy 1-D array, pandas Series or None Weight to be set for each data point. Returns ------- self : Dataset Dataset with set weight. """ if weight is not None and np.all(weight == 1): weight = None self.weight = weight if self.handle is not None and weight is not None: weight = list_to_1d_numpy(weight, name='weight') self.set_field('weight', weight) self.weight = self.get_field('weight') # original values can be modified at cpp side return self def set_init_score(self, init_score): """Set init score of Booster to start from. Parameters ---------- init_score : list, numpy 1-D array, pandas Series or None Init score for Booster. Returns ------- self : Dataset Dataset with set init score. """ self.init_score = init_score if self.handle is not None and init_score is not None: init_score = list_to_1d_numpy(init_score, np.float64, name='init_score') self.set_field('init_score', init_score) self.init_score = self.get_field('init_score') # original values can be modified at cpp side return self def set_group(self, group): """Set group size of Dataset (used for ranking). Parameters ---------- group : list, numpy 1-D array, pandas Series or None Group/query data. Only used in the learning-to-rank task. sum(group) = n_samples. For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups, where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc. Returns ------- self : Dataset Dataset with set group. 
""" self.group = group if self.handle is not None and group is not None: group = list_to_1d_numpy(group, np.int32, name='group') self.set_field('group', group) return self def get_feature_name(self): """Get the names of columns (features) in the Dataset. Returns ------- feature_names : list The names of columns (features) in the Dataset. """ if self.handle is None: raise LightGBMError("Cannot get feature_name before construct dataset") num_feature = self.num_feature() tmp_out_len = ctypes.c_int(0) reserved_string_buffer_size = 255 required_string_buffer_size = ctypes.c_size_t(0) string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_DatasetGetFeatureNames( self.handle, ctypes.c_int(num_feature), ctypes.byref(tmp_out_len), ctypes.c_size_t(reserved_string_buffer_size), ctypes.byref(required_string_buffer_size), ptr_string_buffers)) if num_feature != tmp_out_len.value: raise ValueError("Length of feature names doesn't equal with num_feature") actual_string_buffer_size = required_string_buffer_size.value # if buffer length is not long enough, reallocate buffers if reserved_string_buffer_size < actual_string_buffer_size: string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_DatasetGetFeatureNames( self.handle, ctypes.c_int(num_feature), ctypes.byref(tmp_out_len), ctypes.c_size_t(actual_string_buffer_size), ctypes.byref(required_string_buffer_size), ptr_string_buffers)) return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)] def get_label(self): """Get the label of the Dataset. Returns ------- label : numpy array or None The label information from the Dataset. """ if self.label is None: self.label = self.get_field('label') return self.label def get_weight(self): """Get the weight of the Dataset. Returns ------- weight : numpy array or None Weight for each data point from the Dataset. """ if self.weight is None: self.weight = self.get_field('weight') return self.weight def get_init_score(self): """Get the initial score of the Dataset. Returns ------- init_score : numpy array or None Init score of Booster. """ if self.init_score is None: self.init_score = self.get_field('init_score') return self.init_score def get_data(self): """Get the raw data of the Dataset. Returns ------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None Raw data used in the Dataset construction. 
""" if self.handle is None: raise Exception("Cannot get data before construct Dataset") if self.need_slice and self.used_indices is not None and self.reference is not None: self.data = self.reference.data if self.data is not None: if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data): self.data = self.data[self.used_indices, :] elif isinstance(self.data, pd_DataFrame): self.data = self.data.iloc[self.used_indices].copy() elif isinstance(self.data, dt_DataTable): self.data = self.data[self.used_indices, :] else: _log_warning(f"Cannot subset {type(self.data).__name__} type of raw data.\n" "Returning original raw data") self.need_slice = False if self.data is None: raise LightGBMError("Cannot call `get_data` after freed raw data, " "set free_raw_data=False when construct Dataset to avoid this.") return self.data def get_group(self): """Get the group of the Dataset. Returns ------- group : numpy array or None Group/query data. Only used in the learning-to-rank task. sum(group) = n_samples. For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups, where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc. """ if self.group is None: self.group = self.get_field('group') if self.group is not None: # group data from LightGBM is boundaries data, need to convert to group size self.group = np.diff(self.group) return self.group def num_data(self): """Get the number of rows in the Dataset. Returns ------- number_of_rows : int The number of rows in the Dataset. """ if self.handle is not None: ret = ctypes.c_int(0) _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle, ctypes.byref(ret))) return ret.value else: raise LightGBMError("Cannot get num_data before construct dataset") def num_feature(self): """Get the number of columns (features) in the Dataset. Returns ------- number_of_columns : int The number of columns (features) in the Dataset. """ if self.handle is not None: ret = ctypes.c_int(0) _safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle, ctypes.byref(ret))) return ret.value else: raise LightGBMError("Cannot get num_feature before construct dataset") def get_ref_chain(self, ref_limit=100): """Get a chain of Dataset objects. Starts with r, then goes to r.reference (if exists), then to r.reference.reference, etc. until we hit ``ref_limit`` or a reference loop. Parameters ---------- ref_limit : int, optional (default=100) The limit number of references. Returns ------- ref_chain : set of Dataset Chain of references of the Datasets. """ head = self ref_chain = set() while len(ref_chain) < ref_limit: if isinstance(head, Dataset): ref_chain.add(head) if (head.reference is not None) and (head.reference not in ref_chain): head = head.reference else: break else: break return ref_chain def add_features_from(self, other): """Add features from other Dataset to the current Dataset. Both Datasets must be constructed before calling this method. Parameters ---------- other : Dataset The Dataset to take features from. Returns ------- self : Dataset Dataset with the new features added. 
""" if self.handle is None or other.handle is None: raise ValueError('Both source and target Datasets must be constructed before adding features') _safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle)) was_none = self.data is None old_self_data_type = type(self.data).__name__ if other.data is None: self.data = None elif self.data is not None: if isinstance(self.data, np.ndarray): if isinstance(other.data, np.ndarray): self.data = np.hstack((self.data, other.data)) elif scipy.sparse.issparse(other.data): self.data = np.hstack((self.data, other.data.toarray())) elif isinstance(other.data, pd_DataFrame): self.data = np.hstack((self.data, other.data.values)) elif isinstance(other.data, dt_DataTable): self.data = np.hstack((self.data, other.data.to_numpy())) else: self.data = None elif scipy.sparse.issparse(self.data): sparse_format = self.data.getformat() if isinstance(other.data, np.ndarray) or scipy.sparse.issparse(other.data): self.data = scipy.sparse.hstack((self.data, other.data), format=sparse_format) elif isinstance(other.data, pd_DataFrame): self.data = scipy.sparse.hstack((self.data, other.data.values), format=sparse_format) elif isinstance(other.data, dt_DataTable): self.data = scipy.sparse.hstack((self.data, other.data.to_numpy()), format=sparse_format) else: self.data = None elif isinstance(self.data, pd_DataFrame): if not PANDAS_INSTALLED: raise LightGBMError("Cannot add features to DataFrame type of raw data " "without pandas installed. " "Install pandas and restart your session.") if isinstance(other.data, np.ndarray): self.data = concat((self.data, pd_DataFrame(other.data)), axis=1, ignore_index=True) elif scipy.sparse.issparse(other.data): self.data = concat((self.data, pd_DataFrame(other.data.toarray())), axis=1, ignore_index=True) elif isinstance(other.data, pd_DataFrame): self.data = concat((self.data, other.data), axis=1, ignore_index=True) elif isinstance(other.data, dt_DataTable): self.data = concat((self.data, pd_DataFrame(other.data.to_numpy())), axis=1, ignore_index=True) else: self.data = None elif isinstance(self.data, dt_DataTable): if isinstance(other.data, np.ndarray): self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data))) elif scipy.sparse.issparse(other.data): self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.toarray()))) elif isinstance(other.data, pd_DataFrame): self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.values))) elif isinstance(other.data, dt_DataTable): self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.to_numpy()))) else: self.data = None else: self.data = None if self.data is None: err_msg = (f"Cannot add features from {type(other.data).__name__} type of raw data to " f"{old_self_data_type} type of raw data.\n") err_msg += ("Set free_raw_data=False when construct Dataset to avoid this" if was_none else "Freeing raw data") _log_warning(err_msg) self.feature_name = self.get_feature_name() _log_warning("Reseting categorical features.\n" "You can set new categorical features via ``set_categorical_feature`` method") self.categorical_feature = "auto" self.pandas_categorical = None return self def _dump_text(self, filename): """Save Dataset to a text file. This format cannot be loaded back in by LightGBM, but is useful for debugging purposes. Parameters ---------- filename : string Name of the output file. Returns ------- self : Dataset Returns self. 
""" _safe_call(_LIB.LGBM_DatasetDumpText( self.construct().handle, c_str(filename))) return self class Booster: """Booster in LightGBM.""" def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False): """Initialize the Booster. Parameters ---------- params : dict or None, optional (default=None) Parameters for Booster. train_set : Dataset or None, optional (default=None) Training dataset. model_file : string or None, optional (default=None) Path to the model file. model_str : string or None, optional (default=None) Model will be loaded from this string. silent : bool, optional (default=False) Whether to print messages during construction. """ self.handle = None self.network = False self.__need_reload_eval_info = True self._train_data_name = "training" self.__attr = {} self.__set_objective_to_none = False self.best_iteration = -1 self.best_score = {} params = {} if params is None else deepcopy(params) # user can set verbose with params, it has higher priority if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent: params["verbose"] = -1 if train_set is not None: # Training task if not isinstance(train_set, Dataset): raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}') params = _choose_param_value( main_param_name="machines", params=params, default_value=None ) # if "machines" is given, assume user wants to do distributed learning, and set up network if params["machines"] is None: params.pop("machines", None) else: machines = params["machines"] if isinstance(machines, str): num_machines_from_machine_list = len(machines.split(',')) elif isinstance(machines, (list, set)): num_machines_from_machine_list = len(machines) machines = ','.join(machines) else: raise ValueError("Invalid machines in params.") params = _choose_param_value( main_param_name="num_machines", params=params, default_value=num_machines_from_machine_list ) params = _choose_param_value( main_param_name="local_listen_port", params=params, default_value=12400 ) self.set_network( machines=machines, local_listen_port=params["local_listen_port"], listen_time_out=params.get("time_out", 120), num_machines=params["num_machines"] ) # construct booster object train_set.construct() # copy the parameters from train_set params.update(train_set.get_params()) params_str = param_dict_to_str(params) self.handle = ctypes.c_void_p() _safe_call(_LIB.LGBM_BoosterCreate( train_set.handle, c_str(params_str), ctypes.byref(self.handle))) # save reference to data self.train_set = train_set self.valid_sets = [] self.name_valid_sets = [] self.__num_dataset = 1 self.__init_predictor = train_set._predictor if self.__init_predictor is not None: _safe_call(_LIB.LGBM_BoosterMerge( self.handle, self.__init_predictor.handle)) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) self.__num_class = out_num_class.value # buffer for inner predict self.__inner_predict_buffer = [None] self.__is_predicted_cur_iter = [False] self.__get_eval_info() self.pandas_categorical = train_set.pandas_categorical self.train_set_version = train_set.version elif model_file is not None: # Prediction task out_num_iterations = ctypes.c_int(0) self.handle = ctypes.c_void_p() _safe_call(_LIB.LGBM_BoosterCreateFromModelfile( c_str(model_file), ctypes.byref(out_num_iterations), ctypes.byref(self.handle))) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, 
ctypes.byref(out_num_class))) self.__num_class = out_num_class.value self.pandas_categorical = _load_pandas_categorical(file_name=model_file) elif model_str is not None: self.model_from_string(model_str, not silent) else: raise TypeError('Need at least one training dataset or model file or model string ' 'to create Booster instance') self.params = params def __del__(self): try: if self.network: self.free_network() except AttributeError: pass try: if self.handle is not None: _safe_call(_LIB.LGBM_BoosterFree(self.handle)) except AttributeError: pass def __copy__(self): return self.__deepcopy__(None) def __deepcopy__(self, _): model_str = self.model_to_string(num_iteration=-1) booster = Booster(model_str=model_str) return booster def __getstate__(self): this = self.__dict__.copy() handle = this['handle'] this.pop('train_set', None) this.pop('valid_sets', None) if handle is not None: this["handle"] = self.model_to_string(num_iteration=-1) return this def __setstate__(self, state): model_str = state.get('handle', None) if model_str is not None: handle = ctypes.c_void_p() out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterLoadModelFromString( c_str(model_str), ctypes.byref(out_num_iterations), ctypes.byref(handle))) state['handle'] = handle self.__dict__.update(state) def free_dataset(self): """Free Booster's Datasets. Returns ------- self : Booster Booster without Datasets. """ self.__dict__.pop('train_set', None) self.__dict__.pop('valid_sets', None) self.__num_dataset = 0 return self def _free_buffer(self): self.__inner_predict_buffer = [] self.__is_predicted_cur_iter = [] return self def set_network( self, machines: Union[List[str], Set[str], str], local_listen_port: int = 12400, listen_time_out: int = 120, num_machines: int = 1 ) -> "Booster": """Set the network configuration. Parameters ---------- machines : list, set or string Names of machines. local_listen_port : int, optional (default=12400) TCP listen port for local machines. listen_time_out : int, optional (default=120) Socket time-out in minutes. num_machines : int, optional (default=1) The number of machines for distributed learning application. Returns ------- self : Booster Booster with set network. """ if isinstance(machines, (list, set)): machines = ','.join(machines) _safe_call(_LIB.LGBM_NetworkInit(c_str(machines), ctypes.c_int(local_listen_port), ctypes.c_int(listen_time_out), ctypes.c_int(num_machines))) self.network = True return self def free_network(self): """Free Booster's network. Returns ------- self : Booster Booster with freed network. """ _safe_call(_LIB.LGBM_NetworkFree()) self.network = False return self def trees_to_dataframe(self): """Parse the fitted model and return in an easy-to-read pandas DataFrame. The returned DataFrame has the following columns. - ``tree_index`` : int64, which tree a node belongs to. 0-based, so a value of ``6``, for example, means "this node is in the 7th tree". - ``node_depth`` : int64, how far a node is from the root of the tree. The root node has a value of ``1``, its direct children are ``2``, etc. - ``node_index`` : string, unique identifier for a node. - ``left_child`` : string, ``node_index`` of the child node to the left of a split. ``None`` for leaf nodes. - ``right_child`` : string, ``node_index`` of the child node to the right of a split. ``None`` for leaf nodes. - ``parent_index`` : string, ``node_index`` of this node's parent. ``None`` for the root node. - ``split_feature`` : string, name of the feature used for splitting. ``None`` for leaf nodes. 
- ``split_gain`` : float64, gain from adding this split to the tree. ``NaN`` for leaf nodes. - ``threshold`` : float64, value of the feature used to decide which side of the split a record will go down. ``NaN`` for leaf nodes. - ``decision_type`` : string, logical operator describing how to compare a value to ``threshold``. For example, ``split_feature = "Column_10", threshold = 15, decision_type = "<="`` means that records where ``Column_10 <= 15`` follow the left side of the split, otherwise follows the right side of the split. ``None`` for leaf nodes. - ``missing_direction`` : string, split direction that missing values should go to. ``None`` for leaf nodes. - ``missing_type`` : string, describes what types of values are treated as missing. - ``value`` : float64, predicted value for this leaf node, multiplied by the learning rate. - ``weight`` : float64 or int64, sum of hessian (second-order derivative of objective), summed over observations that fall in this node. - ``count`` : int64, number of records in the training data that fall into this node. Returns ------- result : pandas DataFrame Returns a pandas DataFrame of the parsed model. """ if not PANDAS_INSTALLED: raise LightGBMError('This method cannot be run without pandas installed. ' 'You must install pandas and restart your session to use this method.') if self.num_trees() == 0: raise LightGBMError('There are no trees in this Booster and thus nothing to parse') def _is_split_node(tree): return 'split_index' in tree.keys() def create_node_record(tree, node_depth=1, tree_index=None, feature_names=None, parent_node=None): def _get_node_index(tree, tree_index): tree_num = f'{tree_index}-' if tree_index is not None else '' is_split = _is_split_node(tree) node_type = 'S' if is_split else 'L' # if a single node tree it won't have `leaf_index` so return 0 node_num = tree.get('split_index' if is_split else 'leaf_index', 0) return f"{tree_num}{node_type}{node_num}" def _get_split_feature(tree, feature_names): if _is_split_node(tree): if feature_names is not None: feature_name = feature_names[tree['split_feature']] else: feature_name = tree['split_feature'] else: feature_name = None return feature_name def _is_single_node_tree(tree): return set(tree.keys()) == {'leaf_value'} # Create the node record, and populate universal data members node = OrderedDict() node['tree_index'] = tree_index node['node_depth'] = node_depth node['node_index'] = _get_node_index(tree, tree_index) node['left_child'] = None node['right_child'] = None node['parent_index'] = parent_node node['split_feature'] = _get_split_feature(tree, feature_names) node['split_gain'] = None node['threshold'] = None node['decision_type'] = None node['missing_direction'] = None node['missing_type'] = None node['value'] = None node['weight'] = None node['count'] = None # Update values to reflect node type (leaf or split) if _is_split_node(tree): node['left_child'] = _get_node_index(tree['left_child'], tree_index) node['right_child'] = _get_node_index(tree['right_child'], tree_index) node['split_gain'] = tree['split_gain'] node['threshold'] = tree['threshold'] node['decision_type'] = tree['decision_type'] node['missing_direction'] = 'left' if tree['default_left'] else 'right' node['missing_type'] = tree['missing_type'] node['value'] = tree['internal_value'] node['weight'] = tree['internal_weight'] node['count'] = tree['internal_count'] else: node['value'] = tree['leaf_value'] if not _is_single_node_tree(tree): node['weight'] = tree['leaf_weight'] node['count'] = tree['leaf_count'] return 
node def tree_dict_to_node_list(tree, node_depth=1, tree_index=None, feature_names=None, parent_node=None): node = create_node_record(tree, node_depth=node_depth, tree_index=tree_index, feature_names=feature_names, parent_node=parent_node) res = [node] if _is_split_node(tree): # traverse the next level of the tree children = ['left_child', 'right_child'] for child in children: subtree_list = tree_dict_to_node_list( tree[child], node_depth=node_depth + 1, tree_index=tree_index, feature_names=feature_names, parent_node=node['node_index']) # In tree format, "subtree_list" is a list of node records (dicts), # and we add node to the list. res.extend(subtree_list) return res model_dict = self.dump_model() feature_names = model_dict['feature_names'] model_list = [] for tree in model_dict['tree_info']: model_list.extend(tree_dict_to_node_list(tree['tree_structure'], tree_index=tree['tree_index'], feature_names=feature_names)) return pd_DataFrame(model_list, columns=model_list[0].keys()) def set_train_data_name(self, name): """Set the name to the training Dataset. Parameters ---------- name : string Name for the training Dataset. Returns ------- self : Booster Booster with set training Dataset name. """ self._train_data_name = name return self def add_valid(self, data, name): """Add validation data. Parameters ---------- data : Dataset Validation data. name : string Name of validation data. Returns ------- self : Booster Booster with set validation data. """ if not isinstance(data, Dataset): raise TypeError(f'Validation data should be Dataset instance, met {type(data).__name__}') if data._predictor is not self.__init_predictor: raise LightGBMError("Add validation data failed, " "you should use same predictor for these data") _safe_call(_LIB.LGBM_BoosterAddValidData( self.handle, data.construct().handle)) self.valid_sets.append(data) self.name_valid_sets.append(name) self.__num_dataset += 1 self.__inner_predict_buffer.append(None) self.__is_predicted_cur_iter.append(False) return self def reset_parameter(self, params): """Reset parameters of Booster. Parameters ---------- params : dict New parameters for Booster. Returns ------- self : Booster Booster with new parameters. """ params_str = param_dict_to_str(params) if params_str: _safe_call(_LIB.LGBM_BoosterResetParameter( self.handle, c_str(params_str))) self.params.update(params) return self def update(self, train_set=None, fobj=None): """Update Booster for one iteration. Parameters ---------- train_set : Dataset or None, optional (default=None) Training data. If None, last training data is used. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : list or numpy 1-D array The predicted values. Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list or numpy 1-D array The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list or numpy 1-D array The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. 
Returns ------- is_finished : bool Whether the update was successfully finished. """ # need reset training data if train_set is None and self.train_set_version != self.train_set.version: train_set = self.train_set is_the_same_train_set = False else: is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version if train_set is not None and not is_the_same_train_set: if not isinstance(train_set, Dataset): raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}') if train_set._predictor is not self.__init_predictor: raise LightGBMError("Replace training data failed, " "you should use same predictor for these data") self.train_set = train_set _safe_call(_LIB.LGBM_BoosterResetTrainingData( self.handle, self.train_set.construct().handle)) self.__inner_predict_buffer[0] = None self.train_set_version = self.train_set.version is_finished = ctypes.c_int(0) if fobj is None: if self.__set_objective_to_none: raise LightGBMError('Cannot update due to null objective function.') _safe_call(_LIB.LGBM_BoosterUpdateOneIter( self.handle, ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)] return is_finished.value == 1 else: if not self.__set_objective_to_none: self.reset_parameter({"objective": "none"}).__set_objective_to_none = True grad, hess = fobj(self.__inner_predict(0), self.train_set) return self.__boost(grad, hess) def __boost(self, grad, hess): """Boost Booster for one iteration with customized gradient statistics. .. note:: Score is returned before any transformation, e.g. it is raw margin instead of probability of positive class for binary task. For multi-class task, the score is group by class_id first, then group by row_id. If you want to get i-th row score in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. Parameters ---------- grad : list or numpy 1-D array The value of the first order derivative (gradient) of the loss with respect to the elements of score for each sample point. hess : list or numpy 1-D array The value of the second order derivative (Hessian) of the loss with respect to the elements of score for each sample point. Returns ------- is_finished : bool Whether the boost was successfully finished. """ grad = list_to_1d_numpy(grad, name='gradient') hess = list_to_1d_numpy(hess, name='hessian') assert grad.flags.c_contiguous assert hess.flags.c_contiguous if len(grad) != len(hess): raise ValueError(f"Lengths of gradient({len(grad)}) and hessian({len(hess)}) don't match") is_finished = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom( self.handle, grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)] return is_finished.value == 1 def rollback_one_iter(self): """Rollback one iteration. Returns ------- self : Booster Booster with rolled back one iteration. """ _safe_call(_LIB.LGBM_BoosterRollbackOneIter( self.handle)) self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)] return self def current_iteration(self): """Get the index of the current iteration. Returns ------- cur_iter : int The index of the current iteration. 
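
        Examples
        --------
        A minimal sketch (``train_data`` is a hypothetical constructed
        Dataset; one boosting round is performed first):

        >>> booster = lgb.Booster(params={'objective': 'regression'}, train_set=train_data)
        >>> _ = booster.update()
        >>> booster.current_iteration()
        1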
""" out_cur_iter = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetCurrentIteration( self.handle, ctypes.byref(out_cur_iter))) return out_cur_iter.value def num_model_per_iteration(self): """Get number of models per iteration. Returns ------- model_per_iter : int The number of models per iteration. """ model_per_iter = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterNumModelPerIteration( self.handle, ctypes.byref(model_per_iter))) return model_per_iter.value def num_trees(self): """Get number of weak sub-models. Returns ------- num_trees : int The number of weak sub-models. """ num_trees = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterNumberOfTotalModel( self.handle, ctypes.byref(num_trees))) return num_trees.value def upper_bound(self): """Get upper bound value of a model. Returns ------- upper_bound : double Upper bound value of the model. """ ret = ctypes.c_double(0) _safe_call(_LIB.LGBM_BoosterGetUpperBoundValue( self.handle, ctypes.byref(ret))) return ret.value def lower_bound(self): """Get lower bound value of a model. Returns ------- lower_bound : double Lower bound value of the model. """ ret = ctypes.c_double(0) _safe_call(_LIB.LGBM_BoosterGetLowerBoundValue( self.handle, ctypes.byref(ret))) return ret.value def eval(self, data, name, feval=None): """Evaluate for data. Parameters ---------- data : Dataset Data for the evaluating. name : string Name of the data. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, eval_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : list or numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. eval_data : Dataset The evaluation dataset. eval_name : string The name of evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results. """ if not isinstance(data, Dataset): raise TypeError("Can only eval for Dataset instance") data_idx = -1 if data is self.train_set: data_idx = 0 else: for i in range(len(self.valid_sets)): if data is self.valid_sets[i]: data_idx = i + 1 break # need to push new valid data if data_idx == -1: self.add_valid(data, name) data_idx = self.__num_dataset - 1 return self.__inner_eval(name, data_idx, feval) def eval_train(self, feval=None): """Evaluate for training data. Parameters ---------- feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : list or numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. eval_name : string The name of evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. 
For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results. """ return self.__inner_eval(self._train_data_name, 0, feval) def eval_valid(self, feval=None): """Evaluate for validation data. Parameters ---------- feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, valid_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : list or numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. valid_data : Dataset The validation dataset. eval_name : string The name of evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results. """ return [item for i in range(1, self.__num_dataset) for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)] def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'): """Save Booster to file. Parameters ---------- filename : string Filename to save Booster. num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. importance_type : string, optional (default="split") What type of feature importance should be saved. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. Returns ------- self : Booster Returns self. """ if num_iteration is None: num_iteration = self.best_iteration importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type] _safe_call(_LIB.LGBM_BoosterSaveModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int(importance_type_int), c_str(filename))) _dump_pandas_categorical(self.pandas_categorical, filename) return self def shuffle_models(self, start_iteration=0, end_iteration=-1): """Shuffle models. Parameters ---------- start_iteration : int, optional (default=0) The first iteration that will be shuffled. end_iteration : int, optional (default=-1) The last iteration that will be shuffled. If <= 0, means the last available iteration. Returns ------- self : Booster Booster with shuffled models. """ _safe_call(_LIB.LGBM_BoosterShuffleModels( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(end_iteration))) return self def model_from_string(self, model_str, verbose=True): """Load Booster from a string. Parameters ---------- model_str : string Model will be loaded from this string. verbose : bool, optional (default=True) Whether to print messages while loading model. Returns ------- self : Booster Loaded Booster object. 
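
        Examples
        --------
        A hedged round-trip sketch (``booster`` is a hypothetical trained
        Booster; constructing ``Booster(model_str=...)`` calls this method
        internally):

        >>> model_str = booster.model_to_string()
        >>> restored = lgb.Booster(model_str=model_str, silent=True)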
""" if self.handle is not None: _safe_call(_LIB.LGBM_BoosterFree(self.handle)) self._free_buffer() self.handle = ctypes.c_void_p() out_num_iterations = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterLoadModelFromString( c_str(model_str), ctypes.byref(out_num_iterations), ctypes.byref(self.handle))) out_num_class = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumClasses( self.handle, ctypes.byref(out_num_class))) if verbose: _log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations') self.__num_class = out_num_class.value self.pandas_categorical = _load_pandas_categorical(model_str=model_str) return self def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'): """Save Booster to string. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations are saved. start_iteration : int, optional (default=0) Start index of the iteration that should be saved. importance_type : string, optional (default="split") What type of feature importance should be saved. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. Returns ------- str_repr : string String representation of Booster. """ if num_iteration is None: num_iteration = self.best_iteration importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type] buffer_len = 1 << 20 tmp_out_len = ctypes.c_int64(0) string_buffer = ctypes.create_string_buffer(buffer_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterSaveModelToString( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int(importance_type_int), ctypes.c_int64(buffer_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) actual_len = tmp_out_len.value # if buffer length is not long enough, re-allocate a buffer if actual_len > buffer_len: string_buffer = ctypes.create_string_buffer(actual_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterSaveModelToString( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int(importance_type_int), ctypes.c_int64(actual_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) ret = string_buffer.value.decode('utf-8') ret += _dump_pandas_categorical(self.pandas_categorical) return ret def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'): """Dump Booster to JSON format. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be dumped. If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped. If <= 0, all iterations are dumped. start_iteration : int, optional (default=0) Start index of the iteration that should be dumped. importance_type : string, optional (default="split") What type of feature importance should be dumped. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. Returns ------- json_repr : dict JSON format of Booster. 
""" if num_iteration is None: num_iteration = self.best_iteration importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type] buffer_len = 1 << 20 tmp_out_len = ctypes.c_int64(0) string_buffer = ctypes.create_string_buffer(buffer_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterDumpModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int(importance_type_int), ctypes.c_int64(buffer_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) actual_len = tmp_out_len.value # if buffer length is not long enough, reallocate a buffer if actual_len > buffer_len: string_buffer = ctypes.create_string_buffer(actual_len) ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)]) _safe_call(_LIB.LGBM_BoosterDumpModel( self.handle, ctypes.c_int(start_iteration), ctypes.c_int(num_iteration), ctypes.c_int(importance_type_int), ctypes.c_int64(actual_len), ctypes.byref(tmp_out_len), ptr_string_buffer)) ret = json.loads(string_buffer.value.decode('utf-8')) ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical, default=json_default_with_numpy)) return ret def predict(self, data, start_iteration=0, num_iteration=None, raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False, is_reshape=True, **kwargs): """Make a prediction. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for prediction. If string, it represents the path to txt file. start_iteration : int, optional (default=0) Start index of the iteration to predict. If <= 0, starts from the first iteration. num_iteration : int or None, optional (default=None) Total number of iterations used in the prediction. If None, if the best iteration exists and start_iteration <= 0, the best iteration is used; otherwise, all iterations from ``start_iteration`` are used (no limits). If <= 0, all iterations from ``start_iteration`` are used (no limits). raw_score : bool, optional (default=False) Whether to predict raw scores. pred_leaf : bool, optional (default=False) Whether to predict leaf index. pred_contrib : bool, optional (default=False) Whether to predict feature contributions. .. note:: If you want to get more explanations for your model's predictions using SHAP values, like SHAP interaction values, you can install the shap package (https://github.com/slundberg/shap). Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra column, where the last column is the expected value. data_has_header : bool, optional (default=False) Whether the data has header. Used only if data is string. is_reshape : bool, optional (default=True) If True, result is reshaped to [nrow, ncol]. **kwargs Other parameters for the prediction. Returns ------- result : numpy array, scipy.sparse or list of scipy.sparse Prediction result. Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``). """ predictor = self._to_predictor(deepcopy(kwargs)) if num_iteration is None: if start_iteration <= 0: num_iteration = self.best_iteration else: num_iteration = -1 return predictor.predict(data, start_iteration, num_iteration, raw_score, pred_leaf, pred_contrib, data_has_header, is_reshape) def refit(self, data, label, decay_rate=0.9, **kwargs): """Refit the existing Booster by new data. 
Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. """ if self.__set_objective_to_none: raise LightGBMError('Cannot refit due to null objective function.') predictor = self._to_predictor(deepcopy(kwargs)) leaf_preds = predictor.predict(data, -1, pred_leaf=True) nrow, ncol = leaf_preds.shape out_is_linear = ctypes.c_bool(False) _safe_call(_LIB.LGBM_BoosterGetLinear( self.handle, ctypes.byref(out_is_linear))) new_params = deepcopy(self.params) new_params["linear_tree"] = out_is_linear.value train_set = Dataset(data, label, silent=True, params=new_params) new_params['refit_decay_rate'] = decay_rate new_booster = Booster(new_params, train_set) # Copy models _safe_call(_LIB.LGBM_BoosterMerge( new_booster.handle, predictor.handle)) leaf_preds = leaf_preds.reshape(-1) ptr_data, _, _ = c_int_array(leaf_preds) _safe_call(_LIB.LGBM_BoosterRefit( new_booster.handle, ptr_data, ctypes.c_int32(nrow), ctypes.c_int32(ncol))) new_booster.network = self.network new_booster.__attr = self.__attr.copy() return new_booster def get_leaf_output(self, tree_id, leaf_id): """Get the output of a leaf. Parameters ---------- tree_id : int The index of the tree. leaf_id : int The index of the leaf in the tree. Returns ------- result : float The output of the leaf. """ ret = ctypes.c_double(0) _safe_call(_LIB.LGBM_BoosterGetLeafValue( self.handle, ctypes.c_int(tree_id), ctypes.c_int(leaf_id), ctypes.byref(ret))) return ret.value def _to_predictor(self, pred_parameter=None): """Convert to predictor.""" predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter) predictor.pandas_categorical = self.pandas_categorical return predictor def num_feature(self): """Get number of features. Returns ------- num_feature : int The number of features. """ out_num_feature = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumFeature( self.handle, ctypes.byref(out_num_feature))) return out_num_feature.value def feature_name(self): """Get names of features. Returns ------- result : list List with names of features. 
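
        Examples
        --------
        A hedged sketch (``booster`` is a hypothetical trained Booster; when
        no names were supplied, LightGBM falls back to generated names such
        as ``Column_0``):

        >>> names = booster.feature_name()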
""" num_feature = self.num_feature() # Get name of features tmp_out_len = ctypes.c_int(0) reserved_string_buffer_size = 255 required_string_buffer_size = ctypes.c_size_t(0) string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetFeatureNames( self.handle, ctypes.c_int(num_feature), ctypes.byref(tmp_out_len), ctypes.c_size_t(reserved_string_buffer_size), ctypes.byref(required_string_buffer_size), ptr_string_buffers)) if num_feature != tmp_out_len.value: raise ValueError("Length of feature names doesn't equal with num_feature") actual_string_buffer_size = required_string_buffer_size.value # if buffer length is not long enough, reallocate buffers if reserved_string_buffer_size < actual_string_buffer_size: string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetFeatureNames( self.handle, ctypes.c_int(num_feature), ctypes.byref(tmp_out_len), ctypes.c_size_t(actual_string_buffer_size), ctypes.byref(required_string_buffer_size), ptr_string_buffers)) return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)] def feature_importance(self, importance_type='split', iteration=None): """Get feature importances. Parameters ---------- importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. iteration : int or None, optional (default=None) Limit number of iterations in the feature importance calculation. If None, if the best iteration exists, it is used; otherwise, all trees are used. If <= 0, all trees are used (no limits). Returns ------- result : numpy array Array with feature importances. """ if iteration is None: iteration = self.best_iteration importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type] result = np.zeros(self.num_feature(), dtype=np.float64) _safe_call(_LIB.LGBM_BoosterFeatureImportance( self.handle, ctypes.c_int(iteration), ctypes.c_int(importance_type_int), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if importance_type_int == 0: return result.astype(np.int32) else: return result def get_split_value_histogram(self, feature, bins=None, xgboost_style=False): """Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. .. warning:: Categorical features are not supported. bins : int, string or None, optional (default=None) The maximum number of bins. If None, or int and > number of unique split values and ``xgboost_style=True``, the number of bins equals number of unique split values. If string, it should be one from the list of the supported values by ``numpy.histogram()`` function. xgboost_style : bool, optional (default=False) Whether the returned result should be in the same form as it is in XGBoost. If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function. If True, the returned value is matrix, in which the first column is the right edges of non-empty bins and the second one is the histogram values. 
Returns ------- result_tuple : tuple of 2 numpy arrays If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature and the bin edges. result_array_like : numpy array or pandas DataFrame (if pandas is installed) If ``xgboost_style=True``, the histogram of used splitting values for the specified feature. """ def add(root): """Recursively add thresholds.""" if 'split_index' in root: # non-leaf if feature_names is not None and isinstance(feature, str): split_feature = feature_names[root['split_feature']] else: split_feature = root['split_feature'] if split_feature == feature: if isinstance(root['threshold'], str): raise LightGBMError('Cannot compute split value histogram for the categorical feature') else: values.append(root['threshold']) add(root['left_child']) add(root['right_child']) model = self.dump_model() feature_names = model.get('feature_names') tree_infos = model['tree_info'] values = [] for tree_info in tree_infos: add(tree_info['tree_structure']) if bins is None or isinstance(bins, int) and xgboost_style: n_unique = len(np.unique(values)) bins = max(min(n_unique, bins) if bins is not None else n_unique, 1) hist, bin_edges = np.histogram(values, bins=bins) if xgboost_style: ret = np.column_stack((bin_edges[1:], hist)) ret = ret[ret[:, 1] > 0] if PANDAS_INSTALLED: return pd_DataFrame(ret, columns=['SplitValue', 'Count']) else: return ret else: return hist, bin_edges def __inner_eval(self, data_name, data_idx, feval=None): """Evaluate training or validation data.""" if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of dataset") self.__get_eval_info() ret = [] if self.__num_inner_eval > 0: result = np.zeros(self.__num_inner_eval, dtype=np.float64) tmp_out_len = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetEval( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if tmp_out_len.value != self.__num_inner_eval: raise ValueError("Wrong length of eval results") for i in range(self.__num_inner_eval): ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i])) if callable(feval): feval = [feval] if feval is not None: if data_idx == 0: cur_data = self.train_set else: cur_data = self.valid_sets[data_idx - 1] for eval_function in feval: if eval_function is None: continue feval_ret = eval_function(self.__inner_predict(data_idx), cur_data) if isinstance(feval_ret, list): for eval_name, val, is_higher_better in feval_ret: ret.append((data_name, eval_name, val, is_higher_better)) else: eval_name, val, is_higher_better = feval_ret ret.append((data_name, eval_name, val, is_higher_better)) return ret def __inner_predict(self, data_idx): """Predict for training and validation dataset.""" if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of dataset") if self.__inner_predict_buffer[data_idx] is None: if data_idx == 0: n_preds = self.train_set.num_data() * self.__num_class else: n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64) # avoid to predict many time in one iteration if not self.__is_predicted_cur_iter[data_idx]: tmp_out_len = ctypes.c_int64(0) data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double)) _safe_call(_LIB.LGBM_BoosterGetPredict( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), data_ptr)) if 
tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]): raise ValueError(f"Wrong length of predict results for data {data_idx}") self.__is_predicted_cur_iter[data_idx] = True return self.__inner_predict_buffer[data_idx] def __get_eval_info(self): """Get inner evaluation count and names.""" if self.__need_reload_eval_info: self.__need_reload_eval_info = False out_num_eval = ctypes.c_int(0) # Get num of inner evals _safe_call(_LIB.LGBM_BoosterGetEvalCounts( self.handle, ctypes.byref(out_num_eval))) self.__num_inner_eval = out_num_eval.value if self.__num_inner_eval > 0: # Get name of eval metrics tmp_out_len = ctypes.c_int(0) reserved_string_buffer_size = 255 required_string_buffer_size = ctypes.c_size_t(0) string_buffers = [ ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(self.__num_inner_eval) ] ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetEvalNames( self.handle, ctypes.c_int(self.__num_inner_eval), ctypes.byref(tmp_out_len), ctypes.c_size_t(reserved_string_buffer_size), ctypes.byref(required_string_buffer_size), ptr_string_buffers)) if self.__num_inner_eval != tmp_out_len.value: raise ValueError("Length of eval names doesn't equal with num_evals") actual_string_buffer_size = required_string_buffer_size.value # if buffer length is not long enough, reallocate buffers if reserved_string_buffer_size < actual_string_buffer_size: string_buffers = [ ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(self.__num_inner_eval) ] ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetEvalNames( self.handle, ctypes.c_int(self.__num_inner_eval), ctypes.byref(tmp_out_len), ctypes.c_size_t(actual_string_buffer_size), ctypes.byref(required_string_buffer_size), ptr_string_buffers)) self.__name_inner_eval = [ string_buffers[i].value.decode('utf-8') for i in range(self.__num_inner_eval) ] self.__higher_better_inner_eval = [ name.startswith(('auc', 'ndcg@', 'map@', 'average_precision')) for name in self.__name_inner_eval ] def attr(self, key): """Get attribute string from the Booster. Parameters ---------- key : string The name of the attribute. Returns ------- value : string or None The attribute value. Returns None if attribute does not exist. """ return self.__attr.get(key, None) def set_attr(self, **kwargs): """Set attributes to the Booster. Parameters ---------- **kwargs The attributes to set. Setting a value to None deletes an attribute. Returns ------- self : Booster Booster with set attributes. """ for key, value in kwargs.items(): if value is not None: if not isinstance(value, str): raise ValueError("Only string values are accepted") self.__attr[key] = value else: self.__attr.pop(key, None) return self
mit
6,481,589,992,037,159,000
41.512168
161
0.553914
false
4.200816
false
false
false
llvm/llvm-zorg
zorg/buildbot/builders/AnnotatedBuilder.py
1
3401
from buildbot.process.properties import WithProperties from buildbot.steps.shell import SetProperty from zorg.buildbot.commands.AnnotatedCommand import AnnotatedCommand from zorg.buildbot.process.factory import LLVMBuildFactory def getAnnotatedBuildFactory( script, clean=False, depends_on_projects=None, env=None, extra_args=None, timeout=1200, checkout_llvm_sources=True): """ Returns a new build factory that uses AnnotatedCommand, which allows the build to be run by version-controlled scripts that do not require a buildmaster restart to update. script: script under "builders/annotated" to be run by python clean: set to true for a clean build of llvm depends_on_projects: which subprojects to enable llvm must be first in the list (default: ["llvm", "clang", "compiler-rt", "libcxx", "libcxxabi", "libunwind", "lld"]) env: environment overrides (map; default is no overrides) extra_args: extra arguments to pass to the script (default: []) timeout: specifies the builder's timeout in seconds (default: 1200) """ if depends_on_projects is None: depends_on_projects = [ "llvm", "clang", "compiler-rt", "libcxx", "libcxxabi", "libunwind", "lld"] if extra_args is None: extra_args = [] f = LLVMBuildFactory( clean=clean, depends_on_projects=depends_on_projects) if clean: f.addStep(SetProperty(property='clean', command='echo 1')) # We normally use the clean property to indicate that we want a # clean build, but AnnotatedCommand uses the clobber property # instead. Therefore, set clobber if clean is set to a truthy # value. This will cause AnnotatedCommand to set # BUILDBOT_CLOBBER=1 in the environment, which is how we # communicate to the script that we need a clean build. f.addStep(SetProperty( property='clobber', command='echo 1', doStepIf=lambda step: step.build.getProperty('clean', False))) merged_env = { 'TERM': 'dumb' # Be cautious and disable color output from all tools. } if env is not None: # Overwrite pre-set items with the given ones, so user can set # anything. merged_env.update(env) scripts_dir = "annotated" # Check out zorg so we can run the annotator scripts. f.addGetSourcecodeForProject( name='update-annotated-scripts', project='zorg', src_dir='llvm-zorg', alwaysUseLatest=True) if checkout_llvm_sources: f.addGetSourcecodeSteps() extra_args_with_props = [WithProperties(arg) for arg in extra_args] # Explicitly use '/' as separator, because it works on *nix and Windows. if script.startswith('/'): command = [script] else: script_path = "../llvm-zorg/zorg/buildbot/builders/annotated/%s" % (script) command = ["python", script_path, WithProperties("--jobs=%(jobs:-)s")] command += extra_args_with_props f.addStep(AnnotatedCommand(name="annotate", description="annotate", timeout=timeout, haltOnFailure=True, command=command, env=merged_env)) return f
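# --- Editor's illustration (not part of the original file): how this factory
# might be wired into a buildbot master config. The builder name, script name,
# and project list are hypothetical; only the factory's documented arguments
# are used, and `c` is the usual BuildmasterConfig dict.
from zorg.buildbot.builders.AnnotatedBuilder import getAnnotatedBuildFactory

factory = getAnnotatedBuildFactory(
    script='check-all.py',                    # looked up under builders/annotated/
    depends_on_projects=['llvm', 'clang', 'compiler-rt'],
    env={'CC': 'clang', 'CXX': 'clang++'},    # merged over the TERM=dumb default
    extra_args=['--assertions'],
    timeout=3600)

c['builders'].append({'name': 'clang-x86_64-linux-annotated',
                      'factory': factory})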
apache-2.0
84,255,668,484,032,700
34.427083
81
0.628051
false
4.077938
false
false
false
cbitterfield/JobCard
archive/bulkvideosize.py
1
3578
#!/opt/local/bin/python
# encoding: utf-8
'''
bulkvideosize -- shortdesc

bulkvideosize is a description

It defines classes_and_methods

@author:     user_name

@copyright:  2017 organization_name. All rights reserved.

@license:    license

@contact:    user_email
@deffield    updated: Updated
'''

import sys
import os
import argparse

__all__ = []
__version__ = 0.1
__date__ = '2017-10-20'
__updated__ = '2017-10-20'

DEBUG = 1
TESTRUN = 0
PROFILE = 0

program_name = os.path.basename(sys.argv[0])

# Setup argument parser
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true", help="Display detailed debugging information")
parser.add_argument("-l","--logfile", action="store", help="Write logfile; if omitted write to STDOUT")
parser.add_argument("-s","--source", action="store", help="Source Directory")

# Process arguments
args = parser.parse_args()
verbose = args.verbose
path = args.source
logfile = args.logfile

if verbose:
    print("Verbose mode on")


## Use Get Video Size Function
def getvideosize(src):
    import shlex
    import os
    from string import Template
    import subprocess
    import datetime
    FFPROBE="/opt/local/bin/ffprobe"
    Error = False
    # initialize so the return values are defined even if no .mp4 is found
    sizeofVideo = Duration = BitRate = None
    log_text = open(logfile, "w")
    for video in os.listdir(src):
        if video.endswith(".mp4"):
            CMD_TEMPLATE = "$FFPROBE -v error -of flat=s=_ -select_streams v:0 -show_entries stream=height,width,bit_rate,duration '$VIDEO'"
            CMD = Template(CMD_TEMPLATE).safe_substitute(FFPROBE=FFPROBE, VIDEO=src + "/" + video)
            videoName = os.path.basename(video)
            pathName = os.path.dirname(src + "/" + video)
            #print("Get the Video Size Information for Video: " + videoName )
            #print("Source Dir:" + pathName )
            #print("getVideoSizeCMD:\n " )
            pCMD = shlex.split(CMD)
            #print("Command:" + CMD)
            try:
                result=subprocess.check_output(pCMD)
                cWidth = result.splitlines(True)[0]
                cHeight = result.splitlines(True)[1]
                cDuration = result.splitlines(True)[2]
                cBit_Rate = result.splitlines(True)[3]
                lWidth = cWidth.split("=")[1]
                lHeight = cHeight.split("=")[1]
                lDuration = cDuration.split("=")[1]
                lBitRate = cBit_Rate.split("=")[1]
                Width = lWidth.replace('\n','')
                Height = lHeight.replace('\n','')
                Duration = lDuration.replace('\n','')
                BitRate = lBitRate.replace('\n','')
                Duration = Duration.replace('"','')
                BitRate = BitRate.replace('"','')
                sizeofVideo = str(Width) + "x" + str(Height)
                myduration = str(datetime.timedelta(seconds=int(float(Duration))))
                mybitrate = str(int(BitRate)/1000)
                #print("Video Source: " + video + " Size: " + sizeofVideo + " Duration: " + myduration + " BitRate:" + mybitrate + " kbps" )
                log_text.write("Video Source: " + video + " Size: " + sizeofVideo + " Duration: " + myduration + " BitRate:" + mybitrate + " kbps\n")
            except Exception:
                #print("Video Source: " + video + " ERROR")
                log_text.write("Video Source: " + video + " ERROR\n")
    log_text.close()
    return(Error, sizeofVideo, Duration, BitRate)


myError, mySize, myDuration, myBitrate = getvideosize(path)
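# --- Editor's illustration (not part of the original script): the same probe
# via ffprobe's JSON writer, which avoids the positional splitlines() parsing
# above. Assumes an ffprobe binary on PATH; the sample file name is made up.
import json
import subprocess

def probe_video(filepath):
    cmd = ['ffprobe', '-v', 'error', '-of', 'json',
           '-select_streams', 'v:0',
           '-show_entries', 'stream=width,height,duration,bit_rate', filepath]
    stream = json.loads(subprocess.check_output(cmd))['streams'][0]
    return (int(stream['width']), int(stream['height']),
            float(stream['duration']), int(stream['bit_rate']))

# width, height, duration, bitrate = probe_video('sample.mp4')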
gpl-3.0
-261,463,053,978,957,760
28.089431
149
0.58161
false
3.68866
false
false
false
Dapid/pywt
demo/dwt_signal_decomposition.py
1
1789
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import numpy as np import matplotlib.pyplot as plt import pywt ecg = np.load(os.path.join('data', 'ecg.npy')) data1 = np.concatenate((np.arange(1, 400), np.arange(398, 600), np.arange(601, 1024))) x = np.linspace(0.082, 2.128, num=1024)[::-1] data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x))) mode = pywt.MODES.sp1 def plot_signal_decomp(data, w, title): """Decompose and plot a signal S. S = An + Dn + Dn-1 + ... + D1 """ w = pywt.Wavelet(w) a = data ca = [] cd = [] for i in range(5): (a, d) = pywt.dwt(a, w, mode) ca.append(a) cd.append(d) rec_a = [] rec_d = [] for i, coeff in enumerate(ca): coeff_list = [coeff, None] + [None] * i rec_a.append(pywt.waverec(coeff_list, w)) for i, coeff in enumerate(cd): coeff_list = [None, coeff] + [None] * i rec_d.append(pywt.waverec(coeff_list, w)) fig = plt.figure() ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1) ax_main.set_title(title) ax_main.plot(data) ax_main.set_xlim(0, len(data) - 1) for i, y in enumerate(rec_a): ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2) ax.plot(y, 'r') ax.set_xlim(0, len(y) - 1) ax.set_ylabel("A%d" % (i + 1)) for i, y in enumerate(rec_d): ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2) ax.plot(y, 'g') ax.set_xlim(0, len(y) - 1) ax.set_ylabel("D%d" % (i + 1)) plot_signal_decomp(data1, 'coif5', "DWT: Signal irregularity") plot_signal_decomp(data2, 'sym5', "DWT: Frequency and phase change - Symmlets5") plot_signal_decomp(ecg, 'sym5', "DWT: Ecg sample - Symmlets5") plt.show()
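# --- Editor's illustration (not part of the original demo): a numeric check of
# the identity in the docstring above, S = An + Dn + ... + D1. It reuses the
# np/pywt imports from the demo and follows the demo's None-placeholder
# convention for waverec; the wavelet and level choices are arbitrary.
sig = np.sin(np.linspace(0, 8 * np.pi, 1024))
w = pywt.Wavelet('sym5')
coeffs = pywt.wavedec(sig, w, level=3)                 # [cA3, cD3, cD2, cD1]
total = pywt.waverec([coeffs[0], None, None, None], w)  # A3 alone
for i in range(1, len(coeffs)):                        # add D3, D2, D1
    clist = [None] * len(coeffs)
    clist[i] = coeffs[i]
    total = total + pywt.waverec(clist, w)
assert np.allclose(sig, total[:len(sig)])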
mit
5,571,234,884,135,789,000
23.847222
80
0.536613
false
2.596517
false
false
false
danrg/RGT-tool
src/RGT/XML/SVG/Attribs/xlinkAttributes.py
1
4217
from RGT.XML.SVG.Attribs.basicSvgAttribute import BasicSvgAttribute
from types import StringType


class XlinkAttributes(BasicSvgAttribute):
    ATTRIBUTE_XLINK_HREF = 'xlink:href'
    ATTRIBUTE_XLINK_SHOW = 'xlink:show'
    ATTRIBUTE_XLINK_ACTUATE = 'xlink:actuate'
    ATTRIBUTE_XLINK_TYPE = 'xlink:type'
    ATTRIBUTE_XLINK_ROLE = 'xlink:role'
    ATTRIBUTE_XLINK_ARCROLE = 'xlink:arcrole'
    ATTRIBUTE_XLINK_TITLE = 'xlink:title'

    def __init__(self):
        BasicSvgAttribute.__init__(self)

    def setXlinkHref(self, data):
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_XLINK_HREF, data)

    def setXlinkShow(self, data):
        allowedValues = ['new', 'replace', 'embed', 'other', 'none']
        if data is not None:
            if data not in allowedValues:
                values = ''
                for value in allowedValues:
                    values += value + ', '
                values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
            else:
                self._setNodeAttribute(self.ATTRIBUTE_XLINK_SHOW, data)

    def setXlinkActuate(self, data):
        allowedValues = ['onLoad']
        if data is not None:
            if data not in allowedValues:
                values = ''
                for value in allowedValues:
                    values += value + ', '
                values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
            else:
                self._setNodeAttribute(self.ATTRIBUTE_XLINK_ACTUATE, data)

    def setXlinkType(self, data):
        allowedValues = ['simple']
        if data is not None:
            if data not in allowedValues:
                values = ''
                for value in allowedValues:
                    values += value + ', '
                values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
            else:
                self._setNodeAttribute(self.ATTRIBUTE_XLINK_TYPE, data)

    def setXlinkRole(self, data):
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_XLINK_ROLE, data)

    def setXlinkArcrole(self, data):
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_XLINK_ARCROLE, data)

    def setXlinkTitle(self, data):
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_XLINK_TITLE, data)

    def getXlinkHref(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_HREF)
        if node is not None:
            return node.nodeValue
        return None

    def getXlinkShow(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_SHOW)
        if node is not None:
            return node.nodeValue
        return None

    def getXlinkActuate(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ACTUATE)
        if node is not None:
            return node.nodeValue
        return None

    def getXlinkType(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_TYPE)
        if node is not None:
            return node.nodeValue
        return None

    def getXlinkRole(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ROLE)
        if node is not None:
            return node.nodeValue
        return None

    def getXlinkArcrole(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ARCROLE)
        if node is not None:
            return node.nodeValue
        return None

    def getXlinkTitle(self):
        node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_TITLE)
        if node is not None:
            return node.nodeValue
        return None
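# --- Editor's note (not part of the original class): the three enumerated
# setters above repeat the same validation loop. A sketch of a shared helper
# that could replace them; the method name below is hypothetical.
def _setEnumAttribute(self, attribute, data, allowedValues):
    if data is not None:
        if data not in allowedValues:
            raise ValueError('Value not allowed, only '
                             + ', '.join(allowedValues) + ' are allowed')
        self._setNodeAttribute(attribute, data)

# e.g. setXlinkShow() would reduce to:
#     self._setEnumAttribute(self.ATTRIBUTE_XLINK_SHOW, data,
#                            ['new', 'replace', 'embed', 'other', 'none'])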
mit
2,037,298,200,612,529,700
32.867769
85
0.564857
false
4.183532
false
false
false
hsnr-gamera/gamera
gamera/gui/gaoptimizer/StopCriteriaPanel.py
1
4654
# # Copyright (C) 2012 Tobias Bolten # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # import wx from gamera.gui import compat_wx from gamera.gui.gaoptimizer.ExpertSettingPanel import * #------------------------------------------------------------------------------- class StopCriteriaPanel(ExpertSettingPanel): #------------------------------------------------------------------------------- #--------------------------------------------------------------------------- def __init__(self, parent, id): #--------------------------------------------------------------------------- ExpertSettingPanel.__init__(self, parent, id) sizer = wx.GridBagSizer(hgap=5, vgap=5) self.SetSizer(sizer) # best fitness self.bestFitness = wx.CheckBox(self, -1, "Perfect LOO-recognition reached", \ name = "bestFitnessStop") sizer.Add(self.bestFitness, pos=(0,0), \ flag = wx.LEFT | wx.RIGHT | wx.TOP | wx.EXPAND, border=10) self.genericWidgets.append(self.bestFitness) # generation counter self.maxGeneration = wx.CheckBox(self, -1, "Max. number of generations", \ name = "maxGenerations") sizer.Add(self.maxGeneration, pos=(1,0), \ flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border = 10) self.maxGenerationCount = wx.SpinCtrl(self, -1, size=(100,-1), \ min=10, max=5000, value='100') compat_wx.set_tool_tip(self.maxGenerationCount, "Number of generations") self.maxGenerationCount.Disable() sizer.Add(self.maxGenerationCount, pos=(1,1), \ flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10) self.genericWidgets.append(self.maxGeneration) self.AddChildToParent(self.maxGeneration, self.maxGenerationCount) # fitness counter self.maxFitnessEval = wx.CheckBox(self, -1, "Max. 
number of fitness evals", \ name = "maxFitnessEvals") sizer.Add(self.maxFitnessEval, pos=(2,0), \ flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10) self.maxFitnessEvalCount = wx.SpinCtrl(self, -1, size=(100,-1), \ min=10, max=50000, value='5000') compat_wx.set_tool_tip(self.maxFitnessEvalCount, "Number of evaluations") self.maxFitnessEvalCount.Disable() sizer.Add(self.maxFitnessEvalCount, pos=(2,1), \ flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10) self.genericWidgets.append(self.maxFitnessEval) self.AddChildToParent(self.maxFitnessEval, self.maxFitnessEvalCount) # steady state continue self.steadyContinue = wx.CheckBox(self, -1, "Steady state continue", \ name = "steadyStateStop") self.steadyContinue.SetValue(True) sizer.Add(self.steadyContinue, pos=(3,0), \ flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10) self.steadyContinueMin = wx.SpinCtrl(self, -1, size=(100,-1), \ min=10, max=250000, value='40') compat_wx.set_tool_tip(self.steadyContinueMin, "Minimum generations") sizer.Add(self.steadyContinueMin, pos=(3,1), \ flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10) self.steadyContinueNoChange = wx.SpinCtrl(self, -1, size=(100,-1), \ min=1, max=10000, value='10') compat_wx.set_tool_tip(self.steadyContinueNoChange, "Generations without improvement") sizer.Add(self.steadyContinueNoChange, pos=(3,2), \ flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10) self.genericWidgets.append(self.steadyContinue) self.AddChildToParent(self.steadyContinue, self.steadyContinueMin) self.AddChildToParent(self.steadyContinue, self.steadyContinueNoChange) # bind the EVT_CHECKBOX to the CheckBoxes self.BindEvent(wx.EVT_CHECKBOX, self.OnCheckBox, \ [self.bestFitness, self.maxGeneration, self.maxFitnessEval, self.steadyContinue])
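# --- Editor's illustration (not part of the original panel): how the four stop
# criteria configured above typically combine in a GA loop. All names here are
# hypothetical and do not reflect the actual Gamera optimizer API.
def should_stop(gen, evals, best_is_perfect, gens_without_improvement, cfg):
    """Return True once any enabled stop criterion is met."""
    if cfg['bestFitnessStop'] and best_is_perfect:
        return True
    if cfg['maxGenerations'] and gen >= cfg['max_generations']:
        return True
    if cfg['maxFitnessEvals'] and evals >= cfg['max_evals']:
        return True
    if (cfg['steadyStateStop'] and gen >= cfg['steady_min']
            and gens_without_improvement >= cfg['steady_no_change']):
        return True
    return False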
gpl-2.0
-2,199,924,503,062,840,000
46.489796
94
0.613666
false
3.714286
false
false
false
BenKaehler/q2-feature-classifier
q2_feature_classifier/tests/__init__.py
1
1162
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2019, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import tempfile import shutil from warnings import filterwarnings from qiime2.plugin.testing import TestPluginBase class FeatureClassifierTestPluginBase(TestPluginBase): def setUp(self): try: from q2_feature_classifier.plugin_setup import plugin except ImportError: self.fail("Could not import plugin object.") self.plugin = plugin self.temp_dir = tempfile.TemporaryDirectory( prefix='q2-feature-classifier-test-temp-') filterwarnings('ignore', 'The TaxonomicClassifier ', UserWarning) def _setup_dir(self, filenames, dirfmt): for filename in filenames: filepath = self.get_data_path(filename) shutil.copy(filepath, self.temp_dir.name) return dirfmt(self.temp_dir.name, mode='r')
bsd-3-clause
-7,658,275,673,037,822,000
32.2
78
0.598107
false
4.723577
false
false
false
kalyan02/dayone
do/lib.py
1
2300
from BeautifulSoup import BeautifulSoup as Soupify
import urllib, re
import oauth2, urlparse, json
from do import settings

class DropboxAPI(object):
    def __init__(self, user):
        self.user = user
        dinfo = self.user.social_auth.get(provider='dropbox')
        access_token = urlparse.parse_qs( dinfo.extra_data['access_token'] )
        self.user_token = oauth2.Token(key=access_token['oauth_token'][0],secret=access_token['oauth_token_secret'][0])
        self.cons_token = oauth2.Consumer(key=settings.DROPBOX_APP_ID,secret=settings.DROPBOX_API_SECRET)

    def request( self, api_call, extra_params=None ):
        self.parameters = {
            'oauth_signature_method': oauth2.SignatureMethod_PLAINTEXT.name,
            'oauth_timestamp' : oauth2.generate_timestamp(),
            'oauth_nonce' : oauth2.generate_nonce(),
            'oauth_version' : '1.0',
        }
        if type(extra_params) is dict:
            self.parameters.update(extra_params)

        self.req = oauth2.Request( url=api_call, parameters=self.parameters )
        self.req.sign_request( signature_method=oauth2.SignatureMethod_PLAINTEXT(), token=self.user_token, consumer=self.cons_token)
        return self.req

    def call(self,method,params):
        pass


def format_json(json_string):
    return json.dumps( json.loads( json_string ), indent=4 )

# def file_put_contents( fname, fcon ):
#     fh = open( fname, 'w+' )
#     fh.write( fcon )
#     fh.close()

# def file_get_contents( fname ):
#     fh = open( fname, 'r')
#     return fh.read()

# dropbox_url = "https://www.dropbox.com/sh/7gcfvmk9h107ryc/F39GaH7W8C"
# con = urllib.urlopen( dropbox_url ).read()
# file_put_contents( 'fh.txt', con )
# con = file_get_contents('fh.txt')
# scon = Soupify( con )
# entries_url = scon.findAll( 'a', attrs={'href':re.compile('/entries$')} )[0]['href']
# photos_url = scon.findAll( 'a', attrs={'href':re.compile('/photos$')} )[0]['href']
# print entries_url
# print photos_url
# # entries_page = urllib.urlopen(entries_url).read()
# # file_put_contents('entries_page.txt',entries_page)
# entries_page = file_get_contents('entries_page.txt')
# econ = Soupify(entries_page)
# posts = econ.findAll( 'a', attrs={'href':re.compile('\.doentry')} )
# urls = [ each['href'] for i,each in enumerate(posts) if i % 2 == 1 ]
# mods = econ.findAll( attrs={'class':'modified-time'} )
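# --- Editor's illustration (not part of the original module): signing and
# issuing a Dropbox API v1 call with the class above, kept commented out in the
# style of the experiments above. The endpoint URL and the Django `user`
# object are assumptions; error handling is omitted.
# import urllib2
# api = DropboxAPI(user)                       # user: django.contrib.auth User
# req = api.request('https://api.dropbox.com/1/metadata/dropbox/',
#                   extra_params={'list': 'true'})
# resp = urllib2.urlopen(req.to_url()).read()  # PLAINTEXT-signed params live in the URL
# print format_json(resp)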
gpl-2.0
-606,869,924,962,227,300
33.863636
126
0.677391
false
2.90404
false
false
false
cfobel/sconspiracy
Python/racy/plugins/qt/sconstools/qt4.py
1
21143
# ***** BEGIN LICENSE BLOCK ***** # Sconspiracy - Copyright (C) IRCAD, 2004-2010. # Distributed under the terms of the BSD Licence as # published by the Open Source Initiative. # ****** END LICENSE BLOCK ****** """SCons.Tool.qt Tool-specific initialization for Qt. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. Tool provided by http://www.iua.upf.es/~dgarcia/Codders/sconstools.html """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "/home/scons/scons/branch.0/branch.96/baseline/src/engine/SCons/Tool/qt.py 0.96.92.D001 2006/04/10 23:13:27 knight" import os.path import re import SCons.Action import SCons.Builder import SCons.Defaults import SCons.Scanner import SCons.Tool import SCons.Util class ToolQtWarning(SCons.Warnings.Warning): pass class GeneratedMocFileNotIncluded(ToolQtWarning): pass class QtdirNotFound(ToolQtWarning): pass SCons.Warnings.enableWarningClass(ToolQtWarning) qrcinclude_re = re.compile(r'<file>([^<]*)</file>', re.M) def transformToWinePath(path) : return os.popen('winepath -w "%s"'%path).read().strip().replace('\\','/') header_extensions = [".h", ".hxx", ".hpp", ".hh"] if SCons.Util.case_sensitive_suffixes('.h', '.H'): header_extensions.append('.H') # TODO: The following two lines will work when integrated back to SCons # TODO: Meanwhile the third line will do the work #cplusplus = __import__('c++', globals(), locals(), []) #cxx_suffixes = cplusplus.CXXSuffixes cxx_suffixes = [".c", ".cxx", ".cpp", ".cc"] def checkMocIncluded(target, source, env): moc = target[0] cpp = source[0] # looks like cpp.includes is cleared before the build stage :-( # not really sure about the path transformations (moc.cwd? cpp.cwd?) :-/ path = SCons.Defaults.CScan.path_function(env, moc.cwd) includes = SCons.Defaults.CScan(cpp, env, path) if not moc in includes: SCons.Warnings.warn( GeneratedMocFileNotIncluded, "Generated moc file '%s' is not included by '%s'" % (str(moc), str(cpp))) def find_file(filename, paths, node_factory): for dir in paths: node = node_factory(filename, dir) if node.rexists(): return node return None class _Automoc: """ Callable class, which works as an emitter for Programs, SharedLibraries and StaticLibraries. """ def __init__(self, objBuilderName): self.objBuilderName = objBuilderName def __call__(self, target, source, env): """ Smart autoscan function. 
Gets the list of objects for the Program or Lib. Adds objects and builders for the special qt files. """ try: if int(env.subst('$QT4_AUTOSCAN')) == 0: return target, source except ValueError: pass try: debug = int(env.subst('$QT4_DEBUG')) except ValueError: debug = 0 # some shortcuts used in the scanner splitext = SCons.Util.splitext objBuilder = getattr(env, self.objBuilderName) # some regular expressions: # Q_OBJECT detection q_object_search = re.compile(r'[^A-Za-z0-9]Q_OBJECT[^A-Za-z0-9]') # cxx and c comment 'eater' #comment = re.compile(r'(//.*)|(/\*(([^*])|(\*[^/]))*\*/)') # CW: something must be wrong with the regexp. See also bug #998222 # CURRENTLY THERE IS NO TEST CASE FOR THAT # The following is kind of hacky to get builders working properly (FIXME) objBuilderEnv = objBuilder.env objBuilder.env = env mocBuilderEnv = env.Moc4.env env.Moc4.env = env # make a deep copy for the result; MocH objects will be appended out_sources = source[:] for obj in source: if isinstance(obj,basestring): # big kludge! print "scons: qt4: '%s' MAYBE USING AN OLD SCONS VERSION AND NOT CONVERTED TO 'File'. Discarded." % str(obj) continue if not obj.has_builder(): # binary obj file provided if debug: print "scons: qt: '%s' seems to be a binary. Discarded." % str(obj) continue cpp = obj.sources[0] if not splitext(str(cpp))[1] in cxx_suffixes: if debug: print "scons: qt: '%s' is no cxx file. Discarded." % str(cpp) # c or fortran source continue #cpp_contents = comment.sub('', cpp.get_contents()) try: cpp_contents = cpp.get_contents() except: continue # may be an still not generated source h=None for h_ext in header_extensions: # try to find the header file in the corresponding source # directory hname = splitext(cpp.name)[0] + h_ext h = find_file(hname, (cpp.get_dir(),), env.File) if h: if debug: print "scons: qt: Scanning '%s' (header of '%s')" % (str(h), str(cpp)) #h_contents = comment.sub('', h.get_contents()) h_contents = h.get_contents() break if not h and debug: print "scons: qt: no header for '%s'." 
% (str(cpp)) if h and q_object_search.search(h_contents): # h file with the Q_OBJECT macro found -> add moc_cpp moc_cpp = env.Moc4(h) moc_o = objBuilder(moc_cpp) out_sources.append(moc_o) #moc_cpp.target_scanner = SCons.Defaults.CScan if debug: print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp)) if cpp and q_object_search.search(cpp_contents): # cpp file with Q_OBJECT macro found -> add moc # (to be included in cpp) moc = env.Moc4(cpp) env.Ignore(moc, moc) if debug: print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc)) #moc.source_scanner = SCons.Defaults.CScan # restore the original env attributes (FIXME) objBuilder.env = objBuilderEnv env.Moc4.env = mocBuilderEnv return (target, out_sources) AutomocShared = _Automoc('SharedObject') AutomocStatic = _Automoc('StaticObject') def _detect(env): """Not really safe, but fast method to detect the QT library""" try: return env['QTDIR'] except KeyError: pass try: return os.environ['QTDIR'] except KeyError: pass moc = env.WhereIs('moc-qt4') or env.WhereIs('moc4') or env.WhereIs('moc') if moc: QTDIR = os.path.dirname(os.path.dirname(moc)) SCons.Warnings.warn( QtdirNotFound, "QTDIR variable is not defined, using moc executable as a hint (QTDIR=%s)" % QTDIR) return QTDIR raise SCons.Errors.StopError( QtdirNotFound, "Could not detect Qt 4 installation") return None def generate(env): """Add Builders and construction variables for qt to an Environment.""" def locateQt4Command(env, command, qtdir) : suffixes = [ '-qt4', '-qt4.exe', '4', '4.exe', '', '.exe', ] triedPaths = [] for suffix in suffixes : fullpath = os.path.join(qtdir,'bin',command + suffix) if os.access(fullpath, os.X_OK) : return fullpath triedPaths.append(fullpath) fullpath = env.Detect([command+'-qt4', command+'4', command]) if not (fullpath is None) : return fullpath raise Exception("Qt4 command '" + command + "' not found. Tried: " + ', '.join(triedPaths)) CLVar = SCons.Util.CLVar Action = SCons.Action.Action Builder = SCons.Builder.Builder splitext = SCons.Util.splitext env['QTDIR'] = _detect(env) # TODO: 'Replace' should be 'SetDefault' # env.SetDefault( env.Replace( QTDIR = env['QTDIR'], QT4_BINPATH = os.path.join('$QTDIR', 'bin'), QT4_CPPPATH = os.path.join('$QTDIR', 'include'), QT4_LIBPATH = os.path.join('$QTDIR', 'lib'), # TODO: This is not reliable to QTDIR value changes but needed in order to support '-qt4' variants QT4_MOC = locateQt4Command(env,'moc', env['QTDIR']), QT4_UIC = locateQt4Command(env,'uic', env['QTDIR']), QT4_RCC = locateQt4Command(env,'rcc', env['QTDIR']), QT4_LUPDATE = locateQt4Command(env,'lupdate', env['QTDIR']), QT4_LRELEASE = locateQt4Command(env,'lrelease', env['QTDIR']), QT4_LIB = '', # KLUDGE to avoid linking qt3 library QT4_AUTOSCAN = 0, # Should the qt tool try to figure out, which sources are to be moc'ed? # Some QT specific flags. I don't expect someone wants to # manipulate those ... 
QT4_UICFLAGS = CLVar(''), QT4_MOCFROMHFLAGS = CLVar(''), QT4_MOCFROMCXXFLAGS = CLVar('-i'), QT4_QRCFLAGS = '', # suffixes/prefixes for the headers / sources to generate QT4_UISUFFIX = '.ui', QT4_UICDECLPREFIX = 'ui_', QT4_UICDECLSUFFIX = '.h', QT4_MOCINCPREFIX = '-I', QT4_MOCHPREFIX = 'moc_', QT4_MOCHSUFFIX = '$CXXFILESUFFIX', QT4_MOCCXXPREFIX = '', QT4_MOCCXXSUFFIX = '.moc', QT4_QRCSUFFIX = '.qrc', QT4_QRCCXXSUFFIX = '$CXXFILESUFFIX', QT4_QRCCXXPREFIX = 'qrc_', QT4_MOCCPPPATH = [], QT4_MOCINCFLAGS = '$( ${_concat(QT4_MOCINCPREFIX, QT4_MOCCPPPATH, INCSUFFIX, __env__, RDirs)} $)', # Commands for the qt support ... QT4_UICCOM = '$QT4_UIC $QT4_UICFLAGS -o $TARGET $SOURCE', QT4_MOCFROMHCOM = '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE', QT4_MOCFROMCXXCOM = [ '$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE', Action(checkMocIncluded,None)], QT4_LUPDATECOM = '$QT4_LUPDATE $SOURCE -ts $TARGET', QT4_LRELEASECOM = '$QT4_LRELEASE $SOURCE', QT4_RCCCOM = '$QT4_RCC $QT4_QRCFLAGS $SOURCE -o $TARGET -name ${SOURCE.filebase}', ) # Translation builder tsbuilder = Builder( action = SCons.Action.Action('$QT4_LUPDATECOM'), #,'$QT4_LUPDATECOMSTR'), multi=1 ) env.Append( BUILDERS = { 'Ts': tsbuilder } ) qmbuilder = Builder( action = SCons.Action.Action('$QT4_LRELEASECOM'),# , '$QT4_LRELEASECOMSTR'), src_suffix = '.ts', suffix = '.qm', single_source = True ) env.Append( BUILDERS = { 'Qm': qmbuilder } ) # Resource builder def scanResources(node, env, path, arg): # I've being careful on providing names relative to the qrc file # If that was not needed that code could be simplified a lot def recursiveFiles(basepath, path) : result = [] for item in os.listdir(os.path.join(basepath, path)) : itemPath = os.path.join(path, item) if os.path.isdir(os.path.join(basepath, itemPath)) : result += recursiveFiles(basepath, itemPath) else: result.append(itemPath) return result contents = node.get_contents() includes = qrcinclude_re.findall(contents) qrcpath = os.path.dirname(node.path) dirs = [included for included in includes if os.path.isdir(os.path.join(qrcpath,included))] # dirs need to include files recursively for dir in dirs : includes.remove(dir) includes+=recursiveFiles(qrcpath,dir) return includes qrcscanner = SCons.Scanner.Scanner(name = 'qrcfile', function = scanResources, argument = None, skeys = ['.qrc']) qrcbuilder = Builder( action = SCons.Action.Action('$QT4_RCCCOM'), #, '$QT4_RCCCOMSTR'), source_scanner = qrcscanner, src_suffix = '$QT4_QRCSUFFIX', suffix = '$QT4_QRCCXXSUFFIX', prefix = '$QT4_QRCCXXPREFIX', single_source = True ) env.Append( BUILDERS = { 'Qrc': qrcbuilder } ) # Interface builder uic4builder = Builder( action = SCons.Action.Action('$QT4_UICCOM'), #, '$QT4_UICCOMSTR'), src_suffix='$QT4_UISUFFIX', suffix='$QT4_UICDECLSUFFIX', prefix='$QT4_UICDECLPREFIX', single_source = True #TODO: Consider the uiscanner on new scons version ) env['BUILDERS']['Uic4'] = uic4builder # Metaobject builder mocBld = Builder(action={}, prefix={}, suffix={}) for h in header_extensions: act = SCons.Action.Action('$QT4_MOCFROMHCOM') #, '$QT4_MOCFROMHCOMSTR') mocBld.add_action(h, act) mocBld.prefix[h] = '$QT4_MOCHPREFIX' mocBld.suffix[h] = '$QT4_MOCHSUFFIX' for cxx in cxx_suffixes: act = SCons.Action.Action('$QT4_MOCFROMCXXCOM') #, '$QT4_MOCFROMCXXCOMSTR') mocBld.add_action(cxx, act) mocBld.prefix[cxx] = '$QT4_MOCCXXPREFIX' mocBld.suffix[cxx] = '$QT4_MOCCXXSUFFIX' env['BUILDERS']['Moc4'] = mocBld # er... 
no idea what that was for static_obj, shared_obj = SCons.Tool.createObjBuilders(env) static_obj.src_builder.append('Uic4') shared_obj.src_builder.append('Uic4') # We use the emitters of Program / StaticLibrary / SharedLibrary # to scan for moc'able files # We can't refer to the builders directly, we have to fetch them # as Environment attributes because that sets them up to be called # correctly later by our emitter. env.AppendUnique(PROGEMITTER =[AutomocStatic], SHLIBEMITTER=[AutomocShared], LIBEMITTER =[AutomocStatic], # Of course, we need to link against the qt libraries # CPPPATH=["$QT4_CPPPATH"], LIBPATH=["$QT4_LIBPATH"], LIBS=['$QT4_LIB']) # TODO: Does dbusxml2cpp need an adapter env.AddMethod(enable_modules, "EnableQt4Modules") def enable_modules(self, modules, debug=False, crosscompiling=False) : import sys validModules = [ 'QtCore', 'QtGui', 'QtOpenGL', 'Qt3Support', 'QtAssistant', 'QtScript', 'QtDBus', 'QtSql', # The next modules have not been tested yet so, please # maybe they require additional work on non Linux platforms 'QtNetwork', 'QtSvg', 'QtTest', 'QtXml', 'QtXmlPatterns', 'QtUiTools', 'QtDesigner', 'QtDesignerComponents', 'QtWebKit', 'QtHelp', 'QtScript', ] pclessModules = [ # in qt <= 4.3 designer and designerComponents are pcless, on qt4.4 they are not, so removed. # 'QtDesigner', # 'QtDesignerComponents', ] staticModules = [ 'QtUiTools', ] invalidModules=[] for module in modules: if module not in validModules : invalidModules.append(module) if invalidModules : raise Exception("Modules %s are not Qt4 modules. Valid Qt4 modules are: %s"% ( str(invalidModules),str(validModules))) moduleDefines = { 'QtScript' : ['QT_SCRIPT_LIB'], 'QtSvg' : ['QT_SVG_LIB'], 'Qt3Support' : ['QT_QT3SUPPORT_LIB','QT3_SUPPORT'], 'QtSql' : ['QT_SQL_LIB'], 'QtXml' : ['QT_XML_LIB'], 'QtOpenGL' : ['QT_OPENGL_LIB'], 'QtGui' : ['QT_GUI_LIB'], 'QtNetwork' : ['QT_NETWORK_LIB'], 'QtCore' : ['QT_CORE_LIB'], } for module in modules : try : self.AppendUnique(CPPDEFINES=moduleDefines[module]) except: pass debugSuffix = '' if sys.platform in ["darwin", "linux2"] and not crosscompiling : if debug : debugSuffix = '_debug' for module in modules : if module not in pclessModules : continue self.AppendUnique(LIBS=[module+debugSuffix]) self.AppendUnique(LIBPATH=[os.path.join("$QTDIR","lib")]) self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4")]) self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4",module)]) pcmodules = [module+debugSuffix for module in modules if module not in pclessModules ] if 'QtDBus' in pcmodules: self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4","QtDBus")]) if "QtAssistant" in pcmodules: self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4","QtAssistant")]) pcmodules.remove("QtAssistant") pcmodules.append("QtAssistantClient") self.ParseConfig('pkg-config %s --libs --cflags'% ' '.join(pcmodules)) self["QT4_MOCCPPPATH"] = self["CPPPATH"] return if sys.platform == "win32" or crosscompiling : if crosscompiling: transformedQtdir = transformToWinePath(self['QTDIR']) self['QT4_MOC'] = "QTDIR=%s %s"%( transformedQtdir, self['QT4_MOC']) self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include")]) try: modules.remove("QtDBus") except: pass if debug : debugSuffix = 'd' if "QtAssistant" in modules: self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","QtAssistant")]) modules.remove("QtAssistant") modules.append("QtAssistantClient") self.AppendUnique(LIBS=[lib+'4'+debugSuffix for lib in modules if lib not in staticModules]) 
self.PrependUnique(LIBS=[lib+debugSuffix for lib in modules if lib in staticModules]) if 'QtOpenGL' in modules: self.AppendUnique(LIBS=['opengl32']) self.AppendUnique(CPPPATH=[ '$QTDIR/include/']) self.AppendUnique(CPPPATH=[ '$QTDIR/include/'+module for module in modules]) if crosscompiling : self["QT4_MOCCPPPATH"] = [ path.replace('$QTDIR', transformedQtdir) for path in self['CPPPATH'] ] else : self["QT4_MOCCPPPATH"] = self["CPPPATH"] self.AppendUnique(LIBPATH=[os.path.join('$QTDIR','lib')]) return """ if sys.platform=="darwin" : # TODO: Test debug version on Mac self.AppendUnique(LIBPATH=[os.path.join('$QTDIR','lib')]) self.AppendUnique(LINKFLAGS="-F$QTDIR/lib") self.AppendUnique(LINKFLAGS="-L$QTDIR/lib") #TODO clean! if debug : debugSuffix = 'd' for module in modules : # self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include")]) # self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include",module)]) # port qt4-mac: self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include", "qt4")]) self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include", "qt4", module)]) if module in staticModules : self.AppendUnique(LIBS=[module+debugSuffix]) # TODO: Add the debug suffix self.AppendUnique(LIBPATH=[os.path.join("$QTDIR","lib")]) else : # self.Append(LINKFLAGS=['-framework', module]) # port qt4-mac: self.Append(LIBS=module) if 'QtOpenGL' in modules: self.AppendUnique(LINKFLAGS="-F/System/Library/Frameworks") self.Append(LINKFLAGS=['-framework', 'AGL']) #TODO ughly kludge to avoid quotes self.Append(LINKFLAGS=['-framework', 'OpenGL']) self["QT4_MOCCPPPATH"] = self["CPPPATH"] return # This should work for mac but doesn't # env.AppendUnique(FRAMEWORKPATH=[os.path.join(env['QTDIR'],'lib')]) # env.AppendUnique(FRAMEWORKS=['QtCore','QtGui','QtOpenGL', 'AGL']) """ def exists(env): return _detect(env)
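# --- Editor's illustration (not part of the tool): a minimal SConstruct using
# this tool inside an SCons build. The toolpath and source names are
# hypothetical; EnableQt4Modules and QT4_AUTOSCAN are the hooks defined by
# generate() above.
env = Environment(tools=['default', 'qt4'],
                  toolpath=['path/to/sconstools'])   # wherever qt4.py lives
env['QT4_AUTOSCAN'] = 1                 # let the Automoc emitter scan for Q_OBJECT
env.EnableQt4Modules(['QtCore', 'QtGui'])
env.Program('app', ['main.cpp', 'mainwindow.cpp'])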
bsd-3-clause
6,324,493,422,340,307,000
38.593633
130
0.593577
false
3.625965
false
false
false
jpopelka/fabric8-analytics-worker
f8a_worker/solver.py
1
37059
"""Classes for resolving dependencies as specified in each ecosystem.""" import anymarkup from bs4 import BeautifulSoup from collections import defaultdict from functools import cmp_to_key import logging from lxml import etree from operator import itemgetter from pip._internal.req.req_file import parse_requirements from pip._vendor.packaging.specifiers import _version_split import re from requests import get from semantic_version import Version as semver_version from subprocess import check_output from tempfile import NamedTemporaryFile, TemporaryDirectory from urllib.parse import urljoin, quote from urllib.request import urlopen import requests from f8a_worker.enums import EcosystemBackend from f8a_worker.models import Analysis, Ecosystem, Package, Version from f8a_worker.utils import cwd, TimedCommand from f8a_worker.process import Git logger = logging.getLogger(__name__) class SolverException(Exception): """Exception to be raised in Solver.""" pass class Tokens(object): """Comparison token representation.""" operators = ['>=', '<=', '==', '>', '<', '=', '!='] (GTE, LTE, EQ1, GT, LT, EQ2, NEQ) = range(len(operators)) def compare_version(a, b): """Compare two version strings. :param a: str :param b: str :return: -1 / 0 / 1 """ def _range(q): """Convert a version string to array of integers. "1.2.3" -> [1, 2, 3] :param q: str :return: List[int] """ r = [] for n in q.replace('-', '.').split('.'): try: r.append(int(n)) except ValueError: # sort rc*, alpha, beta etc. lower than their non-annotated counterparts r.append(-1) return r def _append_zeros(x, num_zeros): """Append `num_zeros` zeros to a copy of `x` and return it. :param x: List[int] :param num_zeros: int :return: List[int] """ nx = list(x) for _ in range(num_zeros): nx.append(0) return nx def _cardinal(x, y): """Make both input lists be of same cardinality. :param x: List[int] :param y: List[int] :return: List[int] """ lx, ly = len(x), len(y) if lx == ly: return x, y elif lx > ly: return x, _append_zeros(y, lx - ly) else: return _append_zeros(x, ly - lx), y left, right = _cardinal(_range(a), _range(b)) return (left > right) - (left < right) class ReleasesFetcher(object): """Base class for fetching releases.""" def __init__(self, ecosystem): """Initialize ecosystem.""" self._ecosystem = ecosystem @property def ecosystem(self): """Get ecosystem property.""" return self._ecosystem def fetch_releases(self, package): """Abstract method for getting list of releases versions.""" raise NotImplementedError class PypiReleasesFetcher(ReleasesFetcher): """Releases fetcher for Pypi.""" def __init__(self, ecosystem): """Initialize instance.""" super(PypiReleasesFetcher, self).__init__(ecosystem) def fetch_releases(self, package): """Fetch package releases versions. 
XML-RPC API Documentation: https://wiki.python.org/moin/PyPIXmlRpc Signature: package_releases(package_name, show_hidden=False) """ if not package: raise ValueError("package") package = package.lower() pypi_package_url = urljoin( self.ecosystem.fetch_url, '{pkg_name}/json'.format(pkg_name=package) ) response = requests.get(pypi_package_url) if response.status_code != 200: logger.error('Unable to obtain a list of versions for {pkg_name}'.format( pkg_name=package )) return package, [] return package, list({x for x in response.json().get('releases', {})}) class NpmReleasesFetcher(ReleasesFetcher): """Releases fetcher for NPM.""" def __init__(self, ecosystem): """Initialize instance.""" super(NpmReleasesFetcher, self).__init__(ecosystem) def fetch_releases(self, package): """Fetch package releases versions. Example output from the NPM endpoint: { ... versions: { "0.1.0": {}, "0.1.2": {} ... } } """ if not package: raise ValueError("package") # quote '/' (but not '@') in scoped package name, e.g. in '@slicemenice/item-layouter' r = get(self.ecosystem.fetch_url + quote(package, safe='@')) if r.status_code == 200 and r.content: return package, list(r.json().get('versions', {}).keys()) return package, [] class RubyGemsReleasesFetcher(ReleasesFetcher): """Releases fetcher for Rubygems.""" def __init__(self, ecosystem): """Initialize instance.""" super(RubyGemsReleasesFetcher, self).__init__(ecosystem) def _search_package_name(self, package): """Search package on rubygems.org.""" url = '{url}/search.json?query={pkg}'.format(url=self.ecosystem.fetch_url, pkg=package) r = get(url) if r.status_code == 200: exact_match = [p['name'] for p in r.json() if p['name'].lower() == package.lower()] if exact_match: return exact_match.pop() raise ValueError("Package {} not found".format(package)) def fetch_releases(self, package): """Fetch package releases versions. Example output from the RubyGems endpoint [ { "number": "1.0.0", ... }, { "number": "2.0.0", ... } ... ] """ if not package: raise ValueError("package") url = '{url}/versions/{pkg}.json'.format(url=self.ecosystem.fetch_url, pkg=package) r = get(url) if r.status_code == 404: return self.fetch_releases(self._search_package_name(package)) return package, [ver['number'] for ver in r.json()] class NugetReleasesFetcher(ReleasesFetcher): """Releases fetcher for Nuget.""" def __init__(self, ecosystem): """Initialize instance.""" super(NugetReleasesFetcher, self).__init__(ecosystem) def scrape_versions_from_nuget_org(self, package, sort_by_downloads=False): """Scrape 'Version History' from Nuget.""" releases = [] nuget_packages_url = 'https://www.nuget.org/packages/' page = get(nuget_packages_url + package) page = BeautifulSoup(page.text, 'html.parser') version_history = page.find(class_="version-history") for version in version_history.find_all(href=re.compile('/packages/')): version_text = version.text.replace('(current)', '').strip() try: semver_version.coerce(version_text) downloads = int(version.find_next('td').text.strip().replace(',', '')) except ValueError: pass else: releases.append((version_text, downloads)) if sort_by_downloads: releases.sort(key=itemgetter(1)) return package, [p[0] for p in reversed(releases)] def fetch_releases(self, package): """Fetch package releases versions.""" if not package: raise ValueError("package not specified") # There's an API interface which lists available releases at # https://api.nuget.org/v3-flatcontainer/{package}/index.json # But it lists also unlisted/deprecated/shouldn't-be-used versions, # so we don't use it. 
return self.scrape_versions_from_nuget_org(package) class MavenReleasesFetcher(ReleasesFetcher): """Releases fetcher for Maven.""" def __init__(self, ecosystem): """Initialize instance.""" super().__init__(ecosystem) def releases_from_maven_org(self, group_id, artifact_id): """Fetch releases versions for group_id/artifact_id.""" metadata_filenames = ['maven-metadata.xml', 'maven-metadata-local.xml'] group_id_path = group_id.replace('.', '/') versions = set() we_good = False for filename in metadata_filenames: url = urljoin( self.ecosystem.fetch_url, '{g}/{a}/{f}'.format(g=group_id_path, a=artifact_id, f=filename) ) try: metadata_xml = etree.parse(urlopen(url)) we_good = True # We successfully downloaded at least one of the metadata files version_elements = metadata_xml.findall('.//version') versions = versions.union({x.text for x in version_elements}) except OSError: # Not both XML files have to exist, so don't freak out yet pass if not we_good: logger.error('Unable to obtain a list of versions for {g}:{a}'.format( g=group_id, a=artifact_id) ) return list(versions) def fetch_releases(self, package): """Fetch package releases versions.""" if not package: raise ValueError("package not specified") try: group_id, artifact_id = package.split(':') except ValueError as exc: raise ValueError("Invalid Maven coordinates: {a}".format(a=package)) from exc return package, self.releases_from_maven_org(group_id, artifact_id) class GolangReleasesFetcher(ReleasesFetcher): """Releases fetcher for Golang.""" def __init__(self, ecosystem): """Initialize instance.""" super(GolangReleasesFetcher, self).__init__(ecosystem) def fetch_releases(self, package): """Fetch package releases versions.""" if not package: raise ValueError('package not specified') parts = package.split("/")[:3] if len(parts) == 3: # this assumes github.com/org/project like structure host, org, proj = parts repo_url = 'git://{host}/{org}/{proj}.git'.format(host=host, org=org, proj=proj) elif len(parts) == 2 and parts[0] == 'gopkg.in': # specific to gopkg.in/packages host, proj = parts repo_url = 'https://{host}/{proj}.git'.format(host=host, proj=proj) else: raise ValueError("Package {} is invalid git repository".format(package)) output = Git.ls_remote(repo_url, args=['-q'], refs=['HEAD']) version, ref = output[0].split() if not version: raise ValueError("Package {} does not have associated versions".format(package)) return package, [version] class F8aReleasesFetcher(ReleasesFetcher): """Releases fetcher for internal database.""" def __init__(self, ecosystem, database): """Initialize instance.""" super(F8aReleasesFetcher, self).__init__(ecosystem) self.database = database def fetch_releases(self, package): """Fetch analysed versions for specific ecosystem + package from f8a.""" query = self.database.query(Version).\ join(Analysis).join(Package).join(Ecosystem).\ filter(Package.name == package, Ecosystem.name == self.ecosystem.name, Analysis.finished_at.isnot(None)) versions = {v.identifier for v in query} return package, list(sorted(versions, key=cmp_to_key(compare_version))) class Dependency(object): """A Dependency consists of (package) name and version spec.""" def __init__(self, name, spec): """Initialize instance.""" self._name = name # spec is a list where each item is either 2-tuple (operator, version) or list of these # example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means: # (>=0.6.0 and <0.7.0) or >1.0.0 self._spec = spec @property def name(self): """Get name property.""" return self._name @property def spec(self): 
"""Get version spec property.""" return self._spec def __contains__(self, item): """Implement 'in' operator.""" return self.check(item) def __repr__(self): """Return string representation of this instance.""" return "{} {}".format(self.name, self.spec) def __eq__(self, other): """Implement '==' operator.""" return self.name == other.name and self.spec == other.spec def check(self, version): """Check if `version` fits into our dependency specification. :param version: str :return: bool """ def _compare_spec(spec): if len(spec) == 1: spec = ('=', spec[0]) token = Tokens.operators.index(spec[0]) comparison = compare_version(version, spec[1]) if token in [Tokens.EQ1, Tokens.EQ2]: return comparison == 0 elif token == Tokens.GT: return comparison == 1 elif token == Tokens.LT: return comparison == -1 elif token == Tokens.GTE: return comparison >= 0 elif token == Tokens.LTE: return comparison <= 0 elif token == Tokens.NEQ: return comparison != 0 else: raise ValueError('Invalid comparison token') def _all(spec_): return all(_all(s) if isinstance(s, list) else _compare_spec(s) for s in spec_) return any(_all(s) if isinstance(s, list) else _compare_spec(s) for s in self.spec) class DependencyParser(object): """Base class for Dependency parsing.""" def parse(self, specs): """Abstract method for Dependency parsing.""" pass @staticmethod def compose_sep(deps, separator): """Opposite of parse(). :param deps: list of Dependency() :param separator: when joining dependencies, use this separator :return: dict of {name: version spec} """ result = {} for dep in deps: if dep.name not in result: result[dep.name] = separator.join([op + ver for op, ver in dep.spec]) else: result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec]) return result class PypiDependencyParser(DependencyParser): """Pypi Dependency parsing.""" @staticmethod def _parse_python(spec): """Parse PyPI specification of a single dependency. 
:param spec: str, for example "Django>=1.5,<1.8" :return: [Django [[('>=', '1.5'), ('<', '1.8')]]] """ def _extract_op_version(spec): # https://www.python.org/dev/peps/pep-0440/#compatible-release if spec.operator == '~=': version = _version_split(spec.version) if len(version) > 1: # ignore pre-release, post-release or developmental release while not version[-1].isdigit(): del version[-1] del version[-1] # will increase the last but one in next line version[-1] = str(int(version[-1]) + 1) else: raise ValueError('%r must not be used with %r' % (spec.operator, spec.version)) return [('>=', spec.version), ('<', '.'.join(version))] # Trailing .* is permitted per # https://www.python.org/dev/peps/pep-0440/#version-matching elif spec.operator == '==' and spec.version.endswith('.*'): try: result = check_output(['/usr/bin/semver-ranger', spec.version], universal_newlines=True).strip() gte, lt = result.split() return [('>=', gte.lstrip('>=')), ('<', lt.lstrip('<'))] except ValueError: logger.info("couldn't resolve ==%s", spec.version) return spec.operator, spec.version # https://www.python.org/dev/peps/pep-0440/#arbitrary-equality # Use of this operator is heavily discouraged, so just convert it to 'Version matching' elif spec.operator == '===': return '==', spec.version else: return spec.operator, spec.version def _get_pip_spec(requirements): """There's no `specs` field In Pip 8+, take info from `specifier` field.""" if hasattr(requirements, 'specs'): return requirements.specs elif hasattr(requirements, 'specifier'): specs = [_extract_op_version(spec) for spec in requirements.specifier] if len(specs) == 0: specs = [('>=', '0.0.0')] elif len(specs) > 1: specs = [specs] return specs # create a temporary file and store the spec there since # `parse_requirements` requires a file with NamedTemporaryFile(mode='w+', suffix='pysolve') as f: f.write(spec) f.flush() parsed = parse_requirements(f.name, session=f.name) dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop() return dependency def parse(self, specs): """Parse specs.""" return [self._parse_python(s) for s in specs] @staticmethod def compose(deps): """Compose deps.""" return DependencyParser.compose_sep(deps, ',') @staticmethod def restrict_versions(deps): """Not implemented.""" return deps # TODO class NpmDependencyParser(DependencyParser): """NPM Dependency parsing.""" @staticmethod def _parse_npm_tokens(spec): """Parse npm tokens.""" for token in Tokens.operators: if token in spec: return token, spec.split(token)[1] return spec, def _parse_npm(self, name, spec): """Parse NPM specification of a single dependency. 
:param name: str
        :param spec: str
        :return: Dependency
        """
        if spec == 'latest':
            specs = '*'
        else:
            specs = check_output(['/usr/bin/semver-ranger', spec],
                                 universal_newlines=True).strip()
        if specs == 'null':
            logger.info("invalid version specification for %s = %s", name, spec)
            return None

        ret = []
        for s in specs.split('||'):
            if ' ' in s:
                spaced = s.split(' ')
                assert len(spaced) == 2
                left, right = spaced
                ret.append([self._parse_npm_tokens(left), self._parse_npm_tokens(right)])
            elif s == '*':
                ret.append(('>=', '0.0.0'))
            else:
                ret.append(self._parse_npm_tokens(s))
        return Dependency(name, ret)

    def parse(self, specs):
        """Transform list of dependencies (strings) to list of Dependency."""
        deps = []
        for spec in specs:
            name, ver = spec.split(' ', 1)
            parsed = self._parse_npm(name, ver)
            if parsed:
                deps.append(parsed)
        return deps

    @staticmethod
    def compose(deps):
        """Opposite of parse()."""
        return DependencyParser.compose_sep(deps, ' ')

    @staticmethod
    def restrict_versions(deps):
        """From list of semver ranges select only the most restrictive ones for each operator.

        :param deps: list of Dependency(), example:
                     [node [('>=', '0.6.0')], node [('<', '1.0.0')], node [('>=', '0.8.0')]]
        :return: list of Dependency() with only the most restrictive versions, example:
                 [node [('<', '1.0.0')], node [('>=', '0.8.0')]]
        """
        # list to dict
        # {
        #     'node' : {
        #             '>=': ['0.8.0', '0.6.0'],
        #             '<': ['1.0.0']
        #         }
        # }
        dps_dict = defaultdict(dict)
        for dp in deps:
            if dp.name not in dps_dict:
                dps_dict[dp.name] = defaultdict(list)
            for spec in dp.spec:
                if len(spec) != 2:
                    continue
                operator, version = spec
                dps_dict[dp.name][operator].append(version)

        # select only the most restrictive versions
        result = []
        for name, version_spec_dict in dps_dict.items():
            specs = []
            for operator, versions in version_spec_dict.items():
                if operator in ['>', '>=']:  # select highest version
                    version = sorted(versions, key=cmp_to_key(compare_version))[-1]
                elif operator in ['<', '<=']:  # select lowest version
                    version = sorted(versions, key=cmp_to_key(compare_version))[0]
                specs.append((operator, version))
            # dict back to list
            result.append(Dependency(name, specs))

        return result


RubyGemsDependencyParser = NpmDependencyParser


class OSSIndexDependencyParser(NpmDependencyParser):
    """Parse OSS Index version specification."""

    def _parse_npm(self, name, spec):
        """Parse OSS Index version specification. It's similar to NPM semver, with a few tweaks."""
        # sometimes there's '|' instead of '||', but the meaning seems to be the same
        spec = spec.replace(' | ', ' || ')
        # remove superfluous brackets
        spec = spec.replace('(', '').replace(')', '')
        return super()._parse_npm(name, spec)


class NugetDependencyParser(object):
    """Nuget version specification parsing."""

    def parse(self, specs):
        """Transform list of dependencies (strings) to list of Dependency.
https://docs.microsoft.com/en-us/nuget/create-packages/dependency-versions#version-ranges :param specs: list of dependencies (strings) :return: list of Dependency """ # TODO: reduce cyclomatic complexity deps = [] for spec in specs: name, version_range = spec.split(' ', 1) # 1.0 -> 1.0≤x if re.search(r'[,()\[\]]', version_range) is None: dep = Dependency(name, [('>=', version_range)]) # [1.0,2.0] -> 1.0≤x≤2.0 elif re.fullmatch(r'\[(.+),(.+)\]', version_range): m = re.fullmatch(r'\[(.+),(.+)\]', version_range) dep = Dependency(name, [[('>=', m.group(1)), ('<=', m.group(2))]]) # (1.0,2.0) -> 1.0<x<2.0 elif re.fullmatch(r'\((.+),(.+)\)', version_range): m = re.fullmatch(r'\((.+),(.+)\)', version_range) dep = Dependency(name, [[('>', m.group(1)), ('<', m.group(2))]]) # The following one is not in specification, # so we can just guess what was the intention. # Seen in NLog:5.0.0-beta08 dependencies # [1.0, ) -> 1.0≤x elif re.fullmatch(r'\[(.+), \)', version_range): m = re.fullmatch(r'\[(.+), \)', version_range) dep = Dependency(name, [('>=', m.group(1))]) # [1.0,2.0) -> 1.0≤x<2.0 elif re.fullmatch(r'\[(.+),(.+)\)', version_range): m = re.fullmatch(r'\[(.+),(.+)\)', version_range) dep = Dependency(name, [[('>=', m.group(1)), ('<', m.group(2))]]) # (1.0,) -> 1.0<x elif re.fullmatch(r'\((.+),\)', version_range): m = re.fullmatch(r'\((.+),\)', version_range) dep = Dependency(name, [('>', m.group(1))]) # [1.0] -> x==1.0 elif re.fullmatch(r'\[(.+)\]', version_range): m = re.fullmatch(r'\[(.+)\]', version_range) dep = Dependency(name, [('==', m.group(1))]) # (,1.0] -> x≤1.0 elif re.fullmatch(r'\(,(.+)\]', version_range): m = re.fullmatch(r'\(,(.+)\]', version_range) dep = Dependency(name, [('<=', m.group(1))]) # (,1.0) -> x<1.0 elif re.fullmatch(r'\(,(.+)\)', version_range): m = re.fullmatch(r'\(,(.+)\)', version_range) dep = Dependency(name, [('<', m.group(1))]) elif re.fullmatch(r'\((.+)\)', version_range): raise ValueError("invalid version range %r" % version_range) deps.append(dep) return deps class NoOpDependencyParser(DependencyParser): """Dummy dependency parser for ecosystems that don't support version ranges.""" def parse(self, specs): """Transform list of dependencies (strings) to list of Dependency.""" return [Dependency(*x.split(' ')) for x in specs] @staticmethod def compose(deps): """Opposite of parse().""" return DependencyParser.compose_sep(deps, ' ') @staticmethod def restrict_versions(deps): """Not implemented.""" return deps class GolangDependencyParser(DependencyParser): """Dependency parser for Golang.""" def parse(self, specs): """Transform list of dependencies (strings) to list of Dependency.""" dependencies = [] for spec in specs: spec_list = spec.split(' ') if len(spec_list) > 1: dependencies.append(Dependency(spec_list[0], spec_list[1])) else: dependencies.append(Dependency(spec_list[0], '')) return dependencies @staticmethod def compose(deps): """Opposite of parse().""" return DependencyParser.compose_sep(deps, ' ') @staticmethod def restrict_versions(deps): """Not implemented.""" return deps class Solver(object): """Base class for resolving dependencies.""" def __init__(self, ecosystem, dep_parser=None, fetcher=None, highest_dependency_version=True): """Initialize instance.""" self.ecosystem = ecosystem self._dependency_parser = dep_parser self._release_fetcher = fetcher self._highest_dependency_version = highest_dependency_version @property def dependency_parser(self): """Return DependencyParser instance used by this solver.""" return self._dependency_parser @property def 
release_fetcher(self): """Return ReleasesFetcher instance used by this solver.""" return self._release_fetcher def solve(self, dependencies, graceful=True, all_versions=False): """Solve `dependencies` against upstream repository. :param dependencies: List, List of dependencies in native format :param graceful: bool, Print info output to stdout :param all_versions: bool, Return all matched versions instead of the latest :return: Dict[str, str], Matched versions """ solved = {} for dep in self.dependency_parser.parse(dependencies): logger.debug("Fetching releases for: {}".format(dep)) name, releases = self.release_fetcher.fetch_releases(dep.name) if name in solved: raise SolverException("Dependency: {} is listed multiple times".format(name)) if not releases: if graceful: logger.info("No releases found for: %s", dep.name) else: raise SolverException("No releases found for: {}".format(dep.name)) matching = sorted([release for release in releases if release in dep], key=cmp_to_key(compare_version)) logger.debug(" matching:\n {}".format(matching)) if all_versions: solved[name] = matching else: if not matching: solved[name] = None else: if self._highest_dependency_version: solved[name] = matching[-1] else: solved[name] = matching[0] return solved class PypiSolver(Solver): """Pypi dependencies solver.""" def __init__(self, ecosystem, parser=None, fetcher=None): """Initialize instance.""" super(PypiSolver, self).__init__(ecosystem, parser or PypiDependencyParser(), fetcher or PypiReleasesFetcher(ecosystem)) class NpmSolver(Solver): """Npm dependencies solver.""" def __init__(self, ecosystem, parser=None, fetcher=None): """Initialize instance.""" super(NpmSolver, self).__init__(ecosystem, parser or NpmDependencyParser(), fetcher or NpmReleasesFetcher(ecosystem)) class RubyGemsSolver(Solver): """Rubygems dependencies solver.""" def __init__(self, ecosystem, parser=None, fetcher=None): """Initialize instance.""" super(RubyGemsSolver, self).__init__(ecosystem, parser or RubyGemsDependencyParser(), fetcher or RubyGemsReleasesFetcher(ecosystem)) class NugetSolver(Solver): """Nuget dependencies solver. Nuget is a bit specific because it by default resolves version specs to lowest possible version. https://docs.microsoft.com/en-us/nuget/release-notes/nuget-2.8#-dependencyversion-switch """ def __init__(self, ecosystem, parser=None, fetcher=None): """Initialize instance.""" super(NugetSolver, self).__init__(ecosystem, parser or NugetDependencyParser(), fetcher or NugetReleasesFetcher(ecosystem), highest_dependency_version=False) class MavenManualSolver(Solver): """Use this only if you need to resolve all versions or use specific DependencyParser. Otherwise use MavenSolver (below). 
""" def __init__(self, ecosystem, parser, fetcher=None): """Initialize instance.""" super().__init__(ecosystem, parser, fetcher or MavenReleasesFetcher(ecosystem)) class GolangSolver(Solver): """Golang dependencies solver.""" def __init__(self, ecosystem, parser=None, fetcher=None): """Initialize instance.""" super(GolangSolver, self).__init__(ecosystem, parser or GolangDependencyParser(), fetcher or GolangReleasesFetcher(ecosystem)) def solve(self, dependencies): """Solve `dependencies` against upstream repository.""" result = {} for dependency in self.dependency_parser.parse(dependencies): if dependency.spec: result[dependency.name] = dependency.spec else: version = self.release_fetcher.fetch_releases(dependency.name)[1][0] result[dependency.name] = version return result class MavenSolver(object): """Doesn't inherit from Solver, because we don't use its solve(). We also don't need a DependencyParser nor a ReleasesFetcher for Maven. 'mvn versions:resolve-ranges' does all the dirty work for us. Resolves only to one version, so if you need solve(all_versions=True), use MavenManualSolver """ @staticmethod def _generate_pom_xml(to_solve): """Create pom.xml with dependencies from to_solve. And run 'mvn versions:resolve-ranges', which resolves the version ranges (overwrites the pom.xml). :param to_solve: {"groupId:artifactId": "version-range"} """ project = etree.Element('project') etree.SubElement(project, 'modelVersion').text = '4.0.0' etree.SubElement(project, 'groupId').text = 'foo.bar.baz' etree.SubElement(project, 'artifactId').text = 'testing' etree.SubElement(project, 'version').text = '1.0.0' dependencies = etree.SubElement(project, 'dependencies') for name, version_range in to_solve.items(): group_id, artifact_id = name.rstrip(':').split(':') dependency = etree.SubElement(dependencies, 'dependency') etree.SubElement(dependency, 'groupId').text = group_id etree.SubElement(dependency, 'artifactId').text = artifact_id etree.SubElement(dependency, 'version').text = version_range with open('pom.xml', 'wb') as pom: pom.write(etree.tostring(project, xml_declaration=True, pretty_print=True)) TimedCommand.get_command_output(['mvn', 'versions:resolve-ranges'], graceful=False) @staticmethod def _dependencies_from_pom_xml(): """Extract dependencies from pom.xml in current directory. :return: {"groupId:artifactId": "version"} """ solved = {} with open('pom.xml') as r: pom_dict = anymarkup.parse(r.read()) dependencies = pom_dict.get('project', {}).get('dependencies', {}).get('dependency', []) if not isinstance(dependencies, list): dependencies = [dependencies] for dependency in dependencies: name = "{}:{}".format(dependency['groupId'], dependency['artifactId']) solved[name] = str(dependency['version']) return solved @staticmethod def _resolve_versions(to_solve): """Resolve version ranges in to_solve. 
:param to_solve: {"groupId:artifactId": "version-range"} :return: {"groupId:artifactId": "version"} """ if not to_solve: return {} with TemporaryDirectory() as tmpdir: with cwd(tmpdir): MavenSolver._generate_pom_xml(to_solve) return MavenSolver._dependencies_from_pom_xml() @staticmethod def is_version_range(ver_spec): """Check whether ver_spec contains version range.""" # http://maven.apache.org/enforcer/enforcer-rules/versionRanges.html return re.search(r'[,()\[\]]', ver_spec) is not None def solve(self, dependencies): """Solve version ranges in dependencies.""" already_solved = {} to_solve = {} for dependency in dependencies: name, ver_spec = dependency.split(' ', 1) if not self.is_version_range(ver_spec): already_solved[name] = ver_spec else: to_solve[name] = ver_spec result = already_solved.copy() result.update(self._resolve_versions(to_solve)) return result def get_ecosystem_solver(ecosystem, with_parser=None, with_fetcher=None): """Get Solver subclass instance for particular ecosystem. :param ecosystem: Ecosystem :param with_parser: DependencyParser instance :param with_fetcher: ReleasesFetcher instance :return: Solver """ if ecosystem.is_backed_by(EcosystemBackend.maven): if with_parser is None: return MavenSolver() else: return MavenManualSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.npm): return NpmSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.pypi): return PypiSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.rubygems): return RubyGemsSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.nuget): return NugetSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.go): return GolangSolver(ecosystem, with_parser, with_fetcher) raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name)) def get_ecosystem_parser(ecosystem): """Get DependencyParser subclass instance for particular ecosystem.""" if ecosystem.is_backed_by(EcosystemBackend.maven): return NoOpDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.npm): return NpmDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.pypi): return PypiDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.rubygems): return RubyGemsDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.nuget): return NugetDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.go): return GolangDependencyParser() raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))
gpl-3.0
-6,675,852,524,338,168,000
35.320588
100
0.562799
false
4.107206
false
false
false
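The NugetDependencyParser sample above maps NuGet's interval notation onto comparator pairs. Below is a minimal, self-contained sketch of that mapping covering a few of the cases; `parse_nuget_range` is a hypothetical helper written for illustration, not part of the module, and it assumes Python 3.4+ for `re.fullmatch`.

import re

def parse_nuget_range(version_range):
    """Map a NuGet interval string to (operator, version) pairs."""
    if re.search(r'[,()\[\]]', version_range) is None:
        return [('>=', version_range)]          # plain "1.0" means x >= 1.0
    m = re.fullmatch(r'\[(.+),(.+)\]', version_range)
    if m:
        return [('>=', m.group(1)), ('<=', m.group(2))]
    m = re.fullmatch(r'\((.+),(.+)\)', version_range)
    if m:
        return [('>', m.group(1)), ('<', m.group(2))]
    m = re.fullmatch(r'\[(.+)\]', version_range)
    if m:
        return [('==', m.group(1))]
    raise ValueError("unsupported range %r" % version_range)

assert parse_nuget_range('[1.0,2.0]') == [('>=', '1.0'), ('<=', '2.0')]
assert parse_nuget_range('1.0') == [('>=', '1.0')]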
JohnLunzer/flexx
flexx/ui/widgets/_html.py
1
3211
""" Simple example: .. UIExample:: 75 from flexx import app, ui class Example(ui.Widget): def init(self): with ui.html.UL(): ui.html.LI(text='foo') ui.html.LI(text='bar') .. UIExample:: 150 from flexx import app, ui, event class Example(ui.Widget): def init(self): with ui.html.UL(): ui.html.LI(text='foo') ui.html.LI(text='bar') with ui.html.LI(): with ui.html.I(): self.now = ui.html.Span(text='0') self.but = ui.html.Button(text='press me') class JS: @event.connect('but.mouse_down') def on_click(self, *events): self.now.text = window.Date.now() """ from ... import event from . import Widget class Div(Widget): """ This class is the base class for "HTML widgets". These provides a lower-level way of working with HTML content that can feel more natural to users with a background in web development. Via the ``flexx.ui.html`` factory object, it is possible to create *any* type of DOM element. E.g. ``ui.html.Table()`` creates an table and ``ui.html.b(text='foo')`` creates a piece of bold text. Since this class inherits from ``Widget``, all base widget functionality (e.g. mouse events) work as expected. However, the specific functionality of each element (e.g. ``src`` for img elements) must be used in the "JavaScript way". In contrast to regular Flexx widgets, the css class name of the node only consists of the name(s) provided via the ``css_class`` property. Also see :ref:`this example <classic_web_dev.py>`. """ class Both: @event.prop def text(self, v=''): """ The inner HTML for this element. """ return str(v) class JS: def __init__(self, *args): super().__init__(*args) self.node.className = '' def _init_phosphor_and_node(self): self.phosphor = self._create_phosphor_widget(self._class_name.lower()) self.node = self.phosphor.node @event.connect('text') def __on_inner_html(self, *events): self.node.innerHTML = events[-1].new_value def _add_child(self, widget): self.node.appendChild(widget.node) class HTMLElementFactory: """ This object can be used to generate a Flexx Widget class for any HTML element that you'd like. These Widget classes inherit from ``Div``. """ def __getattr__(self, name): name = name.lower() cache = globals() if name.startswith('_'): return super().__getattr__(name) if name not in cache: # Create new class, put it in this module so that JSModule can find it cls = type(name, (Div,), {}) cls.__module__ = cls.__jsmodule__ = __name__ cache[name] = cls return cache[name] html = HTMLElementFactory()
bsd-2-clause
-7,527,310,105,070,645,000
27.415929
82
0.540953
false
4.111396
false
false
false
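Following the pattern in the module docstring above, here is a short sketch of how the generated element classes are meant to be used; it assumes a working flexx installation of the same era, and `TodoList` is a hypothetical widget name chosen for the example.

from flexx import app, ui

class TodoList(ui.Widget):
    def init(self):
        # element classes such as H3, OL and LI are generated on first access
        ui.html.H3(text='Todo')
        with ui.html.OL():
            ui.html.LI(text='write docs')
            ui.html.LI(text='ship release')

app.launch(TodoList)  # serve the widget in a browser window
app.run()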
oskyar/test-TFG
TFG/urls.py
1
2144
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout_then_login
from django.views.static import serve

import logging
from importlib import import_module

from TFG.apps.handlererrors.views import Error403, Error404, Error500
from TFG.apps.user.views import Index

from vanilla import TemplateView

from django.core import exceptions
from TFG.apps.user.views import ClientViewErrors

# from registration.views import RegistrationView

admin.autodiscover()

urlpatterns = [
    # Examples:
    # url(r'^$', 'TFG.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', Index.as_view(), name='index'),
    url(r'^cookies$', TemplateView.as_view(template_name="cookies.html"),
        name='cookies'),
    # the login pattern previously used r'^$' as well, which was shadowed by
    # the index pattern above and therefore unreachable
    url(r'^login/$', login, {'template_name': 'user/login.html'}, name='login'),
    url(r'^logout/$', logout_then_login, name='logout'),
    # url(r'^', include('TFG.apps.handlererrors.urls')),
    # url(r'^db', TFG.apps.index.views.db, name='db'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^chaining/', include('smart_selects.urls')),
    url(r'^user/', include('TFG.apps.user.urls')),
    url(r'^test/', include('TFG.apps.test.urls')),
    url(r'^subject/', include('TFG.apps.subject.urls')),
    url(r'^search/', include('TFG.apps.search.urls')),
    url(r'^s3direct/', include('s3direct.urls')),
    # url(r'^test/', include('TFG.apps.test.urls')),
    url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
]

# import_module("TFG.apps.index.signals")

# This block looks up all the signals modules added to the installed apps
"""logger = logging.getLogger(__name__)

signal_modules = {}

for app in settings.INSTALLED_APPS:
    signals_module = '%s.signals' % app
    try:
        logger.debug('loading "%s" ..' % signals_module)
        signal_modules[app] = import_module(signals_module)
    except ImportError as e:
        logger.warning(
            'failed to import "%s", reason: %s' % (signals_module, str(e)))
"""
gpl-2.0
830,453,397,419,805,600
34.7
90
0.668534
false
3.346875
false
false
false
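One common refinement of the media line in the urlconf above, shown here as a sketch rather than code from the original project: gate the `serve` view on `settings.DEBUG` so Django only serves `MEDIA_ROOT` in development, leaving production file serving to the web server.

from django.conf import settings
from django.conf.urls import url
from django.views.static import serve

if settings.DEBUG:
    # append the media route only for local development
    urlpatterns += [
        url(r'^media/(?P<path>.*)$', serve,
            {'document_root': settings.MEDIA_ROOT}),
    ]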
dingzg/onepanel
lib/module/user.py
1
10650
#!/usr/bin/env python2.6
#-*- coding: utf-8 -*-

# Copyright [OnePanel]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Package for user management.
"""

import os

if __name__ == '__main__':
    import sys
    root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    sys.path.insert(0, root_path)

import pexpect
import shlex
import time
import pwd
import grp
import subprocess

from utils import b2h, ftime


#---------------------------------------------------------------------------------------------------
# Function Name : main_process
# Usage         :
# Parameters    : None
#
# Return value  :
#     1
#---------------------------------------------------------------------------------------------------
def main_process(self):
    action = self.get_argument('action', '')

    if action == 'listuser':
        fullinfo = self.get_argument('fullinfo', 'on')
        self.write({'code': 0, 'msg': u'Successfully fetched the user list!',
                    'data': listuser(fullinfo == 'on')})

    elif action == 'listgroup':
        fullinfo = self.get_argument('fullinfo', 'on')
        self.write({'code': 0, 'msg': u'Successfully fetched the group list!',
                    'data': listgroup(fullinfo == 'on')})

    elif action in ('useradd', 'usermod'):
        if self.config.get('runtime', 'mode') == 'demo':
            self.write({'code': -1, 'msg': u'Adding or modifying users is not allowed in DEMO mode!'})
            return

        pw_name = self.get_argument('pw_name', '')
        pw_gecos = self.get_argument('pw_gecos', '')
        pw_gname = self.get_argument('pw_gname', '')
        pw_dir = self.get_argument('pw_dir', '')
        pw_shell = self.get_argument('pw_shell', '')
        pw_passwd = self.get_argument('pw_passwd', '')
        pw_passwdc = self.get_argument('pw_passwdc', '')
        lock = self.get_argument('lock', '')
        lock = (lock == 'on') and True or False

        if pw_passwd != pw_passwdc:
            self.write({'code': -1, 'msg': u'The two passwords entered do not match!'})
            return

        options = {
            'pw_gecos': _u(pw_gecos),
            'pw_gname': _u(pw_gname),
            'pw_dir': _u(pw_dir),
            'pw_shell': _u(pw_shell),
            'lock': lock
        }

        if len(pw_passwd) > 0:
            options['pw_passwd'] = _u(pw_passwd)

        if action == 'useradd':
            createhome = self.get_argument('createhome', '')
            createhome = (createhome == 'on') and True or False
            options['createhome'] = createhome
            if useradd(_u(pw_name), options):
                self.write({'code': 0, 'msg': u'User added successfully!'})
            else:
                self.write({'code': -1, 'msg': u'Failed to add the user!'})
        elif action == 'usermod':
            if usermod(_u(pw_name), options):
                self.write({'code': 0, 'msg': u'User modified successfully!'})
            else:
                self.write({'code': -1, 'msg': u'Failed to modify the user!'})

    elif action == 'userdel':
        if self.config.get('runtime', 'mode') == 'demo':
            self.write({'code': -1, 'msg': u'Deleting users is not allowed in DEMO mode!'})
            return

        pw_name = self.get_argument('pw_name', '')
        if userdel(_u(pw_name)):
            self.write({'code': 0, 'msg': u'User deleted successfully!'})
        else:
            self.write({'code': -1, 'msg': u'Failed to delete the user!'})

    elif action in ('groupadd', 'groupmod', 'groupdel'):
        if self.config.get('runtime', 'mode') == 'demo':
            self.write({'code': -1, 'msg': u'Operating on user groups is not allowed in DEMO mode!'})
            return

        gr_name = self.get_argument('gr_name', '')
        gr_newname = self.get_argument('gr_newname', '')
        actionstr = {'groupadd': u'creation', 'groupmod': u'renaming', 'groupdel': u'deletion'}

        if action == 'groupmod':
            rt = groupmod(_u(gr_name), _u(gr_newname))
        else:
            # `user` was undefined in this scope; dispatch to the
            # module-level groupadd()/groupdel() helpers defined below
            rt = globals()[action](_u(gr_name))
        if rt:
            self.write({'code': 0, 'msg': u'User group %s succeeded!' % actionstr[action]})
        else:
            self.write({'code': -1, 'msg': u'User group %s failed!' % actionstr[action]})

    elif action in ('groupmems_add', 'groupmems_del'):
        if self.config.get('runtime', 'mode') == 'demo':
            self.write({'code': -1, 'msg': u'Operating on group members is not allowed in DEMO mode!'})
            return

        gr_name = self.get_argument('gr_name', '')
        mem = self.get_argument('mem', '')
        option = action.split('_')[1]
        optionstr = {'add': u'addition', 'del': u'deletion'}
        if groupmems(_u(gr_name), _u(option), _u(mem)):
            self.write({'code': 0, 'msg': u'Group member %s succeeded!' % optionstr[option]})
        else:
            # the original reported success on both branches;
            # the failure branch should report a failure
            self.write({'code': -1, 'msg': u'Group member %s failed!' % optionstr[option]})


def listuser(fullinfo=True):
    if fullinfo:
        # get lock status from /etc/shadow
        locks = {}
        with open('/etc/shadow') as f:
            for line in f:
                fields = line.split(':', 2)
                locks[fields[0]] = fields[1].startswith('!')
        users = pwd.getpwall()
        for i, user in enumerate(users):
            users[i] = dict((name, getattr(user, name))
                            for name in dir(user)
                            if not name.startswith('__'))
            try:
                gname = grp.getgrgid(user.pw_gid).gr_name
            except:
                gname = ''
            users[i]['pw_gname'] = gname
            users[i]['lock'] = locks[user.pw_name]
    else:
        users = [pw.pw_name for pw in pwd.getpwall()]
    return users


def passwd(username, password):
    try:
        cmd = shlex.split('passwd \'%s\'' % username)
    except:
        return False
    child = pexpect.spawn(cmd[0], cmd[1:])
    i = child.expect(['New password', 'Unknown user name'])
    if i == 1:
        if child.isalive():
            child.wait()
        return False
    child.sendline(password)
    child.expect('Retype new password')
    child.sendline(password)
    i = child.expect(['updated successfully', pexpect.EOF])
    if child.isalive():
        child.wait()
    return i == 0


def useradd(username, options):
    # command like: useradd -c 'New User' -g newgroup -s /bin/bash -m newuser
    cmd = ['useradd']
    if options.has_key('pw_gname') and options['pw_gname']:
        cmd.extend(['-g', options['pw_gname']])
    if options.has_key('pw_gecos'):
        cmd.extend(['-c', options['pw_gecos']])
    if options.has_key('pw_shell'):
        cmd.extend(['-s', options['pw_shell']])
    if options.has_key('createhome') and options['createhome']:
        cmd.append('-m')
    else:
        cmd.append('-M')
    cmd.append(username)

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdout.read()
    p.stderr.read()
    if p.wait() != 0:
        return False

    # check if need to lock/unlock the new account
    if options.has_key('lock') and options['lock']:
        if not usermod(username, {'lock': options['lock']}):
            return False

    # check if need to set passwd
    if options.has_key('pw_passwd'):
        if not passwd(username, options['pw_passwd']):
            return False

    return True


def usermod(username, options):
    user = pwd.getpwnam(username)
    # command like: usermod -c 'I am root' -g root -d /root/ -s /bin/bash -U root
    cmd = ['usermod']
    if options.has_key('pw_gname'):
        cmd.extend(['-g', options['pw_gname']])
    if options.has_key('pw_gecos') and options['pw_gecos'] != user.pw_gecos:
        cmd.extend(['-c', options['pw_gecos']])
    if options.has_key('pw_dir') and options['pw_dir'] != user.pw_dir:
        cmd.extend(['-d', options['pw_dir']])
    if options.has_key('pw_shell') and options['pw_shell'] != user.pw_shell:
        cmd.extend(['-s', options['pw_shell']])
    if options.has_key('lock') and options['lock']:
        cmd.append('-L')
    else:
        cmd.append('-U')
    cmd.append(username)

    if len(cmd) > 2:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             close_fds=True)
        p.stdout.read()
        msg = p.stderr.read()
        if p.wait() != 0:
            if not 'no changes' in msg:
                return False

    # check if need to change passwd
    if options.has_key('pw_passwd'):
        if not passwd(username,
options['pw_passwd']): return False return True def userdel(username): p = subprocess.Popen(['userdel', username], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) p.stdout.read() p.stderr.read() return p.wait() == 0 def listgroup(fullinfo=True): if fullinfo: groups = grp.getgrall() for i, group in enumerate(groups): groups[i] = dict((name, getattr(group, name)) for name in dir(group) if not name.startswith('__')) else: groups = [gr.gr_name for gr in grp.getgrall()] return groups def groupadd(groupname): p = subprocess.Popen(['groupadd', groupname], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) p.stdout.read() p.stderr.read() return p.wait() == 0 def groupmod(groupname, newgroupname): p = subprocess.Popen(['groupmod', '-n', newgroupname, groupname], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) p.stdout.read() p.stderr.read() return p.wait() == 0 def groupdel(groupname): p = subprocess.Popen(['groupdel', groupname], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) p.stdout.read() p.stderr.read() return p.wait() == 0 def groupmems(groupname, option, mem): cmd = ['groupmems', '-g', groupname] if option == 'add': cmd.extend(['-a', mem]) elif option == 'del': cmd.extend(['-d', mem]) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) p.stdout.read() p.stderr.read() return p.wait() == 0
apache-2.0
-5,547,206,471,437,395,000
33.801347
100
0.543731
false
3.362394
false
false
false
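The listuser() helper above combines the standard pwd and grp modules. As a framework-free illustration of the same idea, here is a standalone sketch that runs on any Unix system without the panel code; `list_users_brief` is a name chosen for this example.

import pwd
import grp

def list_users_brief():
    """Return a brief dict per account, resolving the primary group name."""
    users = []
    for p in pwd.getpwall():
        try:
            gname = grp.getgrgid(p.pw_gid).gr_name
        except KeyError:
            gname = ''   # gid without a matching group entry
        users.append({'name': p.pw_name, 'uid': p.pw_uid, 'group': gname,
                      'home': p.pw_dir, 'shell': p.pw_shell})
    return users

if __name__ == '__main__':
    for u in list_users_brief():
        print('%(name)s uid=%(uid)s group=%(group)s shell=%(shell)s' % u)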
Fat-Zer/FreeCAD_sf_master
src/Tools/updatefromcrowdin.py
11
12203
#!/usr/bin/python #*************************************************************************** #* * #* Copyright (c) 2009 Yorik van Havre <[email protected]> * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Library General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* This program is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Library General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with this program; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #*************************************************************************** from __future__ import print_function ''' Usage: updatefromcrowdin.py [options] [LANGCODE] [LANGCODE LANGCODE...] Example: ./updatefromcrowdin.py [-d <directory>] fr nl pt_BR Options: -h or --help : prints this help text -d or --directory : specifies a directory containing unzipped translation folders -z or --zipfile : specifies a path to the freecad.zip file -m or --module : specifies a single module name to be updated, instead of all modules If no argument is specified, the command will try to find and use a freecad.zip file located in the current src/Tools directory (such as the one obtained by running updatecrowdin.py download) and will extract the default languages specified below in this file. This command must be run from its current source tree location (/src/Tools) so it can find the correct places to put the translation files. If run with no arguments, the latest translations from crowdin will be downloaded, unzipped and put to the correct locations. The necessary renaming of files and .qm generation will be taken care of. The qrc files will also be updated when new translations are added. NOTE! The crowdin site only allows to download "builds" (zipped archives) which must be built prior to downloading. This means a build might not reflect the latest state of the translations. Better always make a build before using this script! You can specify a directory with the -d option if you already downloaded and extracted the build, or you can specify a single module to update with -m. You can also run the script without any language code, in which case all the languages contained in the archive or directory will be added. 
''' import sys, os, shutil, tempfile, zipfile, getopt, StringIO, re crowdinpath = "http://crowdin.net/download/project/freecad.zip" # locations list contains Module name, relative path to translation folder and relative path to qrc file locations = [["AddonManager","../Mod/AddonManager/Resources/translations","../Mod/AddonManager/Resources/AddonManager.qrc"], ["Arch","../Mod/Arch/Resources/translations","../Mod/Arch/Resources/Arch.qrc"], ["Assembly","../Mod/Assembly/Gui/Resources/translations","../Mod/Assembly/Gui/Resources/Assembly.qrc"], ["draft","../Mod/Draft/Resources/translations","../Mod/Draft/Resources/Draft.qrc"], ["Drawing","../Mod/Drawing/Gui/Resources/translations","../Mod/Drawing/Gui/Resources/Drawing.qrc"], ["Fem","../Mod/Fem/Gui/Resources/translations","../Mod/Fem/Gui/Resources/Fem.qrc"], ["FreeCAD","../Gui/Language","../Gui/Language/translation.qrc"], ["Image","../Mod/Image/Gui/Resources/translations","../Mod/Image/Gui/Resources/Image.qrc"], ["Mesh","../Mod/Mesh/Gui/Resources/translations","../Mod/Mesh/Gui/Resources/Mesh.qrc"], ["MeshPart","../Mod/MeshPart/Gui/Resources/translations","../Mod/MeshPart/Gui/Resources/MeshPart.qrc"], ["OpenSCAD","../Mod/OpenSCAD/Resources/translations","../Mod/OpenSCAD/Resources/OpenSCAD.qrc"], ["Part","../Mod/Part/Gui/Resources/translations","../Mod/Part/Gui/Resources/Part.qrc"], ["PartDesign","../Mod/PartDesign/Gui/Resources/translations","../Mod/PartDesign/Gui/Resources/PartDesign.qrc"], ["Points","../Mod/Points/Gui/Resources/translations","../Mod/Points/Gui/Resources/Points.qrc"], ["Raytracing","../Mod/Raytracing/Gui/Resources/translations","../Mod/Raytracing/Gui/Resources/Raytracing.qrc"], ["ReverseEngineering","../Mod/ReverseEngineering/Gui/Resources/translations","../Mod/ReverseEngineering/Gui/Resources/ReverseEngineering.qrc"], ["Robot","../Mod/Robot/Gui/Resources/translations","../Mod/Robot/Gui/Resources/Robot.qrc"], ["Sketcher","../Mod/Sketcher/Gui/Resources/translations","../Mod/Sketcher/Gui/Resources/Sketcher.qrc"], ["StartPage","../Mod/Start/Gui/Resources/translations","../Mod/Start/Gui/Resources/Start.qrc"], ["Test","../Mod/Test/Gui/Resources/translations","../Mod/Test/Gui/Resources/Test.qrc"], ["Ship","../Mod/Ship/resources/translations","../Mod/Ship/resources/Ship.qrc"], ["Plot","../Mod/Plot/resources/translations","../Mod/Plot/resources/Plot.qrc"], ["Web","../Mod/Web/Gui/Resources/translations","../Mod/Web/Gui/Resources/Web.qrc"], ["Spreadsheet","../Mod/Spreadsheet/Gui/Resources/translations","../Mod/Spreadsheet/Gui/Resources/Spreadsheet.qrc"], ["Path","../Mod/Path/Gui/Resources/translations","../Mod/Path/Gui/Resources/Path.qrc"], ["Tux","../Mod/Tux/Resources/translations","../Mod/Tux/Resources/Tux.qrc"], ["TechDraw","../Mod/TechDraw/Gui/Resources/translations","../Mod/TechDraw/Gui/Resources/TechDraw.qrc"], ] default_languages = "af ar ca cs de el es-ES eu fi fil fr gl hr hu id it ja kab ko lt nl no pl pt-BR pt-PT ro ru sk sl sr sv-SE tr uk val-ES vi zh-CN zh-TW" def updateqrc(qrcpath,lncode): "updates a qrc file with the given translation entry" print("opening " + qrcpath + "...") # getting qrc file contents if not os.path.exists(qrcpath): print("ERROR: Resource file " + qrcpath + " doesn't exist") sys.exit() f = open(qrcpath,"ro") resources = [] for l in f.readlines(): resources.append(l) f.close() # checking for existing entry name = "_" + lncode + ".qm" for r in resources: if name in r: print("language already exists in qrc file") return # find the latest qm line pos = None for i in range(len(resources)): if ".qm" in 
resources[i]: pos = i if pos is None: print("No existing .qm file in this resource. Appending to the end position") for i in range(len(resources)): if "</qresource>" in resources[i]: pos = i-1 if pos is None: print("ERROR: couldn't add qm files to this resource: " + qrcpath) sys.exit() # inserting new entry just after the last one line = resources[pos] if ".qm" in line: line = re.sub("_.*\.qm","_"+lncode+".qm",line) else: modname = os.path.splitext(os.path.basename(qrcpath))[0] line = " <file>translations/"+modname+"_"+lncode+".qm</file>\n" #print "ERROR: no existing qm entry in this resource: Please add one manually " + qrcpath #sys.exit() print("inserting line: ",line) resources.insert(pos+1,line) # writing the file f = open(qrcpath,"wb") for r in resources: f.write(r) f.close() print("successfully updated ",qrcpath) def doFile(tsfilepath,targetpath,lncode,qrcpath): "updates a single ts file, and creates a corresponding qm file" basename = os.path.basename(tsfilepath)[:-3] # special fix of the draft filename... if basename == "draft": basename = "Draft" newname = basename + "_" + lncode + ".ts" newpath = targetpath + os.sep + newname shutil.copyfile(tsfilepath, newpath) os.system("lrelease " + newpath) newqm = targetpath + os.sep + basename + "_" + lncode + ".qm" if not os.path.exists(newqm): print("ERROR: impossible to create " + newqm + ", aborting") sys.exit() updateqrc(qrcpath,lncode) def doLanguage(lncode,fmodule=""): " treats a single language" if lncode == "en": # never treat "english" translation... For now :) return mods = [] if fmodule: for l in locations: if l[0].upper() == fmodule.upper(): mods = [l] else: mods = locations if not mods: print("Error: Couldn't find module "+fmodule) sys.exit() for target in mods: basefilepath = tempfolder + os.sep + lncode + os.sep + target[0] + ".ts" targetpath = os.path.abspath(target[1]) qrcpath = os.path.abspath(target[2]) doFile(basefilepath,targetpath,lncode,qrcpath) print(lncode + " done!") if __name__ == "__main__": inputdir = "" inputzip = "" fmodule = "" args = sys.argv[1:] if len(args) < 1: inputzip = os.path.join(os.path.abspath(os.curdir),"freecad.zip") if os.path.exists(inputzip): print("Using zip file found at",inputzip) else: print(__doc__) sys.exit() else: try: opts, args = getopt.getopt(sys.argv[1:], "hd:z:m:", ["help", "directory=","zipfile=", "module="]) except getopt.GetoptError: print(__doc__) sys.exit() # checking on the options for o, a in opts: if o in ("-h", "--help"): print(__doc__) sys.exit() if o in ("-d", "--directory"): inputdir = a if o in ("-z", "--zipfile"): inputzip = a if o in ("-m", "--module"): fmodule = a currentfolder = os.getcwd() if inputdir: tempfolder = os.path.realpath(inputdir) if not os.path.exists(tempfolder): print("ERROR: " + tempfolder + " not found") sys.exit() elif inputzip: tempfolder = tempfile.mkdtemp() print("creating temp folder " + tempfolder) inputzip=os.path.realpath(inputzip) if not os.path.exists(inputzip): print("ERROR: " + inputzip + " not found") sys.exit() shutil.copy(inputzip,tempfolder) os.chdir(tempfolder) zfile=zipfile.ZipFile("freecad.zip") print("extracting freecad.zip...") zfile.extractall() else: tempfolder = tempfile.mkdtemp() print("creating temp folder " + tempfolder) os.chdir(tempfolder) os.system("wget "+crowdinpath) if not os.path.exists("freecad.zip"): print("download failed!") sys.exit() zfile=zipfile.ZipFile("freecad.zip") print("extracting freecad.zip...") zfile.extractall() os.chdir(currentfolder) if not args: #args = [o for o in os.listdir(tempfolder) if o != 
"freecad.zip"] # do not treat all languages in the zip file. Some are not translated enough. args = default_languages.split() for ln in args: if not os.path.exists(tempfolder + os.sep + ln): print("ERROR: language path for " + ln + " not found!") else: doLanguage(ln,fmodule)
lgpl-2.1
-2,449,099,759,645,174,300
44.87594
156
0.588462
false
3.916239
true
false
false
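The updateqrc() function above clones an existing <file> entry for a new language by rewriting the "_<code>.qm" suffix with re.sub. A small standalone demonstration of that rewriting step, using a sample line rather than a real qrc file:

import re

line = "    <file>translations/Draft_fr.qm</file>\n"
# swap the language suffix exactly as updateqrc() does for a new entry
new_line = re.sub(r"_.*\.qm", "_pt-BR.qm", line)
assert new_line == "    <file>translations/Draft_pt-BR.qm</file>\n"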
tlksio/tlksio
env/lib/python3.4/site-packages/logilab/astng/mixins.py
1
4348
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains some mixins for the different nodes.
"""

from logilab.astng.exceptions import (ASTNGBuildingException, InferenceError,
                                      NotFoundError)


class BlockRangeMixIn(object):
    """override block range """

    def set_line_info(self, lastchild):
        self.fromlineno = self.lineno
        self.tolineno = lastchild.tolineno
        self.blockstart_tolineno = self._blockstart_toline()

    def _elsed_block_range(self, lineno, orelse, last=None):
        """handle block line numbers range for try/finally, for, if and while
        statements
        """
        if lineno == self.fromlineno:
            return lineno, lineno
        if orelse:
            if lineno >= orelse[0].fromlineno:
                return lineno, orelse[-1].tolineno
            return lineno, orelse[0].fromlineno - 1
        return lineno, last or self.tolineno


class FilterStmtsMixin(object):
    """Mixin for statement filtering and assignment type"""

    def _get_filtered_stmts(self, _, node, _stmts, mystmt):
        """method used in _filter_stmts to get statements and trigger break"""
        if self.statement() is mystmt:
            # original node's statement is the assignment, only keep
            # current node (gen exp, list comp)
            return [node], True
        return _stmts, False

    def ass_type(self):
        return self


class AssignTypeMixin(object):

    def ass_type(self):
        return self

    def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt):
        """method used in filter_stmts"""
        if self is mystmt:
            return _stmts, True
        if self.statement() is mystmt:
            # original node's statement is the assignment, only keep
            # current node (gen exp, list comp)
            return [node], True
        return _stmts, False


class ParentAssignTypeMixin(AssignTypeMixin):

    def ass_type(self):
        return self.parent.ass_type()


class FromImportMixIn(FilterStmtsMixin):
    """MixIn for From and Import Nodes"""

    def _infer_name(self, frame, name):
        return name

    def do_import_module(self, modname):
        """return the ast for a module whose name is <modname> imported by <self>
        """
        # handle special case where we are on a package node importing a module
        # using the same name as the package, which may end in an infinite loop
        # on relative imports
        # XXX: no more needed ?
        mymodule = self.root()
        level = getattr(self, 'level', None) # Import as no level
        # XXX we should investigate deeper if we really want to check
        # importing itself: modname and mymodule.name may be relative or absolute
        if mymodule.relative_to_absolute_name(modname, level) == mymodule.name:
            # FIXME: we used to raise InferenceError here, but why ?
return mymodule try: return mymodule.import_module(modname, level=level) except ASTNGBuildingException: raise InferenceError(modname) except SyntaxError as ex: raise InferenceError(str(ex)) def real_name(self, asname): """get name from 'as' name""" for name, _asname in self.names: if name == '*': return asname if not _asname: name = name.split('.', 1)[0] _asname = name if asname == _asname: return name raise NotFoundError(asname)
mit
-2,813,748,875,574,029,000
34.639344
81
0.635005
false
4.082629
false
false
false
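The _elsed_block_range() method above computes a (from, to) line span for if/for/while/try blocks with an else branch. A toy, framework-free illustration of the same rule, with plain integers standing in for node line attributes; `elsed_block_range` here is a simplified re-statement written for this example, not the library function.

def elsed_block_range(lineno, fromlineno, tolineno, orelse_from, orelse_to):
    """Mirror of the mixin's rule: the header line maps to itself, body lines
    end just before the else branch, else-branch lines run to its end."""
    if lineno == fromlineno:
        return lineno, lineno
    if orelse_from:
        if lineno >= orelse_from:
            return lineno, orelse_to
        return lineno, orelse_from - 1
    return lineno, tolineno

# an `if` on line 10 whose `else` branch spans lines 14-16
assert elsed_block_range(10, 10, 16, 14, 16) == (10, 10)  # the `if` line itself
assert elsed_block_range(11, 10, 16, 14, 16) == (11, 13)  # body before `else`
assert elsed_block_range(15, 10, 16, 14, 16) == (15, 16)  # inside the `else`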
naturali/tensorflow
tensorflow/python/kernel_tests/variable_scope_test.py
1
35348
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for variable store.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy import tensorflow as tf from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variable_scope class VariableScopeTest(tf.test.TestCase): def testGetVar(self): vs = variable_scope._get_default_variable_store() v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) assert v == v1 def testNameExists(self): vs = variable_scope._get_default_variable_store() # No check by default, so we can both create and get existing names. v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) assert v == v1 # When reuse is False, we fail when variables are already there. vs.get_variable("w", [1], reuse=False) # That's ok. with self.assertRaises(ValueError): vs.get_variable("v", [1], reuse=False) # That fails. # When reuse is True, we fail when variables are new. vs.get_variable("v", [1], reuse=True) # That's ok. with self.assertRaises(ValueError): vs.get_variable("u", [1], reuse=True) # That fails. def testNamelessStore(self): vs = variable_scope._get_default_variable_store() vs.get_variable("v1", [2]) vs.get_variable("v2", [2]) expected_names = ["%s:0" % name for name in ["v1", "v2"]] self.assertEqual(set(expected_names), set([v.name for v in vs._vars.values()])) def testVarScopeInitializer(self): with self.test_session() as sess: init = tf.constant_initializer(0.3) with tf.variable_scope("tower") as tower: with tf.variable_scope("foo", initializer=init): v = tf.get_variable("v", []) sess.run(tf.initialize_variables([v])) self.assertAllClose(v.eval(), 0.3) with tf.variable_scope(tower, initializer=init): w = tf.get_variable("w", []) sess.run(tf.initialize_variables([w])) self.assertAllClose(w.eval(), 0.3) def testVarScopeDType(self): with self.test_session(): with tf.variable_scope("tower") as tower: with tf.variable_scope("foo", dtype=tf.float16): v = tf.get_variable("v", []) self.assertEqual(v.dtype, tf.float16_ref) with tf.variable_scope(tower, dtype=tf.float16): w = tf.get_variable("w", []) self.assertEqual(w.dtype, tf.float16_ref) def testInitFromNonTensorValue(self): with self.test_session() as sess: v = tf.get_variable("v", initializer=4, dtype=tf.int32) sess.run(tf.initialize_variables([v])) self.assertAllClose(v.eval(), 4) w = tf.get_variable("w", initializer=numpy.array([1, 2, 3]), dtype=tf.int64) sess.run(tf.initialize_variables([w])) self.assertAllClose(w.eval(), [1, 2, 3]) with self.assertRaises(TypeError): tf.get_variable("x", initializer={}) def testVarScopeCachingDevice(self): with self.test_session(): caching_device = "/job:moo" with tf.variable_scope("tower"): with tf.variable_scope("caching", caching_device=caching_device): v = tf.get_variable("v", []) self.assertTrue(v.value().device.startswith(caching_device)) with 
tf.variable_scope("child"): v2 = tf.get_variable("v", []) self.assertTrue(v2.value().device.startswith(caching_device)) with tf.variable_scope("not_cached", caching_device=""): v2_not_cached = tf.get_variable("v", []) self.assertFalse( v2_not_cached.value().device.startswith(caching_device)) with tf.variable_scope( "not_cached_identity_device", caching_device=lambda op: op.device): v2_identity_device = tf.get_variable("v", []) self.assertFalse( v2_identity_device.value().device.startswith(caching_device)) with tf.variable_scope("we_will_do_it_live") as vs_live: vs_live.set_caching_device("/job:live") v_live = tf.get_variable("v", []) self.assertTrue(v_live.value().device.startswith("/job:live")) v_tower = tf.get_variable("v", []) self.assertFalse(v_tower.value().device.startswith(caching_device)) def testVarScopeRegularizer(self): with self.test_session() as sess: init = tf.constant_initializer(0.3) def regularizer1(v): return tf.reduce_mean(v) + 0.1 def regularizer2(v): return tf.reduce_mean(v) + 0.2 with tf.variable_scope("tower", regularizer=regularizer1) as tower: with tf.variable_scope("foo", initializer=init): v = tf.get_variable("v", []) sess.run(tf.initialize_variables([v])) losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(1, len(losses)) self.assertAllClose(losses[0].eval(), 0.4) with tf.variable_scope(tower, initializer=init) as vs: u = tf.get_variable("u", []) vs.set_regularizer(regularizer2) w = tf.get_variable("w", []) # Next 3 variable not regularized to test disabling regularization. x = tf.get_variable("x", [], regularizer=tf.no_regularizer) with tf.variable_scope("baz", regularizer=tf.no_regularizer): y = tf.get_variable("y", []) vs.set_regularizer(tf.no_regularizer) z = tf.get_variable("z", []) # Check results. losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(3, len(losses)) sess.run(tf.initialize_variables([u, w, x, y, z])) self.assertAllClose(losses[0].eval(), 0.4) self.assertAllClose(losses[1].eval(), 0.4) self.assertAllClose(losses[2].eval(), 0.5) with tf.variable_scope("foo", reuse=True): v = tf.get_variable("v", []) # "v" is alredy there, reused losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(3, len(losses)) # No new loss added. def testIntializeFromValue(self): with self.test_session() as sess: init = tf.constant(0.1) w = tf.get_variable("v", initializer=init) sess.run(tf.initialize_variables([w])) self.assertAllClose(w.eval(), 0.1) with self.assertRaisesRegexp(ValueError, "shape"): # We disallow explicit shape specification when initializer is constant. tf.get_variable("u", [1], initializer=init) with tf.variable_scope("foo", initializer=init): # Constant initializer can be passed through scopes if needed. v = tf.get_variable("v") sess.run(tf.initialize_variables([v])) self.assertAllClose(v.eval(), 0.1) # Check that non-float32 initializer creates a non-float32 variable. init = tf.constant(1, dtype=tf.int32) t = tf.get_variable("t", initializer=init) self.assertEqual(t.dtype.base_dtype, tf.int32) # Raise error if `initializer` dtype and `dtype` are not identical. with self.assertRaisesRegexp(ValueError, "don't match"): tf.get_variable("s", initializer=init, dtype=tf.float64) def testControlDeps(self): with self.test_session() as sess: v0 = tf.get_variable("v0", [1], initializer=tf.constant_initializer(0)) with tf.control_dependencies([v0.value()]): v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1)) add = v1 + v0 # v0 should be uninitialized. 
with self.assertRaisesRegexp(tf.OpError, "uninitialized"): sess.run(v0) # We should be able to initialize and run v1 without initializing # v0, even if the variable was created with a control dep on v0. sess.run(v1.initializer) self.assertEqual(1, sess.run(v1)) # v0 should still be uninitialized. with self.assertRaisesRegexp(tf.OpError, "uninitialized"): sess.run(v0) with self.assertRaisesRegexp(tf.OpError, "uninitialized"): sess.run(add) # If we initialize v0 we should be able to run 'add'. sess.run(v0.initializer) sess.run(add) def testControlFlow(self): with self.test_session() as sess: v0 = tf.get_variable("v0", [], initializer=tf.constant_initializer(0)) var_dict = {} # Call get_variable in each of the cond clauses. def var_in_then_clause(): v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1)) var_dict["v1"] = v1 return v1 + v0 def var_in_else_clause(): v2 = tf.get_variable("v2", [1], initializer=tf.constant_initializer(2)) var_dict["v2"] = v2 return v2 + v0 add = control_flow_ops.cond(tf.less(v0, 10), var_in_then_clause, var_in_else_clause) v1 = var_dict["v1"] v2 = var_dict["v2"] # We should be able to initialize and run v1 and v2 without initializing # v0, even if the variable was created with a control dep on v0. sess.run(v1.initializer) self.assertEqual([1], sess.run(v1)) sess.run(v2.initializer) self.assertEqual([2], sess.run(v2)) # v0 should still be uninitialized. with self.assertRaisesRegexp(tf.OpError, "uninitialized"): sess.run(v0) # We should not be able to run 'add' yet. with self.assertRaisesRegexp(tf.OpError, "uninitialized"): sess.run(add) # If we initialize v0 we should be able to run 'add'. sess.run(v0.initializer) sess.run(add) def testGetVariableScope(self): # Test the get_variable_scope() function and setting properties of result. with self.test_session() as sess: init = tf.constant_initializer(0.3) with tf.variable_scope("foo"): new_init1 = tf.get_variable_scope().initializer self.assertEqual(new_init1, None) # Check that we can set initializer like this. tf.get_variable_scope().set_initializer(init) v = tf.get_variable("v", []) sess.run(tf.initialize_variables([v])) self.assertAllClose(v.eval(), 0.3) # Check that we can set reuse. tf.get_variable_scope().reuse_variables() with self.assertRaises(ValueError): # Fail, w does not exist yet. tf.get_variable("w", [1]) # Check that the set initializer goes away. new_init = tf.get_variable_scope().initializer self.assertEqual(new_init, None) def testVarScope(self): with self.test_session(): with tf.variable_scope("tower") as tower: self.assertEqual(tower.name, "tower") with tf.name_scope("scope") as sc: self.assertEqual(sc, "tower/scope/") with tf.variable_scope("foo"): with tf.variable_scope("bar") as bar: self.assertEqual(bar.name, "foo/bar") with tf.name_scope("scope") as sc: self.assertEqual(sc, "foo/bar/scope/") with tf.variable_scope("foo"): with tf.variable_scope(tower, reuse=True) as tower_shared: self.assertEqual(tower_shared.name, "tower") with tf.name_scope("scope") as sc: self.assertEqual(sc, "foo_1/tower/scope/") def testVarScopeNameScope(self): with self.test_session(): with tf.name_scope("scope1"): with tf.variable_scope("tower") as tower: with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope1/tower/scope2/") with tf.variable_scope(tower): # Re-entering acts like another "tower". with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope1/tower_1/scope2/") with tf.variable_scope("tower"): # Re-entering by string acts the same. 
with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope1/tower_2/scope2/") with tf.name_scope("scope3"): with tf.variable_scope("tower"): with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope3/tower/scope2/") with tf.variable_scope(tower): with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope3/tower_1/scope2/") root_var_scope = tf.get_variable_scope() with tf.name_scope("scope4"): with tf.variable_scope(root_var_scope): with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope4/scope2/") def testVarScopeOriginalNameScope(self): with self.test_session(): with tf.name_scope("scope1"): with tf.variable_scope("tower") as tower: self.assertEqual(tower.original_name_scope, "scope1/tower/") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope1/tower/scope2/") with tf.name_scope("scope2"): with tf.variable_scope(tower) as tower1: # Re-entering preserves original name scope. self.assertEqual(tower1.original_name_scope, "scope1/tower/") with tf.name_scope("foo") as sc2: self.assertEqual(sc2, "scope2/tower/foo/") # Test re-entering original name scope. with tf.name_scope(tower.original_name_scope): with tf.name_scope("bar") as sc3: self.assertEqual(sc3, "scope1/tower/bar/") with tf.name_scope("scope2"): with tf.variable_scope(tower): with tf.name_scope(tower.original_name_scope): with tf.name_scope("bar") as sc3: self.assertEqual(sc3, "scope1/tower/bar_1/") def testVarScopeObjectReuse(self): with self.test_session(): vs = None with tf.variable_scope("jump", reuse=True) as scope: vs = scope with tf.variable_scope(vs) as jump: self.assertTrue(jump.reuse) with tf.variable_scope(vs, reuse=True) as jump_reuse: self.assertTrue(jump_reuse.reuse) with tf.variable_scope(vs, reuse=False) as jump_no_reuse: self.assertFalse(jump_no_reuse.reuse) with tf.variable_scope("jump", reuse=False) as scope: vs = scope with tf.variable_scope(vs) as jump: self.assertFalse(jump.reuse) with tf.variable_scope(vs, reuse=True) as jump_reuse: self.assertTrue(jump_reuse.reuse) with tf.variable_scope(vs, reuse=False) as jump_no_reuse: self.assertFalse(jump_no_reuse.reuse) def testVarOpScope(self): with self.test_session(): with tf.name_scope("scope1"): with tf.variable_scope("tower", "default", []): self.assertEqual(tf.get_variable("w", []).name, "tower/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope1/tower/scope2/") with tf.variable_scope("tower", "default", []): with self.assertRaises(ValueError): tf.get_variable("w", []) with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope1/tower_1/scope2/") with tf.name_scope("scope2"): with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope2/default/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "default_1/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "scope2/default_1/scope2/") def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self): with self.test_session(): with tf.variable_scope(None, "defaultScope1"): with tf.variable_scope(None, "layer"): self.assertEqual(tf.get_variable("w", []).name, "defaultScope1/layer/w:0") with tf.variable_scope(None, "defaultScope1"): with tf.variable_scope(None, "layer"): self.assertEqual(tf.get_variable("w", []).name, "defaultScope1_1/layer/w:0") with tf.variable_scope(None, "defaultScope"): with tf.variable_scope(None, "layer"): self.assertEqual(tf.get_variable("w", []).name, 
"defaultScope/layer/w:0") with tf.variable_scope(None, "defaultScope1"): with tf.variable_scope(None, "layer"): self.assertEqual(tf.get_variable("w", []).name, "defaultScope1_2/layer/w:0") def testVarOpScopeReuse(self): with self.test_session(): with tf.variable_scope("outer") as outer: with tf.variable_scope("tower", "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/tower/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/tower/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/default/scope2/") with tf.variable_scope(outer, reuse=True) as outer: with tf.variable_scope("tower", "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/tower/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/tower/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/default/scope2/") def testVarScopeGetVar(self): with self.test_session(): with tf.variable_scope("root"): with tf.variable_scope("towerA") as tower_a: va = tf.get_variable("v", [1]) self.assertEqual(va.name, "root/towerA/v:0") with tf.variable_scope(tower_a, reuse=True): va2 = tf.get_variable("v", [1]) self.assertEqual(va2, va) with tf.variable_scope("towerB"): vb = tf.get_variable("v", [1]) self.assertEqual(vb.name, "root/towerB/v:0") with self.assertRaises(ValueError): with tf.variable_scope("towerA"): va2 = tf.get_variable("v", [1]) with tf.variable_scope("towerA", reuse=True): va2 = tf.get_variable("v", [1]) self.assertEqual(va2, va) with tf.variable_scope("foo"): with tf.variable_scope("bar"): v = tf.get_variable("v", [1]) self.assertEqual(v.name, "root/foo/bar/v:0") with tf.variable_scope(tower_a, reuse=True): va3 = tf.get_variable("v", [1]) self.assertEqual(va, va3) with self.assertRaises(ValueError): with tf.variable_scope(tower_a, reuse=True): with tf.variable_scope("baz"): tf.get_variable("v", [1]) with self.assertRaises(ValueError) as exc: with tf.variable_scope(tower_a, reuse=True): tf.get_variable("v", [2]) # Different shape. 
self.assertEqual("shape" in str(exc.exception), True) with self.assertRaises(ValueError) as exc: with tf.variable_scope(tower_a, reuse=True): tf.get_variable("v", [1], dtype=tf.int32) self.assertEqual("dtype" in str(exc.exception), True) def testVarScopeOuterScope(self): with self.test_session(): with tf.variable_scope("outer") as outer: pass with tf.variable_scope(outer): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/scope2/") with tf.variable_scope("default"): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/default/scope2/") with tf.variable_scope(outer, reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_2/scope2/") with tf.variable_scope("default", reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_2/default/scope2/") def testVarScopeNestedOuterScope(self): with self.test_session(): with tf.variable_scope("outer") as outer: with tf.variable_scope(outer): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/outer/scope2/") with tf.variable_scope("default"): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/default/scope2/") with tf.variable_scope(outer, reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/outer_1/scope2/") with tf.variable_scope("default", reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/default_1/scope2/") def testVarOpScopeReuseParam(self): with self.test_session(): with tf.variable_scope("outer") as outer: with tf.variable_scope("tower", "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/tower/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/tower/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/default/scope2/") with tf.variable_scope(outer) as outer: with tf.variable_scope("tower", "default", reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/tower/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/tower/scope2/") outer.reuse_variables() with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/default/scope2/") def testVarOpScopeReuseError(self): with self.test_session(): with self.assertRaises(ValueError): with tf.variable_scope(None, "default", reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/tower/w:0") def testVarOpScopeOuterScope(self): with self.test_session(): with tf.variable_scope("outer") as outer: pass with tf.variable_scope(outer, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with 
tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/default/scope2/") with tf.variable_scope(outer, "default", reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_2/scope2/") outer.reuse_variables() with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_2/default/scope2/") def testVarOpScopeNestedOuterScope(self): with self.test_session(): with tf.variable_scope("outer") as outer: with tf.variable_scope(outer, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/outer/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer/default/scope2/") with tf.variable_scope(outer, "default", reuse=True): self.assertEqual(tf.get_variable("w", []).name, "outer/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/scope2/") with tf.variable_scope(None, "default", []): self.assertEqual(tf.get_variable("w", []).name, "outer/default/w:0") with tf.name_scope("scope2") as sc2: self.assertEqual(sc2, "outer_1/default/scope2/") def testGetLocalVar(self): with self.test_session(): # Check that local variable respects naming. with tf.variable_scope("outer") as outer: with tf.variable_scope(outer, "default", []): local_var = variable_scope.get_local_variable( "w", [], collections=["foo"]) self.assertEqual(local_var.name, "outer/w:0") # Since variable is local, it should be in the local variable collection # but not the the trainable collection. self.assertIn(local_var, tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)) self.assertIn(local_var, tf.get_collection("foo")) self.assertNotIn( local_var, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)) # Check that local variable respects `reuse`. 
with tf.variable_scope(outer, "default", reuse=True): self.assertEqual(variable_scope.get_local_variable("w", []).name, "outer/w:0") def axis0_into1_partitioner(shape=None, **unused_kwargs): part = [1] * len(shape) return part def axis0_into2_partitioner(shape=None, **unused_kwargs): part = [1] * len(shape) part[0] = 2 return part def axis0_into3_partitioner(shape=None, **unused_kwargs): part = [1] * len(shape) part[0] = 3 return part class VariableScopeWithPartitioningTest(tf.test.TestCase): def testResultNameMatchesRequested(self): with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner): v = tf.get_variable("name0", shape=(3, 1, 1)) self.assertEqual(v.name, "scope0/name0") v_concat = v.as_tensor() self.assertEqual(v_concat.name, "scope0/name0:0") variables = tf.get_collection(tf.GraphKeys.VARIABLES) self.assertTrue("scope0/name0/part_0:0" in [x.name for x in variables]) self.assertTrue("scope0/name0/part_1:0" in [x.name for x in variables]) self.assertFalse("scope0/name0/part_2:0" in [x.name for x in variables]) def testBreaksIfPartitioningChanges(self): with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner): tf.get_variable("name0", shape=(3, 1, 1)) with tf.variable_scope("scope0", partitioner=axis0_into3_partitioner, reuse=True): with self.assertRaisesRegexp( ValueError, "Trying to reuse partitioned variable .* but specified partitions .* " "and found partitions .*"): tf.get_variable("name0", shape=(3, 1, 1)) with tf.variable_scope("scope0", partitioner=axis0_into1_partitioner, reuse=True): with self.assertRaisesRegexp( ValueError, "Trying to reuse partitioned variable .* but specified partitions .* " "and found partitions .*"): tf.get_variable("name0", shape=(3, 1, 1)) def testReturnsExistingConcatenatedValueIfReuse(self): with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner): v_concat = tf.get_variable("name0", shape=(3, 1, 1)) tf.get_variable_scope().reuse_variables() v_concat_2 = tf.get_variable("name0", shape=(3, 1, 1)) self.assertEqual(v_concat, v_concat_2) def testAllowsReuseWithoutPartitioner(self): with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner): v = tf.get_variable("name0", shape=(3, 1, 1)) with tf.variable_scope("scope0", reuse=True): v_reused = tf.get_variable("name0") self.assertEqual(v, v_reused) def testPropagatePartitionerOnReopening(self): with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner) as vs: self.assertEqual(axis0_into2_partitioner, vs.partitioner) with tf.variable_scope(vs) as vs1: self.assertEqual(axis0_into2_partitioner, vs1.partitioner) def testPartitionConcatenatesAlongCorrectAxis(self): def _part_axis_0(**unused_kwargs): return (2, 1, 1) def _part_axis_1(**unused_kwargs): return (1, 2, 1) with tf.variable_scope("root"): v0 = tf.get_variable("n0", shape=(2, 2, 2), partitioner=_part_axis_0) v1 = tf.get_variable("n1", shape=(2, 2, 2), partitioner=_part_axis_1) self.assertEqual(v0.get_shape(), (2, 2, 2)) self.assertEqual(v1.get_shape(), (2, 2, 2)) n0_0 = tf.get_default_graph().get_tensor_by_name("root/n0/part_0:0") n0_1 = tf.get_default_graph().get_tensor_by_name("root/n0/part_1:0") self.assertEqual(n0_0.get_shape(), (1, 2, 2)) self.assertEqual(n0_1.get_shape(), (1, 2, 2)) n1_0 = tf.get_default_graph().get_tensor_by_name("root/n1/part_0:0") n1_1 = tf.get_default_graph().get_tensor_by_name("root/n1/part_1:0") self.assertEqual(n1_0.get_shape(), (2, 1, 2)) self.assertEqual(n1_1.get_shape(), (2, 1, 2)) class VariableScopeWithCustomGetterTest(tf.test.TestCase): def 
testNonCallableGetterFails(self): with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"): with tf.variable_scope("scope0", custom_getter=3): tf.get_variable("name0") with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"): tf.get_variable("name0", custom_getter=3) def testNoSideEffectsWithIdentityCustomGetter(self): called = [0] def custom_getter(getter, *args, **kwargs): called[0] += 1 return getter(*args, **kwargs) with tf.variable_scope("scope", custom_getter=custom_getter) as scope: v = tf.get_variable("v", [1]) with tf.variable_scope(scope, reuse=True): v2 = tf.get_variable("v", [1]) with tf.variable_scope("new_scope") as new_scope: v3 = tf.get_variable("v3", [1]) with tf.variable_scope(new_scope, reuse=True, custom_getter=custom_getter): v4 = tf.get_variable("v3", [1]) self.assertEqual(v, v2) self.assertEqual(v3, v4) self.assertEqual(3, called[0]) # skipped one in the first new_scope def testGetterThatCreatesTwoVariablesAndSumsThem(self): def custom_getter(getter, name, *args, **kwargs): g_0 = getter("%s/0" % name, *args, **kwargs) g_1 = getter("%s/1" % name, *args, **kwargs) with tf.name_scope("custom_getter"): return g_0 + g_1 with tf.variable_scope("scope", custom_getter=custom_getter): v = tf.get_variable("v", [1, 2, 3]) self.assertEqual([1, 2, 3], v.get_shape()) true_vars = tf.trainable_variables() self.assertEqual(2, len(true_vars)) self.assertEqual("scope/v/0:0", true_vars[0].name) self.assertEqual("scope/v/1:0", true_vars[1].name) self.assertEqual("custom_getter/add:0", v.name) with self.test_session() as sess: tf.initialize_all_variables().run() np_vars, np_v = sess.run([true_vars, v]) self.assertAllClose(np_v, sum(np_vars)) class PartitionInfoTest(tf.test.TestCase): def testConstructorChecks(self): # Invalid arg types. with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1]) with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None) with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1]) with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo") # full_shape and var_offset must have same length. with self.assertRaises(ValueError): variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0]) # Offset must always be less than shape. with self.assertRaises(ValueError): variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1]) def testSingleOffset(self): partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[4, 0]) self.assertEqual(4, partition_info.single_offset([1, 3])) # Tests when the variable isn't partitioned at all. partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[0, 0]) self.assertEqual(0, partition_info.single_offset([9, 3])) def testSingleSliceDim(self): partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[4, 0]) # Invalid shape. with self.assertRaises(TypeError): partition_info.single_slice_dim(None) # Rank of shape differs from full_shape. with self.assertRaises(ValueError): partition_info.single_slice_dim([1, 2, 3]) # Shape is too large given var_offset (4+6 > 9). with self.assertRaises(ValueError): partition_info.single_slice_dim([6, 3]) # Multiple possible slice dim from shape. 
with self.assertRaises(ValueError): partition_info.single_slice_dim([1, 1]) partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[0, 0]) self.assertEqual(1, partition_info.single_slice_dim([9, 2])) partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[4, 0]) self.assertEqual(0, partition_info.single_slice_dim([2, 3])) if __name__ == "__main__": tf.test.main()
apache-2.0
-7,995,408,519,929,527,000
40.931198
80
0.606286
false
3.542239
true
false
false
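A minimal sketch distilled from the partitioner tests in the row above, assuming the same pre-1.0 TensorFlow API they exercise (tf.variable_scope, tf.get_variable, tf.GraphKeys.VARIABLES); the scope and variable names here are illustrative, not from the source:

import tensorflow as tf

def axis0_into2_partitioner(shape=None, **unused_kwargs):
    # split the first axis into two shards, as in the tests above
    part = [1] * len(shape)
    part[0] = 2
    return part

with tf.variable_scope("demo", partitioner=axis0_into2_partitioner):
    v = tf.get_variable("w", shape=(4, 3))

# the concatenated view keeps the requested shape, while the shards are
# registered as demo/w/part_0 and demo/w/part_1 in the VARIABLES collection
assert v.get_shape() == (4, 3)
names = [x.name for x in tf.get_collection(tf.GraphKeys.VARIABLES)]
assert "demo/w/part_0:0" in names and "demo/w/part_1:0" in names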
madedotcom/photon-pump
test/conversations/test_read_all_events_stream_conversation.py
1
4703
from asyncio import Queue
from uuid import uuid4

import pytest

from photonpump import messages as msg, exceptions
from photonpump import messages_pb2 as proto
from photonpump.conversations import ReadAllEvents


@pytest.mark.asyncio
async def test_read_all_request():
    output = Queue()
    convo = ReadAllEvents(msg.Position(10, 11))

    await convo.start(output)
    request = await output.get()

    body = proto.ReadAllEvents()
    body.ParseFromString(request.payload)

    assert request.command is msg.TcpCommand.ReadAllEventsForward
    assert body.commit_position == 10
    assert body.prepare_position == 11
    assert body.resolve_link_tos is True
    assert body.require_master is False
    assert body.max_count == 100


@pytest.mark.asyncio
async def test_read_all_backward():
    output = Queue()
    convo = ReadAllEvents(
        from_position=msg.Position(10, 11),
        direction=msg.StreamDirection.Backward,
        max_count=20,
    )

    await convo.start(output)
    request = await output.get()

    body = proto.ReadAllEvents()
    body.ParseFromString(request.payload)

    assert request.command is msg.TcpCommand.ReadAllEventsBackward
    assert body.commit_position == 10
    assert body.prepare_position == 11
    assert body.resolve_link_tos is True
    assert body.require_master is False
    assert body.max_count == 20


@pytest.mark.asyncio
async def test_read_all_success():
    event_1_id = uuid4()
    event_2_id = uuid4()

    convo = ReadAllEvents()
    response = proto.ReadAllEventsCompleted()
    response.result = msg.ReadEventResult.Success
    response.next_commit_position = 10
    response.next_prepare_position = 10
    response.commit_position = 9
    response.prepare_position = 9

    event_1 = proto.ResolvedEvent()
    event_1.commit_position = 8
    event_1.prepare_position = 8
    event_1.event.event_stream_id = "stream-123"
    event_1.event.event_number = 32
    event_1.event.event_id = event_1_id.bytes_le
    event_1.event.event_type = "event-type"
    event_1.event.data_content_type = msg.ContentType.Json
    event_1.event.metadata_content_type = msg.ContentType.Binary
    event_1.event.data = """
    {
        'color': 'red',
        'winner': true
    }
    """.encode(
        "UTF-8"
    )

    event_2 = proto.ResolvedEvent()
    event_2.CopyFrom(event_1)
    event_2.event.event_stream_id = "stream-456"
    event_2.event.event_type = "event-2-type"
    event_2.event.event_id = event_2_id.bytes_le
    event_2.event.event_number = 32

    response.events.extend([event_1, event_2])

    await convo.respond_to(
        msg.InboundMessage(
            uuid4(),
            msg.TcpCommand.ReadAllEventsForwardCompleted,
            response.SerializeToString(),
        ),
        None,
    )

    result = await convo.result

    assert isinstance(result, msg.AllStreamSlice)
    [event_1, event_2] = result.events

    assert event_1.stream == "stream-123"
    assert event_1.id == event_1_id
    assert event_1.type == "event-type"
    assert event_1.event_number == 32

    assert event_2.stream == "stream-456"
    assert event_2.id == event_2_id
    assert event_2.type == "event-2-type"
    assert event_2.event_number == 32


@pytest.mark.asyncio
async def test_all_events_error():
    convo = ReadAllEvents()
    response = proto.ReadAllEventsCompleted()
    response.result = msg.ReadAllResult.Error
    response.next_commit_position = 10
    response.next_prepare_position = 10
    response.commit_position = 9
    response.prepare_position = 9
    response.error = "Something really weird just happened"

    await convo.respond_to(
        msg.InboundMessage(
            uuid4(),
            msg.TcpCommand.ReadAllEventsForwardCompleted,
            response.SerializeToString(),
        ),
        None,
    )

    with pytest.raises(exceptions.ReadError) as exn:
        await convo.result

    assert exn.stream == "$all"
    assert exn.conversation_id == convo.conversation_id


@pytest.mark.asyncio
async def test_all_events_access_denied():
    convo = ReadAllEvents()
    response = proto.ReadAllEventsCompleted()
    response.result = msg.ReadAllResult.AccessDenied
    response.next_commit_position = 10
    response.next_prepare_position = 10
    response.commit_position = 9
    response.prepare_position = 9

    await convo.respond_to(
        msg.InboundMessage(
            uuid4(), msg.TcpCommand.ReadAllEventsForward, response.SerializeToString()
        ),
        None,
    )

    with pytest.raises(exceptions.AccessDenied) as exn:
        await convo.result

    assert exn.conversation_id == convo.conversation_id
    assert exn.conversation_type == "ReadAllEvents"
mit
23,669,084,093,015,520
26.828402
86
0.672549
false
3.651398
true
false
false
unixunion/python-libsolace
bin/solace-list-clients.py
1
4986
#!/usr/bin/env python
"""
Show solace clients and counts, optionally pump all stats into influxdb
"""
import logging
import sys

logging.basicConfig(format='[%(module)s] %(filename)s:%(lineno)s %(asctime)s %(levelname)s %(message)s',
                    stream=sys.stderr)

import libsolace.settingsloader as settings
from libsolace.SolaceAPI import SolaceAPI
from libsolace.SolaceXMLBuilder import SolaceXMLBuilder
from optparse import OptionParser
import simplejson as json
import pprint
import demjson
from time import gmtime, strftime
import time

pp = pprint.PrettyPrinter(indent=4, width=20)

if __name__ == '__main__':
    """ parse opts, read site.xml, list clients and optionally ship their stats. """

    usage = "list all clients in an environment"
    parser = OptionParser(usage=usage)
    parser.add_option("-e", "--env", "--environment", action="store", type="string", dest="env",
                      help="environment to run job in eg:[ dev | ci1 | si1 | qa1 | pt1 | prod ]")
    parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False,
                      help="toggles solace debug mode")
    parser.add_option("--details", action="store_true", dest="details",
                      help="Show client details", default=False)
    parser.add_option("--stats", action="store_true", dest="stats",
                      help="Show client stats", default=False)
    parser.add_option("--client", action="store", type="string", dest="client",
                      help="client filter e.g. 'dev_*'", default="*")
    parser.add_option("--influxdb", action="store_true", dest="influxdb",
                      help="influxdb url and port", default=False)
    parser.add_option("--influxdb-host", action="store", type="string", dest="influxdb_host",
                      help="influxdb hostname", default="defiant")
    parser.add_option("--influxdb-port", action="store", type="int", dest="influxdb_port",
                      help="influxdb port", default=8086)
    parser.add_option("--influxdb-user", action="store", type="string", dest="influxdb_user",
                      help="influxdb user", default="root")
    parser.add_option("--influxdb-pass", action="store", type="string", dest="influxdb_pass",
                      help="influxdb pass", default="root")
    parser.add_option("--influxdb-db", action="store", type="string", dest="influxdb_db",
                      help="influxdb db name", default="solace-clients")

    (options, args) = parser.parse_args()

    if not options.env:
        parser.print_help()
        sys.exit()
    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    if options.influxdb:
        logging.info("Connecting to influxdb")
        from influxdb import InfluxDBClient
        try:
            client = InfluxDBClient(options.influxdb_host, options.influxdb_port,
                                    options.influxdb_user, options.influxdb_pass, options.influxdb_db)
            try:
                client.create_database(options.influxdb_db)
            except Exception, e:
                logging.warn("Unable to create database, does it already exist?")
        except Exception, e:
            logging.error("Unable to connect to influxdb")
            sys.exit(1)

    # forces read-only
    options.testmode = True

    settings.env = options.env.lower()

    logging.info("Connecting to appliance in %s, testmode:%s" % (settings.env, options.testmode))
    connection = SolaceAPI(settings.env, testmode=options.testmode)

    if options.details:
        connection.x = SolaceXMLBuilder("show clients details")
        connection.x.show.client.name = options.client
        connection.x.show.client.details
    elif options.stats:
        connection.x = SolaceXMLBuilder("show clients stats")
        connection.x.show.client.name = options.client
        connection.x.show.client.stats

    # get the clients
    clients = connection.rpc(str(connection.x), primaryOnly=True)

    count = 0
    # print clients[0]
    timeNow = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
    startTime = time.time()

    for c in clients[0]['rpc-reply']['rpc']['show']['client']['primary-virtual-router']['client']:
        count += 1
        j = demjson.encode(c)
        p = json.loads(j)
        if options.stats:
            t = {}
            for k in p["stats"]:
                logging.debug("Key: %s value %s" % (k, p["stats"][k]))
                try:
                    t[k] = long(p["stats"][k])
                except Exception, ve:
                    logging.debug("skipping")
                    pass
            json_body = [{
                "measurement": "client-stats",
                "tags": {
                    "message-vpn": p['message-vpn'],
                    "name": p['name']
                },
                "fields": t,
                "time": timeNow
            }]
            # print json.dumps(json_body)
            # print json.dumps(json_body, sort_keys=False, indent=4, separators=(',', ': '))
            client.write_points(json_body)

    logging.info("Total Clients: %s" % count)
    logging.info("Time Taken: %s" % (time.time() - startTime))
mit
-2,089,378,427,571,660,000
37.953125
140
0.609306
false
3.783005
false
false
false
espensirnes/paneltime
paneltime/system/system_arguments.py
1
8361
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This module contains the arguments class used to handle regression arguments

import numpy as np
import functions as fu
import stat_functions as stat


class arguments:
    """Sets initial arguments and stores static properties of the arguments"""

    def __init__(self, panel, args_d_old):
        p, q, d, k, m = panel.pqdkm
        self.args_d_old = args_d_old
        self.categories = ['beta', 'rho', 'lambda', 'gamma', 'psi', 'omega', 'z']
        self.args_init, self.args_d_OLS, self.args_d_restricted = dict(), dict(), dict()
        self.panel = panel
        self.equations = []
        self.n_equations = len(panel.X)
        self.n_args = []
        self.positions = dict()
        self.positions_map = dict()
        self.name_positions_map = dict()
        arg_count = 0
        self.names_v = []
        self.eq_number_v = []
        if args_d_old is None:
            args_d_old = [None] * self.n_equations
        for i in range(self.n_equations):
            e = equation(panel.X[i], panel.Y[i], panel.W, self,
                         args_d_old[i], i, arg_count, panel.X_names[i])
            self.equations.append(e)
            self.n_args.append(e.n_args)
            self.args_init[i] = e.args_init
            self.args_d_OLS[i] = e.args_d_OLS
            self.args_d_restricted[i] = e.args_d_restricted
            arg_count += e.n_args
        self.args_init['rho'] = np.diag(np.ones(self.n_equations))
        self.args_d_OLS['rho'] = np.diag(np.ones(self.n_equations))
        self.args_d_restricted['rho'] = np.diag(np.ones(self.n_equations))
        self.n_args_eq = arg_count
        self.n_args_tot = int(np.sum(self.n_args) + (self.n_equations - 1) * self.n_equations / 2)
        self.rho_definitions()
        self.eq_number_v.extend([None] * (self.n_args_tot - arg_count))

    def system_conv_to_dicts(self, args):
        args_d = dict()
        if type(args[0]) == dict:
            return args
        for eq in self.equations:
            d = dict()
            for category in self.categories:
                rng = eq.positions[category]
                s = self.args_init[eq.id][category].shape
                d[category] = args[rng].reshape(s)
            args_d[eq.id] = d
        args_d['rho'] = self.rho_list_to_matrix(args[self.n_args_eq:])
        return args_d

    def system_conv_to_vector(self, args):
        args_v = []
        if type(args[0]) != dict:
            return args
        n = 0
        for i in range(self.n_equations):
            for name in self.categories:
                args_v.extend(args[i][name].flatten())
                n += len(args[i][name])
        args_v.extend(self.rho_matrix_to_list(args['rho']))
        args_v = np.array(args_v)
        return args_v

    def rho_definitions(self):
        n = self.n_equations
        self.rho_position_list = []
        r = range(n)
        x = [[[min((i, j)), max((i, j))] for i in r] for j in r]
        self.rho_position_matrix = np.array([[str(x[j][i]) for i in r] for j in r])
        for i in range(n):
            for j in range(i + 1, n):
                self.names_v.append('System reg. rho(%s,%s)' % (i, j))
                self.rho_position_list.append(x[j][i])

    def rho_list_to_matrix(self, lst):
        n = self.n_equations
        m = np.zeros((n, n))
        for k in range(len(self.rho_position_list)):
            i, j = self.rho_position_list[k]
            m[i, j] = lst[k]
            m[j, i] = lst[k]
        return m

    def rho_matrix_to_list(self, m):
        n = len(self.rho_position_list)
        lst = np.zeros(n)
        for k in range(n):
            i, j = self.rho_position_list[k]
            lst[k] = m[i, j]
        return lst


class equation:
    def __init__(self, X, Y, W, arguments, args_d_old, i, arg_count, X_names):
        a = arguments
        self.id = i
        p, q, d, k, m = a.panel.pqdkm
        self.args_init, self.args_d_OLS, self.args_d_restricted = set_init_args(
            X, Y, W, args_d_old, p, d, q, m, k, a.panel)
        self.names_d = get_namevector(a.panel, p, q, m, k, X_names, a, i)
        self.position_defs(a, arg_count, X_names)
        self.args_v = conv_to_vector(self.args_init, a.categories)
        self.n_args = len(self.args_v)
        self.args_rng = range(arg_count, arg_count + self.n_args)
        a.eq_number_v.extend([i] * self.n_args)

    def position_defs(self, system, arg_count, X_names):
        """Defines positions in the vector argument in each equation for the system args_v vector"""
        self.positions_map = dict()  # a dictionary of indices containing the category string and sub-position of the index within the category
        self.positions = dict()      # a dictionary of category strings containing the index range of the category
        self.beta_map = dict()
        k = arg_count
        for category in system.categories:
            n = len(self.args_init[category])
            rng = range(k, k + n)
            self.positions[category] = rng  # self.positions[<category>]=range(<system position start>,<system position end>)
            if category in system.positions:
                system.positions[category].append(rng)
            else:
                system.positions[category] = [rng]
            for j in rng:
                self.positions_map[j] = [category, j - k]              # self.positions_map[<system position>]=<category>,<equation position>
                system.positions_map[j] = [self.id, category, j - k]   # system.positions_map[<system position>]=<equation number>,<category>,<equation position>
            k += n
        for i in range(len(X_names)):
            self.beta_map[X_names[i]] = self.positions['beta'][i]


def initargs(X, Y, W, args_old, p, d, q, m, k, panel):
    N, T, n_x = X.shape
    armacoefs = 0
    args = dict()
    args['beta'] = np.zeros((n_x, 1))
    args['omega'] = np.zeros((W.shape[2], 1))
    args['rho'] = np.ones(p) * armacoefs
    args['lambda'] = np.ones(q) * armacoefs
    args['psi'] = np.ones(m) * armacoefs
    args['gamma'] = np.ones(k) * armacoefs
    args['z'] = np.array([])

    if m > 0 and N > 1:
        args['omega'][0][0] = 0
    if m > 0:
        args['psi'][0] = 0.00001
        args['z'] = np.array([0.00001])

    return args


def set_init_args(X, Y, W, args_old, p, d, q, m, k, panel):
    args = initargs(X, Y, W, args_old, p, d, q, m, k, panel)
    args_restricted = fu.copy_array_dict(args)

    if panel.has_intercept:
        args_restricted['beta'][0][0] = panel.mean(Y)
        args_restricted['omega'][0][0] = np.log(panel.var(Y))
    else:
        args_restricted['omega'][0][0] = np.log(panel.var(Y, k=0, mean=0))

    beta, e = stat.OLS(panel, X, Y, return_e=True)
    args['beta'] = beta
    args['omega'][0] = np.log(np.sum(e**2 * panel.included) / np.sum(panel.included))
    args_OLS = fu.copy_array_dict(args)

    if panel.m_zero:
        args['omega'][0] = 0

    if not args_old is None:
        args['beta'] = insert_arg(args['beta'], args_old['beta'])
        args['omega'] = insert_arg(args['omega'], args_old['omega'])
        args['rho'] = insert_arg(args['rho'], args_old['rho'])
        args['lambda'] = insert_arg(args['lambda'], args_old['lambda'])
        args['psi'] = insert_arg(args['psi'], args_old['psi'])
        args['gamma'] = insert_arg(args['gamma'], args_old['gamma'])
        args['z'] = insert_arg(args['z'], args_old['z'])

    return args, args_OLS, args_restricted


def conv_to_dict(args, categories, positions):
    """Converts a vector argument args to a dictionary argument.
    If args is a dict, it is returned unchanged."""
    if type(args) == dict:
        return args
    else:
        d = dict()
        k = 0
        for i in categories:
            n = len(positions[i])
            rng = range(k, k + n)
            d[i] = args[rng]
            if i == 'beta' or i == 'omega':
                d[i] = d[i].reshape((n, 1))
            k += n
        return d


def conv_to_vector(args, categories):
    """Converts a dict argument args to a vector argument.
    If args is already a vector, it is returned unchanged."""
    if type(args) == list or type(args) == np.ndarray:
        return args
    v = np.array([])
    for category in categories:
        s = args[category]
        if type(s) == np.ndarray:
            s = s.flatten()
        v = np.concatenate((v, s))
    return v


def get_namevector(panel, p, q, m, k, X_names, system, eq_num):
    """Creates a vector of the names of all regression variables, including
    the variables, ARIMA and GARCH terms. This defines the positions of
    the variables throughout the estimation."""
    names_d = dict()
    # sequence must match definition of categories in arguments.__init__:
    # self.categories=['beta','rho','lambda','gamma','psi','omega','z']
    eq_prefix = '%02d|' % (eq_num,)
    names_v = [eq_prefix + i for i in X_names]  # copy variable names
    names_d['beta'] = list(names_v)
    add_names(p, eq_prefix + 'AR term %s (p)', 'rho', names_d, names_v)
    add_names(q, eq_prefix + 'MA term %s (q)', 'lambda', names_d, names_v)
    add_names(k, eq_prefix + 'ARCH term %s (k)', 'gamma', names_d, names_v)
    add_names(m, eq_prefix + 'MACH term %s (m)', 'psi', names_d, names_v)
    names_d['omega'] = [eq_prefix + i for i in panel.heteroscedasticity_factors]  # copy variable names
    names_v.extend(names_d['omega'])
    if m > 0:
        names_d['z'] = [eq_prefix + 'z in h(e,z)']
        names_v.extend(names_d['z'])
    n = len(system.names_v)
    for i in range(len(names_v)):
        system.name_positions_map[names_v[i]] = n + i
    system.names_v.extend(names_v)
    return names_d


def add_names(T, namesstr, category, d, names):
    a = []
    for i in range(T):
        a.append(namesstr % (i,))
    names.extend(a)
    d[category] = a


def insert_arg(arg, add):
    n = min((len(arg), len(add)))
    arg[:n] = add[:n]
    return arg
gpl-3.0
7,984,326,016,556,555,000
28.234266
139
0.668222
false
2.527509
false
false
false
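A toy illustration (not part of the module above) of the packing scheme that conv_to_vector and conv_to_dict implement: category arrays are flattened and concatenated in a fixed category order, so every parameter keeps a stable position in the estimation vector. The numbers are made up:

import numpy as np

categories = ['beta', 'rho', 'lambda']
args = {'beta': np.array([[0.5], [1.2]]),  # (k, 1) regression coefficients
        'rho': np.array([0.1]),            # AR term
        'lambda': np.array([0.2])}         # MA term

# dict -> vector: v == array([0.5, 1.2, 0.1, 0.2])
v = np.concatenate([args[c].flatten() for c in categories])

# vector -> dict, by slicing the same ranges back out
positions, k = {}, 0
for c in categories:
    positions[c] = list(range(k, k + args[c].size))
    k += args[c].size
rebuilt = {c: v[positions[c]] for c in categories}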
praekelt/panya-event
event/models.py
1
1514
from django.db import models
from django.core.urlresolvers import reverse

from ckeditor.fields import RichTextField
from panya.models import ModelBase

PROVINCES = (
    ('Eastern Cape', 'Eastern Cape'),
    ('Free State', 'Free State'),
    ('Gauteng', 'Gauteng'),
    ('KwaZulu-Natal', 'KwaZulu-Natal'),
    ('Limpopo', 'Limpopo'),
    ('Mpumalanga', 'Mpumalanga'),
    ('Northern Cape', 'Northern Cape'),
    ('North-West', 'North-West'),
    ('Western Cape', 'Western Cape'),
)


class Location(models.Model):
    city = models.CharField(max_length=255, help_text='Name of the city.')
    province = models.CharField(
        choices=PROVINCES,
        max_length=255,
        help_text='Name of the province.'
    )

    def __unicode__(self):
        return "%s, %s" % (self.city, self.province)


class Venue(models.Model):
    name = models.CharField(max_length=255, help_text='A short descriptive name.')
    address = models.CharField(max_length=512, help_text='Physical venue address.')
    location = models.ForeignKey(
        Location,
        blank=True,
        null=True,
        help_text='Location of the venue.'
    )

    def __unicode__(self):
        return self.name


class Event(ModelBase):
    venue = models.ForeignKey(
        Venue,
        help_text='Venue where the event will take place.'
    )
    content = RichTextField(help_text='Full article detailing this event.')

    def get_absolute_url(self):
        return reverse('event_object_detail', kwargs={'slug': self.slug})
bsd-3-clause
7,660,587,681,568,411,000
28.115385
83
0.637384
false
3.356984
false
false
false
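A hypothetical Django shell session using these models (assumes a configured project with this app installed; the field values are made up):

loc = Location.objects.create(city="Cape Town", province="Western Cape")
venue = Venue.objects.create(name="City Hall",
                             address="12 Darling St, Cape Town",
                             location=loc)
unicode(loc)    # u'Cape Town, Western Cape'
unicode(venue)  # u'City Hall'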
xindiguo/pythonSynapseUtils
pythonSynapseUtils/synutils.py
1
4081
#!/usr/bin/env python

import argparse
import os
import sys
import synapseclient
import hashlib
import string

script_path = os.path.dirname(__file__)
local_module_path = os.path.abspath(os.path.join(script_path, 'lib'))
sys.path.append(local_module_path)
import s3

STATIC_BUCKET = "static.synapse.org"


def create_html_file(html_link):
    # get a unique file name from txt/link
    html_file_name = str(hashlib.md5(html_link).hexdigest()) + '.html'
    f = open(html_file_name, 'w')
    html_template = string.Template("""
    <!DOCTYPE html>
    <html>
    <body>
    <iframe src="$HTML_LINK" width="1500" height="1000" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>
    </body>
    </html>
    """)
    html_content = html_template.substitute(HTML_LINK=html_link)
    f.write(html_content)
    f.close()
    os.chmod(html_file_name, 0755)  # make the file web readable before upload
    return html_file_name


def s3manage(args):
    """ Utilities for managing S3 buckets """
    # establish a connection to S3
    bucket = s3.bucketManager(STATIC_BUCKET, args.aws_key, args.aws_secret, rememberMe=args.rememberMe)

    # if the user specifies an html link
    if args.html_link is not None:
        html_file = create_html_file(args.html_link)
        args.upload_path = html_file

    if os.path.isdir(args.upload_path):
        url = bucket.uploadDir(args.upload_path, args.upload_prefix)
    else:
        url = bucket.uploadFiles(args.upload_path, args.upload_prefix)

    if args.synapse_wikiID is not None:
        embed_url_in_synapse_wiki(url, args.synapse_wikiID)


def embed_url_in_synapse_wiki(url, wikiID):
    import synapseclient
    syn = synapseclient.login()
    wiki = syn.getWiki(wikiID)
    markdown = wiki['markdown']

    # complete hack
    if len(url) > 1:
        url = [url[x] for x in url if x.endswith('index.html')]
        url = url[0]
    else:
        url = url.values()[0]

    # percent encoded URL
    import urllib
    url = urllib.quote(url, safe='')
    link_markdown = '${iframe?site=' + url + '&height=1000}'
    wiki['markdown'] = link_markdown
    wiki = syn.store(wiki)
    syn.onweb(wikiID)


def build_parser():
    """Builds the argument parser and returns the result."""
    parser = argparse.ArgumentParser(description='Synapse Python Utilities')
    parser.add_argument('--debug', dest='debug', action='store_true')

    subparsers = parser.add_subparsers(title='commands',
                                       description='The following commands are available:',
                                       help='For additional help: "synutils.py <COMMAND> -h"')

    parser_s3 = subparsers.add_parser('s3', help='utilities to manage data on static.synapse.org')
    parser_s3.add_argument('-k', '--aws_key', dest='aws_key', help='AWS Key', default=None)
    parser_s3.add_argument('-s', '--aws_secret', dest='aws_secret', help='AWS secret key', default=None)
    parser_s3.add_argument('-up', '--upload', dest='upload_path', type=str, default=None)
    parser_s3.add_argument('-l', '--link', dest='html_link', type=str, default=None,
                           help="html link to embed in a synapse wiki")
    parser_s3.add_argument('-w', '--wikiID', dest='synapse_wikiID', type=str, default=None,
                           help="synapse wiki id to embed the link in")
    parser_s3.add_argument('-p', '--prefix', dest='upload_prefix', type=str, default='scratch/',
                           help='prefix adds the sub dir structure on S3 eg. test/ will add the file under test/ folder on s3 bucket')
    parser_s3.add_argument('--rememberMe', '--remember-me', dest='rememberMe', action='store_true', default=False,
                           help='Cache credentials for automatic authentication for future interactions')
    parser_s3.set_defaults(func=s3manage)
    return parser


def perform_main(args):
    if 'func' in args:
        try:
            args.func(args)
        except Exception as ex:
            raise


def main():
    args = build_parser().parse_args()
    perform_main(args)


if __name__ == "__main__":
    main()
apache-2.0
-1,364,867,809,172,800,000
33.008333
144
0.645675
false
3.435185
false
false
false
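A short sketch of the wiki-embed convention that embed_url_in_synapse_wiki builds (Python 2, matching the script above); the URL is illustrative:

import urllib

url = "https://s3.amazonaws.com/static.synapse.org/scratch/index.html"
markdown = '${iframe?site=' + urllib.quote(url, safe='') + '&height=1000}'
# -> ${iframe?site=https%3A%2F%2Fs3.amazonaws.com%2Fstatic.synapse.org%2Fscratch%2Findex.html&height=1000}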
dddomodossola/remi
examples/onclose_window_app.py
1
1513
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import remi.gui as gui from remi import start, App class MyApp(App): def main(self, name='world'): # margin 0px auto allows to center the app to the screen wid = gui.VBox(width=300, height=200, margin='0px auto') lbl = gui.Label("Close or reload the page, the console thread will stop automatically.") wid.append(lbl) # add the following 3 lines to your app and the on_window_close method to make the console close automatically tag = gui.Tag(_type='script') tag.add_child("javascript", """window.onunload=function(e){remi.sendCallback('%s','%s');return "close?";};""" % ( str(id(self)), "on_window_close")) wid.add_child("onunloadevent", tag) # returning the root widget return wid def on_window_close(self): # here you can handle the unload print("app closing") self.close() if __name__ == "__main__": start(MyApp)
apache-2.0
-3,865,904,934,796,648,400
34.186047
121
0.662921
false
3.992084
false
false
false
opentripplanner/OTPQA
hreport.py
1
2481
import simplejson as json
import numpy as np


def parsetime(aa):
    if aa is None:
        return None
    return float(aa.split()[0])


def main(filenames):
    if len(filenames) == 0:
        return

    yield "<html>"
    yield """<head><style>table, th, td { border: 1px solid black; border-collapse: collapse; } th, td { text-align: left; }</style></head>"""

    datasets = []
    shas = []
    for fn in filenames:
        blob = json.load(open(fn))
        shas.append(blob['git_sha1'])
        dataset = dict([(response["id_tuple"], response) for response in blob['responses']])
        datasets.append(dataset)

    id_tuples = datasets[0].keys()

    yield """<table border="1">"""

    dataset_total_times = dict(zip(range(len(datasets)), [[] for x in range(len(datasets))]))
    dataset_avg_times = dict(zip(range(len(datasets)), [[] for x in range(len(datasets))]))
    dataset_fails = dict(zip(range(len(datasets)), [0] * len(datasets)))

    yield "<tr><td>request id</td>"
    for fn, sha in zip(filenames, shas):
        yield "<td>%s (commit:%s)</td>" % (fn, sha)
    yield "</tr>"

    for id_tuple in id_tuples:
        yield """<tr><td rowspan="2"><a href="%s">%s</a></td>""" % (datasets[0][id_tuple]['url'], id_tuple)
        for i, dataset in enumerate(datasets):
            response = dataset[id_tuple]
            dataset_total_times[i].append(parsetime(response['total_time']))
            dataset_avg_times[i].append(parsetime(response['avg_time']))
            yield "<td>%s total, %s avg</td>" % (response['total_time'], response['avg_time'])
        yield "</tr>"

        for i, dataset in enumerate(datasets):
            yield "<td>"
            response = dataset[id_tuple]
            yield "<table border=1 width=100%><tr>"
            if len(response['itins']) == 0:
                dataset_fails[i] += 1
                yield "<td style=\"background-color:#EDA1A1\">NONE</td>"
            for itin in response['itins']:
                filling = itin['routes']
                if filling == "{}":
                    color = "#EDECA1"
                else:
                    color = "#AEEDA1"
                yield "<td style=\"background-color:%s\">%s</td>" % (color, filling)
            yield "</tr></table>"
            yield "</td>"
        yield "</tr>"

    yield "<tr><td>stats</td>"
    for i in range(len(datasets)):
        yield "<td>fails: %s (%.2f%%). total time: median:%.2fs mean:%.2fs</td>" % (
            dataset_fails[i],
            100 * dataset_fails[i] / float(len(id_tuples)),
            np.median(dataset_total_times[i]),
            np.mean(dataset_total_times[i]))
    yield "</tr>"

    yield "</table>"
    yield "</html>"


if __name__ == '__main__':
    import sys

    if len(sys.argv) < 2:
        print "usage: cmd fn1 [fn2 [fn3 ...]]"
        exit()

    for line in main(sys.argv[1:]):
        print line
gpl-3.0
-7,302,979,014,916,302,000
24.854167
204
0.614672
false
2.868208
false
false
false
pdamodaran/yellowbrick
yellowbrick/text/dispersion.py
1
10916
# yellowbrick.text.dispersion # Implementations of lexical dispersions for text visualization. # # Author: Larry Gray # Created: 2018-06-21 10:06 # # Copyright (C) 2018 District Data Labs # For license information, see LICENSE.txt # # ID: dispersion.py [] [email protected] $ """ Implementation of lexical dispersion for text visualization """ ########################################################################## ## Imports ########################################################################## from collections import defaultdict import itertools from yellowbrick.text.base import TextVisualizer from yellowbrick.style.colors import resolve_colors from yellowbrick.exceptions import YellowbrickValueError import numpy as np ########################################################################## ## Dispersion Plot Visualizer ########################################################################## class DispersionPlot(TextVisualizer): """ DispersionPlotVisualizer allows for visualization of the lexical dispersion of words in a corpus. Lexical dispersion is a measure of a word's homeogeneity across the parts of a corpus. This plot notes the occurences of a word and how many words from the beginning it appears. Parameters ---------- target_words : list A list of target words whose dispersion across a corpus passed at fit will be visualized. ax : matplotlib axes, default: None The axes to plot the figure on. labels : list of strings The names of the classes in the target, used to create a legend. Labels must match names of classes in sorted order. colors : list or tuple of colors Specify the colors for each individual class colormap : string or matplotlib cmap Qualitative colormap for discrete target ignore_case : boolean, default: False Specify whether input will be case-sensitive. annotate_docs : boolean, default: False Specify whether document boundaries will be displayed. Vertical lines are positioned at the end of each document. kwargs : dict Pass any additional keyword arguments to the super class. These parameters can be influenced later on in the visualization process, but can and should be set as early as possible. """ # NOTE: cannot be np.nan NULL_CLASS = None def __init__(self, target_words, ax=None, colors=None, ignore_case=False, annotate_docs=False, labels=None, colormap=None, **kwargs): super(DispersionPlot, self).__init__(ax=ax, **kwargs) self.labels = labels self.colors = colors self.colormap = colormap self.target_words = target_words self.ignore_case = ignore_case self.annotate_docs = annotate_docs def _compute_dispersion(self, text, y): self.boundaries_ = [] offset = 0 if y is None: y = itertools.repeat(None) for doc, target in zip(text, y): for word in doc: if self.ignore_case: word = word.lower() # NOTE: this will find all indices if duplicate words are supplied # In the case that word is not in target words, any empty list is # returned and no data will be yielded offset += 1 for y_coord in (self.indexed_words_ == word).nonzero()[0]: y_coord = int(y_coord) yield (offset, y_coord, target) if self.annotate_docs: self.boundaries_.append(offset) self.boundaries_ = np.array(self.boundaries_, dtype=int) def _check_missing_words(self, points): for index in range(len(self.indexed_words_)): if index in points[:,1]: pass else: raise YellowbrickValueError(( "The indexed word '{}' is not found in " "this corpus" ).format(self.indexed_words_[index])) def fit(self, X, y=None, **kwargs): """ The fit method is the primary drawing input for the dispersion visualization. 
Parameters ---------- X : list or generator Should be provided as a list of documents or a generator that yields a list of documents that contain a list of words in the order they appear in the document. y : ndarray or Series of length n An optional array or series of target or class values for instances. If this is specified, then the points will be colored according to their class. kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer """ if y is not None: self.classes_ = np.unique(y) elif y is None and self.labels is not None: self.classes_ = np.array([self.labels[0]]) else: self.classes_ = np.array([self.NULL_CLASS]) # Create an index (e.g. the y position) for the target words self.indexed_words_ = np.flip(self.target_words, axis=0) if self.ignore_case: self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_]) # Stack is used to create a 2D array from the generator try: points_target = np.stack(self._compute_dispersion(X, y)) except ValueError: raise YellowbrickValueError(( "No indexed words were found in the corpus" )) points = np.stack(zip(points_target[:,0].astype(int), points_target[:,1].astype(int))) self.target = points_target[:,2] self._check_missing_words(points) self.draw(points, self.target) return self def draw(self, points, target=None, **kwargs): """ Called from the fit method, this method creates the canvas and draws the plot on it. Parameters ---------- kwargs: generic keyword arguments. """ # Resolve the labels with the classes labels = self.labels if self.labels is not None else self.classes_ if len(labels) != len(self.classes_): raise YellowbrickValueError(( "number of supplied labels ({}) does not " "match the number of classes ({})" ).format(len(labels), len(self.classes_))) # Create the color mapping for the labels. color_values = resolve_colors( n_colors=len(labels), colormap=self.colormap, colors=self.color) colors = dict(zip(labels, color_values)) # Transform labels into a map of class to label labels = dict(zip(self.classes_, labels)) # Define boundaries with a vertical line if self.annotate_docs: for xcoords in self.boundaries_: self.ax.axvline(x=xcoords, color='lightgray', linestyle='dashed') series = defaultdict(lambda: {'x':[], 'y':[]}) if target is not None: for point, t in zip(points, target): label = labels[t] series[label]['x'].append(point[0]) series[label]['y'].append(point[1]) else: label = self.classes_[0] for x, y in points: series[label]['x'].append(x) series[label]['y'].append(y) for label, points in series.items(): self.ax.scatter(points['x'], points['y'], marker='|', c=colors[label], zorder=100, label=label) self.ax.set_yticks(list(range(len(self.indexed_words_)))) self.ax.set_yticklabels(self.indexed_words_) def finalize(self, **kwargs): """ The finalize method executes any subclass-specific axes finalization steps. The user calls poof & poof calls finalize. Parameters ---------- kwargs: generic keyword arguments. """ self.ax.set_ylim(-1, len(self.indexed_words_)) self.ax.set_title("Lexical Dispersion Plot") self.ax.set_xlabel("Word Offset") self.ax.grid(False) # Add the legend outside of the figure box. 
if not all(self.classes_ == np.array([self.NULL_CLASS])): box = self.ax.get_position() self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ########################################################################## ## Quick Method ########################################################################## def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None, labels=None, annotate_docs=False, ignore_case=False, **kwargs): """ Displays lexical dispersion plot for words in a corpus This helper function is a quick wrapper to utilize the DisperstionPlot Visualizer for one-off analysis Parameters ---------- words : list A list of words whose dispersion will be examined within a corpus y : ndarray or Series of length n An optional array or series of target or class values for instances. If this is specified, then the points will be colored according to their class. corpus : list Should be provided as a list of documents that contain a list of words in the order they appear in the document. ax : matplotlib axes, default: None The axes to plot the figure on. labels : list of strings The names of the classes in the target, used to create a legend. Labels must match names of classes in sorted order. colors : list or tuple of colors Specify the colors for each individual class colormap : string or matplotlib cmap Qualitative colormap for discrete target annotate_docs : boolean, default: False Specify whether document boundaries will be displayed. Vertical lines are positioned at the end of each document. ignore_case : boolean, default: False Specify whether input will be case-sensitive. kwargs : dict Pass any additional keyword arguments to the super class. Returns ------- ax: matplotlib axes Returns the axes that the plot was drawn on """ # Instantiate the visualizer visualizer = DispersionPlot( words, ax=ax, colors=colors, colormap=colormap, ignore_case=ignore_case, labels=labels, annotate_docs=annotate_docs, **kwargs ) # Fit and transform the visualizer (calls draw) visualizer.fit(corpus, y, **kwargs) # Return the axes object on the visualizer return visualizer.ax
apache-2.0
-2,299,755,290,369,907,200
33.764331
84
0.588402
false
4.501443
false
false
false
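A minimal usage sketch of the dispersion() quick method defined above, on a toy tokenized corpus (assumes a working matplotlib backend; the documents are made up):

corpus = [
    ["the", "cat", "sat", "on", "the", "mat"],
    ["the", "dog", "sat", "on", "the", "log"],
]
# one marker per occurrence of each target word, plotted against word offset;
# annotate_docs adds a dashed line at each document boundary
ax = dispersion(["cat", "dog"], corpus, ignore_case=True, annotate_docs=True)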
lucasberti/telegrao-py
plugins/apex.py
1
2106
import requests
import json

from api import send_message

ENDPOINT = "https://public-api.tracker.gg/apex/v1/standard/profile/5/"

PLAYERS = {
    14160874: "bertoncio",
    16631085: "beartz",
    85867003: "R3TCH4",
    52451934: "xisteaga",
    10549434: "Axasdas123",
    123123862: "MeroFabio",
    569341881: "burnovisk",
    299335806: "Springl3s",
    77547673: "BeDabul"
}


def get_stats(username):
    headers = {
        "TRN-Api-Key": "987c5b41-5649-4b4e-9d3f-4d58cc904584"
    }
    return requests.get(ENDPOINT + username, headers=headers).json()


def get_string(data):
    data = data["data"]
    legend_on_menu = data["children"][0]["metadata"]["legend_name"]
    username = data["metadata"]["platformUserHandle"]
    hero_stats = ""

    for legend in data["children"]:
        hero_stats += f"{legend['metadata']['legend_name']}\n"
        for stat in legend["stats"]:
            name = stat["metadata"]["name"]
            value = stat["displayValue"]
            percentile = stat["percentile"] if "percentile" in stat.keys() else "desconecidi"
            rank = stat["rank"] if "rank" in stat.keys() else "desconecidi"
            hero_stats += f"{name}: {value} (top {percentile}% rank {rank})\n"
        hero_stats += "\n"

    global_stats = ""
    for stat in data["stats"]:
        global_stats += f"{stat['metadata']['name']}: {stat['displayValue']}\n"

    return f"""{username} mt noob rs ta c {legend_on_menu} selelessiondn
{hero_stats}
globau:
{global_stats}"""


def on_msg_received(msg, matches):
    chat = msg["chat"]["id"]
    user = msg["from"]["id"]

    player = None

    if matches.group(1):
        player = matches.group(1)
    else:
        if user in PLAYERS:
            player = PLAYERS[user]

    if player is not None:
        try:
            data = get_stats(player)
            stats = get_string(data)
            print(stats)
            send_message(chat, stats)
        except Exception as e:
            send_message(chat, f"vish deu merda..... {e}")
mit
2,123,689,757,657,363,200
25
93
0.561254
false
3.337559
false
false
false
vik001ind/RSAExploits
RSAExploits/exploits/hastad.py
1
3524
""" Class defintion for simple hastad broadcast exploit """ from RSAExploits import util from RSAExploits.exploits.exploit import Exploit class Hastad(Exploit): """ Class providing a run interface to hastad broadcast exploit""" def run(self, rsadata_list, info_dict = None): """ Attempts to recover plaintext using Hastad's broadcast attack This attack works when we have a list of RSA_Data objects such that the length of the list is greater than or equal to e, unique and coprime moduli are used for each encryption, and the same plaintext message is encrypted to generate all ciphertexts. Args: rsadata_list: A list of RSA_Data objects on which to attempt Hastad's exploit info_dict: Not currently used Assumptions: None of the RSA_Data objects in rsadata_list have the same public exponent e, same id number, and same modulus N. This should be prevented by calling the remove_duplicates() function in RSA_Data.py Side Effects: If a message is recovered, the corresponding RSA_Data objects will be updated with this information Return: True if at least one message was recovered """ print ("Hastad: Running Attack...") success = False e_id_dict = self.group_by_e_and_id(rsadata_list) for group in e_id_dict: msg = self.hastad_broadcast_exploit(e_id_dict[group]) if msg != None: success = True for rsadata in e_id_dict[group]: rsadata.set_m(msg) if success: print("Hastad: Success, message found.") else: print("Hastad: Failure, message not found.") return success @staticmethod def group_by_e_and_id(rsadata_list): """ Group the RSA_Data objects by public exponent and id """ e_id_dict = {} for rsadata in rsadata_list: # Only consider entries with an ID number if rsadata.get_id() == None: continue # Only consider entries with a ciphertext if rsadata.get_c() == None: continue # If the (e, idnum) tuple already exists in the dictionary, just # append the new entry to the already existing list if (rsadata.get_e(), rsadata.get_id()) in e_id_dict: e_id_dict[(rsadata.get_e(), rsadata.get_id())].append(rsadata) # Otherwise, create a new list for the new tuple else: e_id_dict[(rsadata.get_e(), rsadata.get_id())] = [rsadata] return e_id_dict @staticmethod def hastad_broadcast_exploit(rsadata_list): """ Recover the plaintext message using chinese remainder theorem """ e = rsadata_list[0].get_e() if len(rsadata_list) < e: return None ns = [] cs = [] for index in range(e): ns.append(rsadata_list[index].get_n()) cs.append(rsadata_list[index].get_c()) s = util.crt(ns, cs) pt = util.int_nthroot(s, e) if pt is not None: return pt else: return None
mit
-132,503,831,933,218,540
33.891089
80
0.542849
false
4.308068
false
false
false
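A self-contained sketch of the arithmetic that util.crt and util.int_nthroot are assumed to provide, on toy-sized numbers (real attacks use full-size RSA moduli): three ciphertexts of the same message under e = 3 and pairwise coprime moduli are combined with the Chinese remainder theorem, and the plaintext falls out as an exact integer cube root.

from functools import reduce

def modinv(a, n):
    # extended Euclid; assumes gcd(a, n) == 1
    t, new_t, r, new_r = 0, 1, n, a % n
    while new_r:
        q = r // new_r
        t, new_t = new_t, t - q * new_t
        r, new_r = new_r, r - q * new_r
    return t % n

def crt(ns, cs):
    # combine c_i mod n_i into s mod prod(n_i)
    N = reduce(lambda a, b: a * b, ns)
    s = 0
    for n_i, c_i in zip(ns, cs):
        m = N // n_i
        s += c_i * m * modinv(m, n_i)
    return s % N

def int_nthroot(x, n):
    # exact integer n-th root by bisection; None if x is not a perfect power
    lo, hi = 0, 1 << ((x.bit_length() + n - 1) // n + 1)
    while lo < hi:
        mid = (lo + hi) // 2
        if mid ** n < x:
            lo = mid + 1
        else:
            hi = mid
    return lo if lo ** n == x else None

e, m = 3, 1234
ns = [2539 * 2543, 2549 * 2551, 2557 * 2579]  # pairwise coprime moduli
cs = [pow(m, e, n) for n in ns]               # same plaintext every time
assert int_nthroot(crt(ns, cs), e) == m
# works because m**e < n1*n2*n3, so CRT recovers m**e exactly over the integers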
aschleg/mathpy
mathpy/special/gamma.py
1
1555
# encoding=utf8

import numpy as np


def k_function(n):
    r"""
    Returns the K-function up to a given integer n.

    Parameters
    ----------
    n : int
        The length of the returned K-function as in :math:`K(n)`.

    Returns
    -------
    array-like
        numpy array of the computed integers returned by the K-function.

    Notes
    -----
    The K-function generalizes the hyperfactorial for complex numbers and is
    defined for positive integers as:

    .. math::

        K(n) \equiv 1^1 2^2 3^3 \cdots (n - 1)^{n - 1}

    The K-function can also be expressed as a hyperfactorial, :math:`H`:

    .. math::

        K(n) = H(n - 1)

    The Gamma function and Barnes G-function are also closely related by:

    .. math::

        K(n) = \frac{[\Gamma(n)]^{n - 1}}{G(n)}

    Examples
    --------
    >>> k_function(3)
    array([1., 1., 4.])
    >>> k_function(5).astype(int)
    array([    1,     1,     4,   108, 27648])

    References
    ----------
    Sloane, N. J. A. Sequence A002109/M3706 in "The On-Line Encyclopedia of
    Integer Sequences."

    Weisstein, Eric W. "K-Function." From MathWorld--A Wolfram Web Resource.
    http://mathworld.wolfram.com/K-Function.html

    Wikipedia contributors. (2015, December 5). K-function. In Wikipedia,
    The Free Encyclopedia. Retrieved 13:56, March 1, 2018, from
    https://en.wikipedia.org/w/index.php?title=K-function&oldid=693891074

    """
    kn = np.empty(n)
    k = 1

    for i in np.arange(0, n):
        k *= i ** i
        kn[i] = k

    return kn
mit
-6,329,161,334,717,358,000
21.867647
114
0.573633
false
3.246347
false
false
false
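A quick check of the K(n) = H(n - 1) identity from the docstring, using an exact-integer hyperfactorial (this helper is illustrative, not part of the module):

def hyperfactorial(n):
    # H(n) = 1^1 * 2^2 * ... * n^n, with H(0) = 1 (empty product)
    h = 1
    for i in range(1, n + 1):
        h *= i ** i
    return h

kn = k_function(5).astype(int)  # array([1, 1, 4, 108, 27648])
# kn[i] holds K(i + 1), which should equal H(i)
assert all(kn[i] == hyperfactorial(i) for i in range(5))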
GiulioRossetti/ndlib
ndlib/models/epidemics/IndependentCascadesModel.py
1
3628
from ..DiffusionModel import DiffusionModel
import numpy as np
import future.utils

__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "[email protected]"


class IndependentCascadesModel(DiffusionModel):
    """
    Edge Parameters to be specified via ModelConfig

    :param threshold: The edge threshold. By default, a value of 0.1 is assumed for all edges.
    """

    def __init__(self, graph, seed=None):
        """
        Model Constructor

        :param graph: A networkx graph object
        """
        super(self.__class__, self).__init__(graph, seed)
        self.available_statuses = {
            "Susceptible": 0,
            "Infected": 1,
            "Removed": 2
        }
        self.parameters = {
            "model": {},
            "nodes": {},
            "edges": {
                "threshold": {
                    "descr": "Edge threshold",
                    "range": [0, 1],
                    "optional": True,
                    "default": 0.1
                }
            },
        }
        self.name = "Independent Cascades"

    def iteration(self, node_status=True):
        """
        Execute a single model iteration

        :return: Iteration_id, Incremental node status (dictionary node->status)
        """
        self.clean_initial_status(self.available_statuses.values())

        actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}

        if self.actual_iteration == 0:
            self.actual_iteration += 1
            delta, node_count, status_delta = self.status_delta(actual_status)
            if node_status:
                return {"iteration": 0, "status": actual_status.copy(),
                        "node_count": node_count.copy(), "status_delta": status_delta.copy()}
            else:
                return {"iteration": 0, "status": {},
                        "node_count": node_count.copy(), "status_delta": status_delta.copy()}

        for u in self.graph.nodes:
            if self.status[u] != 1:
                continue

            # neighbors and successors (in DiGraph) produce the same result
            neighbors = list(self.graph.neighbors(u))

            # Standard threshold
            if len(neighbors) > 0:
                threshold = 1.0 / len(neighbors)

                for v in neighbors:
                    if actual_status[v] == 0:
                        key = (u, v)

                        # Individually specified thresholds
                        if 'threshold' in self.params['edges']:
                            if key in self.params['edges']['threshold']:
                                threshold = self.params['edges']['threshold'][key]
                            elif (v, u) in self.params['edges']['threshold'] and not self.graph.directed:
                                threshold = self.params['edges']['threshold'][(v, u)]

                        flip = np.random.random_sample()
                        if flip <= threshold:
                            actual_status[v] = 1

            actual_status[u] = 2

        delta, node_count, status_delta = self.status_delta(actual_status)
        self.status = actual_status
        self.actual_iteration += 1

        if node_status:
            return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
                    "node_count": node_count.copy(), "status_delta": status_delta.copy()}
        else:
            return {"iteration": self.actual_iteration - 1, "status": {},
                    "node_count": node_count.copy(), "status_delta": status_delta.copy()}
bsd-2-clause
-6,692,687,738,186,138,000
35.28
118
0.507993
false
4.435208
false
false
false
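A usage sketch following ndlib's usual ModelConfig pattern. The seeding parameter name below ('fraction_infected') is the one used by recent ndlib releases (older ones called it 'percentage_infected'), so treat that name as an assumption; graph size and thresholds are arbitrary:

import networkx as nx
import ndlib.models.ModelConfig as mc
from ndlib.models.epidemics.IndependentCascadesModel import IndependentCascadesModel

g = nx.erdos_renyi_graph(1000, 0.01)
model = IndependentCascadesModel(g)

config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.05)   # initial seed set
for e in g.edges():
    config.add_edge_configuration("threshold", e, 0.1)  # per-edge threshold
model.set_initial_status(config)

iterations = model.iteration_bunch(10)  # list of per-iteration status deltas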
Northeaster/TargetSentimentAnalysis
lib/rnn_cells/gru_cell.py
1
1575
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from lib.rnn_cells.base_cell import BaseCell
from lib import linalg


#***************************************************************
class GRUCell(BaseCell):
  """"""

  #=============================================================
  def __call__(self, inputs, state, scope=None):
    """"""

    with tf.variable_scope(scope or type(self).__name__):
      with tf.variable_scope('Gates'):
        linear = linalg.linear([inputs, state],
                               self.output_size,
                               add_bias=True,
                               n_splits=2,
                               moving_params=self.moving_params)
        update_act, reset_act = linear
        update_gate = linalg.sigmoid(update_act - self.forget_bias)
        reset_gate = linalg.sigmoid(reset_act)
        reset_state = reset_gate * state
      with tf.variable_scope('Candidate'):
        hidden_act = linalg.linear([inputs, reset_state],
                                   self.output_size,
                                   add_bias=False,
                                   moving_params=self.moving_params)
        hidden_tilde = self.recur_func(hidden_act)
      hidden = update_gate * state + (1 - update_gate) * hidden_tilde
    return hidden, hidden

  #=============================================================
  @property
  def state_size(self):
    return self.output_size
apache-2.0
-7,597,540,596,920,359,000
34.795455
68
0.486984
false
4.578488
false
false
false
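A plain-numpy sketch of the arithmetic in __call__ above. The cell computes both gates from one shared linear map with two splits; separate weight matrices are used here for clarity, recur_func is taken to be tanh, and all parameters are random stand-ins:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, W_u, W_r, W_c, b_u, b_r, forget_bias=0.0):
    xh = np.concatenate([x, h])
    update = sigmoid(xh @ W_u + b_u - forget_bias)        # 'Gates', first split
    reset = sigmoid(xh @ W_r + b_r)                       # 'Gates', second split
    cand = np.tanh(np.concatenate([x, reset * h]) @ W_c)  # 'Candidate', no bias
    return update * h + (1.0 - update) * cand

n_in, n_hid = 4, 3
rng = np.random.RandomState(0)
x, h = rng.randn(n_in), np.zeros(n_hid)
W_u, W_r, W_c = (rng.randn(n_in + n_hid, n_hid) for _ in range(3))
b_u = b_r = np.zeros(n_hid)
h_next = gru_step(x, h, W_u, W_r, W_c, b_u, b_r)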
cbenhagen/kivy
kivy/uix/textinput.py
1
108858
# -*- encoding: utf-8 -*- ''' Text Input ========== .. versionadded:: 1.0.4 .. image:: images/textinput-mono.jpg .. image:: images/textinput-multi.jpg The :class:`TextInput` widget provides a box for editable plain text. Unicode, multiline, cursor navigation, selection and clipboard features are supported. The :class:`TextInput` uses two different coordinate systems: * (x, y) - coordinates in pixels, mostly used for rendering on screen. * (row, col) - cursor index in characters / lines, used for selection and cursor movement. Usage example ------------- To create a multiline :class:`TextInput` (the 'enter' key adds a new line):: from kivy.uix.textinput import TextInput textinput = TextInput(text='Hello world') To create a singleline :class:`TextInput`, set the :class:`TextInput.multiline` property to False (the 'enter' key will defocus the TextInput and emit an 'on_text_validate' event):: def on_enter(instance, value): print('User pressed enter in', instance) textinput = TextInput(text='Hello world', multiline=False) textinput.bind(on_text_validate=on_enter) The textinput's text is stored in its :attr:`TextInput.text` property. To run a callback when the text changes:: def on_text(instance, value): print('The widget', instance, 'have:', value) textinput = TextInput() textinput.bind(text=on_text) You can set the :class:`focus <kivy.uix.behaviors.FocusBehavior>` to a Textinput, meaning that the input box will be highlighted and keyboard focus will be requested:: textinput = TextInput(focus=True) The textinput is defocused if the 'escape' key is pressed, or if another widget requests the keyboard. You can bind a callback to the focus property to get notified of focus changes:: def on_focus(instance, value): if value: print('User focused', instance) else: print('User defocused', instance) textinput = TextInput() textinput.bind(focus=on_focus) See :class:`~kivy.uix.behaviors.FocusBehavior`, from which the :class:`TextInput` inherits, for more details. Selection --------- The selection is automatically updated when the cursor position changes. You can get the currently selected text from the :attr:`TextInput.selection_text` property. Filtering --------- You can control which text can be added to the :class:`TextInput` by overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted or inserted by any other means into the :class:`TextInput` is passed through this function. By overwriting it you can reject or change unwanted characters. For example, to write only in capitalized characters:: class CapitalInput(TextInput): def insert_text(self, substring, from_undo=False): s = substring.upper() return super(CapitalInput, self).insert_text(s,\ from_undo=from_undo) Or to only allow floats (0 - 9 and a single period):: class FloatInput(TextInput): pat = re.compile('[^0-9]') def insert_text(self, substring, from_undo=False): pat = self.pat if '.' 
in self.text: s = re.sub(pat, '', substring) else: s = '.'.join([re.sub(pat, '', s) for s in\ substring.split('.', 1)]) return super(FloatInput, self).insert_text(s, from_undo=from_undo) Default shortcuts ----------------- =============== ======================================================== Shortcuts Description --------------- -------------------------------------------------------- Left Move cursor to left Right Move cursor to right Up Move cursor to up Down Move cursor to down Home Move cursor at the beginning of the line End Move cursor at the end of the line PageUp Move cursor to 3 lines before PageDown Move cursor to 3 lines after Backspace Delete the selection or character before the cursor Del Delete the selection of character after the cursor Shift + <dir> Start a text selection. Dir can be Up, Down, Left or Right Control + c Copy selection Control + x Cut selection Control + p Paste selection Control + a Select all the content Control + z undo Control + r redo =============== ======================================================== .. note:: To enable Emacs-style keyboard shortcuts, you can use :class:`~kivy.uix.behaviors.emacs.EmacsBehavior`. ''' __all__ = ('TextInput', ) import re import sys import string from functools import partial from os import environ from weakref import ref from kivy.animation import Animation from kivy.base import EventLoop from kivy.cache import Cache from kivy.clock import Clock from kivy.config import Config from kivy.compat import PY2 from kivy.logger import Logger from kivy.metrics import inch from kivy.utils import boundary, platform from kivy.uix.behaviors import FocusBehavior from kivy.core.text import Label from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix, Callback from kivy.graphics.context_instructions import Transform from kivy.graphics.texture import Texture from kivy.uix.widget import Widget from kivy.uix.bubble import Bubble from kivy.uix.behaviors import ButtonBehavior from kivy.uix.image import Image from kivy.properties import StringProperty, NumericProperty, \ BooleanProperty, AliasProperty, \ ListProperty, ObjectProperty, VariableListProperty, OptionProperty Cache_register = Cache.register Cache_append = Cache.append Cache_get = Cache.get Cache_remove = Cache.remove Cache_register('textinput.label', timeout=60.) Cache_register('textinput.width', timeout=60.) FL_IS_NEWLINE = 0x01 # late binding Clipboard = None CutBuffer = None MarkupLabel = None _platform = platform # for reloading, we need to keep a list of textinput to retrigger the rendering _textinput_list = [] # cache the result _is_osx = sys.platform == 'darwin' # When we are generating documentation, Config doesn't exist _is_desktop = False if Config: _is_desktop = Config.getboolean('kivy', 'desktop') # register an observer to clear the textinput cache when OpenGL will reload if 'KIVY_DOC' not in environ: def _textinput_clear_cache(*l): Cache_remove('textinput.label') Cache_remove('textinput.width') for wr in _textinput_list[:]: textinput = wr() if textinput is None: _textinput_list.remove(wr) else: textinput._trigger_refresh_text() textinput._refresh_hint_text() from kivy.graphics.context import get_context get_context().add_reload_observer(_textinput_clear_cache, True) class Selector(ButtonBehavior, Image): # Internal class for managing the selection Handles. 
    window = ObjectProperty()
    target = ObjectProperty()
    matrix = ObjectProperty()

    def __init__(self, **kwargs):
        super(Selector, self).__init__(**kwargs)
        self.window.bind(on_touch_down=self.on_window_touch_down)
        self.matrix = self.target.get_window_matrix()

        with self.canvas.before:
            Callback(self.update_transform)
            PushMatrix()
            self.transform = Transform()

        with self.canvas.after:
            PopMatrix()

    def update_transform(self, cb):
        m = self.target.get_window_matrix()
        if self.matrix != m:
            self.matrix = m
            self.transform.identity()
            self.transform.transform(self.matrix)

    def transform_touch(self, touch):
        matrix = self.matrix.inverse()
        touch.apply_transform_2d(
            lambda x, y: matrix.transform_point(x, y, 0)[:2])

    def on_window_touch_down(self, win, touch):
        if self.parent is not win:
            return
        try:
            touch.push()
            self.transform_touch(touch)
            self._touch_diff = self.top - touch.y
            if self.collide_point(*touch.pos):
                FocusBehavior.ignored_touch.append(touch)
            return super(Selector, self).on_touch_down(touch)
        finally:
            touch.pop()


class TextInputCutCopyPaste(Bubble):
    # Internal class used for showing the little bubble popup when
    # copy/cut/paste happen.

    textinput = ObjectProperty(None)
    ''' Holds a reference to the TextInput this Bubble belongs to.
    '''

    but_cut = ObjectProperty(None)
    but_copy = ObjectProperty(None)
    but_paste = ObjectProperty(None)
    but_selectall = ObjectProperty(None)

    matrix = ObjectProperty(None)

    def __init__(self, **kwargs):
        self.mode = 'normal'
        super(TextInputCutCopyPaste, self).__init__(**kwargs)
        Clock.schedule_interval(self._check_parent, .5)
        self.matrix = self.textinput.get_window_matrix()

        with self.canvas.before:
            Callback(self.update_transform)
            PushMatrix()
            self.transform = Transform()

        with self.canvas.after:
            PopMatrix()

    def update_transform(self, cb):
        m = self.textinput.get_window_matrix()
        if self.matrix != m:
            self.matrix = m
            self.transform.identity()
            self.transform.transform(self.matrix)

    def transform_touch(self, touch):
        matrix = self.matrix.inverse()
        touch.apply_transform_2d(
            lambda x, y: matrix.transform_point(x, y, 0)[:2])

    def on_touch_down(self, touch):
        try:
            touch.push()
            self.transform_touch(touch)
            if self.collide_point(*touch.pos):
                FocusBehavior.ignored_touch.append(touch)
            return super(TextInputCutCopyPaste, self).on_touch_down(touch)
        finally:
            touch.pop()

    def on_textinput(self, instance, value):
        global Clipboard
        if value and not Clipboard and not _is_desktop:
            value._ensure_clipboard()

    def _check_parent(self, dt):
        # This prevents the Bubble from staying on screen when the
        # attached textinput is no longer on screen.
        parent = self.textinput
        while parent is not None:
            if parent == parent.parent:
                break
            parent = parent.parent
        if parent is None:
            Clock.unschedule(self._check_parent)
            if self.textinput:
                self.textinput._hide_cut_copy_paste()

    def on_parent(self, instance, value):
        parent = self.textinput
        mode = self.mode
        if parent:
            self.clear_widgets()
            if mode == 'paste':
                # show only paste on long touch
                self.but_selectall.opacity = 1
                widget_list = [self.but_selectall, ]
                if not parent.readonly:
                    widget_list.append(self.but_paste)
            elif parent.readonly:
                # show only copy for read-only text input
                widget_list = (self.but_copy, )
            else:
                # normal mode
                widget_list = (self.but_cut, self.but_copy, self.but_paste)
            for widget in widget_list:
                self.add_widget(widget)

    def do(self, action):
        textinput = self.textinput

        if action == 'cut':
            textinput._cut(textinput.selection_text)
        elif action == 'copy':
            textinput.copy()
        elif action == 'paste':
            textinput.paste()
        elif action == 'selectall':
            textinput.select_all()
            self.mode = ''
            anim = Animation(opacity=0, d=.333)
            anim.bind(on_complete=lambda *args:
                      self.on_parent(self, self.parent))
            anim.start(self.but_selectall)
            return
        self.hide()

    def hide(self):
        parent = self.parent
        if not parent:
            return

        anim = Animation(opacity=0, d=.225)
        anim.bind(on_complete=lambda *args: parent.remove_widget(self))
        anim.start(self)


class TextInput(FocusBehavior, Widget):
    '''TextInput class. See module documentation for more information.

    :Events:
        `on_text_validate`
            Fired only in multiline=False mode when the user hits 'enter'.
            This will also unfocus the textinput.
        `on_double_tap`
            Fired when a double tap happens in the text input. The default
            behavior selects the text around the cursor position. More info at
            :meth:`on_double_tap`.
        `on_triple_tap`
            Fired when a triple tap happens in the text input. The default
            behavior selects the line around the cursor position. More info at
            :meth:`on_triple_tap`.
        `on_quad_touch`
            Fired when four fingers are touching the text input. The default
            behavior selects the whole text. More info at
            :meth:`on_quad_touch`.

    .. warning::
        When changing a :class:`TextInput` property that requires re-drawing,
        e.g. modifying the :attr:`text`, the updates occur on the next clock
        cycle and not instantly. This might cause any changes to the
        :class:`TextInput` that occur between the modification and the next
        cycle to be ignored, or to use previous values. For example, after an
        update to the :attr:`text`, changing the cursor in the same clock
        frame will move it using the previous text and will likely end up in
        an incorrect position. The solution is to schedule any updates to
        occur on the next clock cycle using
        :meth:`~kivy.clock.ClockBase.schedule_once`.

    .. Note::
        Selection is cancelled when TextInput is focused. If you need to
        show selection when TextInput is focused, you should delay
        (use Clock.schedule) the call to the functions for selecting
        text (select_all, select_text).

    .. versionchanged:: 1.9.0
        :class:`TextInput` now inherits from
        :class:`~kivy.uix.behaviors.FocusBehavior`.
        :attr:`~kivy.uix.behaviors.FocusBehavior.keyboard_mode`,
        :meth:`~kivy.uix.behaviors.FocusBehavior.show_keyboard`,
        :meth:`~kivy.uix.behaviors.FocusBehavior.hide_keyboard`,
        :meth:`~kivy.uix.behaviors.FocusBehavior.focus`,
        and :attr:`~kivy.uix.behaviors.FocusBehavior.input_type` have been
        removed since they are now inherited from
        :class:`~kivy.uix.behaviors.FocusBehavior`.

    .. versionchanged:: 1.7.0
        `on_double_tap`, `on_triple_tap` and `on_quad_touch` events added.
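
    As the warning above notes, re-drawing happens on the next clock cycle,
    so follow-up changes should be scheduled. A minimal sketch of moving the
    cursor to the end right after replacing the text (the handler name is
    illustrative)::

        from kivy.clock import Clock

        textinput.text = 'replaced'

        def move_cursor_to_end(dt):
            textinput.cursor = textinput.get_cursor_from_index(
                len(textinput.text))

        Clock.schedule_once(move_cursor_to_end)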
''' __events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap', 'on_quad_touch') def __init__(self, **kwargs): self.is_focusable = kwargs.get('is_focusable', True) self._cursor_blink_time = Clock.get_time() self._cursor = [0, 0] self._selection = False self._selection_finished = True self._selection_touch = None self.selection_text = u'' self._selection_from = None self._selection_to = None self._selection_callback = None self._handle_left = None self._handle_right = None self._handle_middle = None self._bubble = None self._lines_flags = [] self._lines_labels = [] self._lines_rects = [] self._hint_text_flags = [] self._hint_text_labels = [] self._hint_text_rects = [] self._label_cached = None self._line_options = None self._keyboard_mode = Config.get('kivy', 'keyboard_mode') self._command_mode = False self._command = '' self.reset_undo() self._touch_count = 0 self._ctrl_l = False self._ctrl_r = False self._alt_l = False self._alt_r = False self.interesting_keys = { 8: 'backspace', 13: 'enter', 127: 'del', 271: 'enter', 273: 'cursor_up', 274: 'cursor_down', 275: 'cursor_right', 276: 'cursor_left', 278: 'cursor_home', 279: 'cursor_end', 280: 'cursor_pgup', 281: 'cursor_pgdown', 303: 'shift_L', 304: 'shift_R', 305: 'ctrl_L', 306: 'ctrl_R', 308: 'alt_L', 307: 'alt_R'} super(TextInput, self).__init__(**kwargs) fbind = self.fbind refresh_line_options = self._trigger_refresh_line_options update_text_options = self._update_text_options fbind('font_size', refresh_line_options) fbind('font_name', refresh_line_options) def handle_readonly(instance, value): if value and (not _is_desktop or not self.allow_copy): self.is_focusable = False fbind('padding', update_text_options) fbind('tab_width', update_text_options) fbind('font_size', update_text_options) fbind('font_name', update_text_options) fbind('size', update_text_options) fbind('password', update_text_options) fbind('password_mask', update_text_options) fbind('pos', self._trigger_update_graphics) fbind('readonly', handle_readonly) fbind('focus', self._on_textinput_focused) handle_readonly(self, self.readonly) handles = self._trigger_position_handles = Clock.create_trigger( self._position_handles) self._trigger_show_handles = Clock.create_trigger( self._show_handles, .05) self._trigger_update_cutbuffer = Clock.create_trigger( self._update_cutbuffer) refresh_line_options() self._trigger_refresh_text() fbind('pos', handles) fbind('size', handles) # when the gl context is reloaded, trigger the text rendering again. _textinput_list.append(ref(self, TextInput._reload_remove_observer)) if platform == 'linux': self._ensure_clipboard() def on_text_validate(self): pass def cursor_index(self, cursor=None): '''Return the cursor index in the text/value. ''' if not cursor: cursor = self.cursor try: l = self._lines if len(l) == 0: return 0 lf = self._lines_flags index, cr = cursor for row in range(cr): if row >= len(l): continue index += len(l[row]) if lf[row] & FL_IS_NEWLINE: index += 1 if lf[cr] & FL_IS_NEWLINE: index += 1 return index except IndexError: return 0 def cursor_offset(self): '''Get the cursor x offset on the current line. ''' offset = 0 row = self.cursor_row col = self.cursor_col _lines = self._lines if col and row < len(_lines): offset = self._get_text_width( _lines[row][:col], self.tab_width, self._label_cached) return offset def get_cursor_from_index(self, index): '''Return the (row, col) of the cursor from text index. 
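
        For a two-line text (``'abc'``, a newline, then ``'def'``), index 5
        maps to column 1 of row 1. Note that, like :attr:`cursor`, the
        returned tuple is consumed as ``(col, row)``.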
        '''
        index = boundary(index, 0, len(self.text))
        if index <= 0:
            return 0, 0
        lf = self._lines_flags
        l = self._lines
        i = 0
        for row in range(len(l)):
            ni = i + len(l[row])
            if lf[row] & FL_IS_NEWLINE:
                ni += 1
                i += 1
            if ni >= index:
                return index - i, row
            i = ni
        return index, row

    def select_text(self, start, end):
        ''' Select a portion of text displayed in this TextInput.

        .. versionadded:: 1.4.0

        :Parameters:
            `start`
                Index of textinput.text from where to start selection
            `end`
                Index of textinput.text till which the selection should be
                displayed
        '''
        if end < start:
            raise Exception('end must be greater than start')
        m = len(self.text)
        self._selection_from = boundary(start, 0, m)
        self._selection_to = boundary(end, 0, m)
        self._selection_finished = True
        self._update_selection(True)
        self._update_graphics_selection()

    def select_all(self):
        ''' Select all of the text displayed in this TextInput.

        .. versionadded:: 1.4.0
        '''
        self.select_text(0, len(self.text))

    re_indent = re.compile(r'^(\s*|)')

    def _auto_indent(self, substring):
        index = self.cursor_index()
        _text = self._get_text(encode=False)
        if index > 0:
            line_start = _text.rfind('\n', 0, index)
            if line_start > -1:
                line = _text[line_start + 1:index]
                indent = self.re_indent.match(line).group()
                substring += indent
        return substring

    def insert_text(self, substring, from_undo=False):
        '''Insert new text at the current cursor position. Override this
        function in order to pre-process text for input validation.
        '''
        if self.readonly or not substring:
            return

        if isinstance(substring, bytes):
            substring = substring.decode('utf8')

        if self.replace_crlf:
            substring = substring.replace(u'\r\n', u'\n')

        mode = self.input_filter
        if mode is not None:
            chr = type(substring)
            if chr is bytes:
                int_pat = self._insert_int_patb
            else:
                int_pat = self._insert_int_patu

            if mode == 'int':
                substring = re.sub(int_pat, chr(''), substring)
            elif mode == 'float':
                if '.' in self.text:
                    substring = re.sub(int_pat, chr(''), substring)
                else:
                    substring = '.'.join([re.sub(int_pat, chr(''), k) for k
                                          in substring.split(chr('.'), 1)])
            else:
                substring = mode(substring, from_undo)
            if not substring:
                return

        self._hide_handles(EventLoop.window)

        if not from_undo and self.multiline and self.auto_indent \
                and substring == u'\n':
            substring = self._auto_indent(substring)

        cc, cr = self.cursor
        sci = self.cursor_index
        ci = sci()
        text = self._lines[cr]
        len_str = len(substring)
        new_text = text[:cc] + substring + text[cc:]
        self._set_line_text(cr, new_text)

        wrap = (self._get_text_width(
            new_text,
            self.tab_width,
            self._label_cached) > self.width)
        if len_str > 1 or substring == u'\n' or wrap:
            # Avoid refreshing text on every keystroke.
            # Allows for faster typing of text when the amount of text in
            # TextInput gets large.

            start, finish, lines,\
                lineflags, len_lines = self._get_line_from_cursor(cr, new_text)
            # calling trigger here could lead to wrong cursor positioning
            # and repeating of text when keys are added rapidly in an
            # automated fashion. From Android Keyboard for example.
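            # The affected paragraph is therefore re-split and re-rendered
            # synchronously here; single printable characters that do not
            # wrap took the cheaper _set_line_text() path above instead.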
            self._refresh_text_from_property('insert', start, finish,
                                             lines, lineflags, len_lines)

        self.cursor = self.get_cursor_from_index(ci + len_str)
        # handle undo and redo
        self._set_unredo_insert(ci, ci + len_str, substring, from_undo)

    def _get_line_from_cursor(self, start, new_text):
        # get current paragraph from cursor position
        finish = start
        lines = self._lines
        linesflags = self._lines_flags
        if start and not linesflags[start]:
            start -= 1
            new_text = u''.join((lines[start], new_text))
        try:
            while not linesflags[finish + 1]:
                new_text = u''.join((new_text, lines[finish + 1]))
                finish += 1
        except IndexError:
            pass
        lines, lineflags = self._split_smart(new_text)
        len_lines = max(1, len(lines))
        return start, finish, lines, lineflags, len_lines

    def _set_unredo_insert(self, ci, sci, substring, from_undo):
        # handle undo and redo
        if from_undo:
            return
        self._undo.append({'undo_command': ('insert', ci, sci),
                           'redo_command': (ci, substring)})
        # reset redo when undo is appended to
        self._redo = []

    def reset_undo(self):
        '''Reset undo and redo lists from memory.

        .. versionadded:: 1.3.0
        '''
        self._redo = self._undo = []

    def do_redo(self):
        '''Do redo operation.

        .. versionadded:: 1.3.0

        This action re-does any command that has been un-done by
        do_undo/ctrl+z. This function is automatically called when
        `ctrl+r` keys are pressed.
        '''
        try:
            x_item = self._redo.pop()
            undo_type = x_item['undo_command'][0]
            _get_cursor_from_index = self.get_cursor_from_index

            if undo_type == 'insert':
                ci, substring = x_item['redo_command']
                self.cursor = _get_cursor_from_index(ci)
                self.insert_text(substring, True)
            elif undo_type == 'bkspc':
                self.cursor = _get_cursor_from_index(x_item['redo_command'])
                self.do_backspace(from_undo=True)
            elif undo_type == 'shiftln':
                direction, rows, cursor = x_item['redo_command'][1:]
                self._shift_lines(direction, rows, cursor, True)
            else:
                # delsel
                ci, sci = x_item['redo_command']
                self._selection_from = ci
                self._selection_to = sci
                self._selection = True
                self.delete_selection(True)
                self.cursor = _get_cursor_from_index(ci)
            self._undo.append(x_item)
        except IndexError:
            # reached the top of the redo list
            pass

    def do_undo(self):
        '''Do undo operation.

        .. versionadded:: 1.3.0

        This action un-does any edits that have been made since the last
        call to reset_undo().
        This function is automatically called when `ctrl+z` keys are pressed.
        '''
        try:
            x_item = self._undo.pop()
            undo_type = x_item['undo_command'][0]
            self.cursor = self.get_cursor_from_index(
                x_item['undo_command'][1])

            if undo_type == 'insert':
                ci, sci = x_item['undo_command'][1:]
                self._selection_from = ci
                self._selection_to = sci
                self._selection = True
                self.delete_selection(True)
            elif undo_type == 'bkspc':
                substring = x_item['undo_command'][2:][0]
                self.insert_text(substring, True)
            elif undo_type == 'shiftln':
                direction, rows, cursor = x_item['undo_command'][1:]
                self._shift_lines(direction, rows, cursor, True)
            else:
                # delsel
                substring = x_item['undo_command'][2:][0]
                self.insert_text(substring, True)
            self._redo.append(x_item)
        except IndexError:
            # reached the top of the undo list
            pass

    def do_backspace(self, from_undo=False, mode='bkspc'):
        '''Do backspace operation from the current cursor position.
        This action might do several things:

            - removing the current selection if available.
            - removing the previous char and moving the cursor back.
            - do nothing, if we are at the start.
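
        A minimal sketch of programmatic use, assuming ``textinput.text`` is
        ``'abc'`` and has already been laid out::

            textinput.cursor = (3, 0)  # column 3 of row 0, after the 'c'
            textinput.do_backspace()   # text becomes 'ab'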
''' if self.readonly: return cc, cr = self.cursor _lines = self._lines text = _lines[cr] cursor_index = self.cursor_index() text_last_line = _lines[cr - 1] if cc == 0 and cr == 0: return _lines_flags = self._lines_flags start = cr if cc == 0: substring = u'\n' if _lines_flags[cr] else u' ' new_text = text_last_line + text self._set_line_text(cr - 1, new_text) self._delete_line(cr) start = cr - 1 else: #ch = text[cc-1] substring = text[cc - 1] new_text = text[:cc - 1] + text[cc:] self._set_line_text(cr, new_text) # refresh just the current line instead of the whole text start, finish, lines, lineflags, len_lines =\ self._get_line_from_cursor(start, new_text) # avoid trigger refresh, leads to issue with # keys/text send rapidly through code. self._refresh_text_from_property('del', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(cursor_index - 1) # handle undo and redo self._set_undo_redo_bkspc( cursor_index, cursor_index - 1, substring, from_undo) def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo): # handle undo and redo for backspace if from_undo: return self._undo.append({ 'undo_command': ('bkspc', new_index, substring), 'redo_command': ol_index}) #reset redo when undo is appended to self._redo = [] _re_whitespace = re.compile(r'\s+') def _move_cursor_word_left(self, index=None): pos = index or self.cursor_index() if pos == 0: return self.cursor lines = self._lines col, row = self.get_cursor_from_index(pos) if col == 0: row -= 1 col = len(lines[row]) while True: matches = list(self._re_whitespace.finditer(lines[row], 0, col)) if not matches: if col == 0: if row == 0: return 0, 0 row -= 1 col = len(lines[row]) continue return 0, row match = matches[-1] mpos = match.end() if mpos == col: if len(matches) > 1: match = matches[-2] mpos = match.end() else: if match.start() == 0: if row == 0: return 0, 0 row -= 1 col = len(lines[row]) continue return 0, row col = mpos return col, row def _move_cursor_word_right(self, index=None): pos = index or self.cursor_index() col, row = self.get_cursor_from_index(pos) lines = self._lines mrow = len(lines) - 1 if row == mrow and col == len(lines[row]): return col, row if col == len(lines[row]): row += 1 col = 0 while True: matches = list(self._re_whitespace.finditer(lines[row], col)) if not matches: if col == len(lines[row]): if row == mrow: return col, row row += 1 col = 0 continue return len(lines[row]), row match = matches[0] mpos = match.start() if mpos == col: if len(matches) > 1: match = matches[1] mpos = match.start() else: if match.end() == len(lines[row]): if row == mrow: return col, row row += 1 col = 0 continue return len(lines[row]), row col = mpos return col, row def _expand_range(self, ifrom, ito=None): if ito is None: ito = ifrom rfrom = self.get_cursor_from_index(ifrom)[1] rtcol, rto = self.get_cursor_from_index(ito) rfrom, rto = self._expand_rows(rfrom, rto + 1 if rtcol else rto) return (self.cursor_index((0, rfrom)), self.cursor_index((0, rto))) def _expand_rows(self, rfrom, rto=None): if rto is None or rto == rfrom: rto = rfrom + 1 lines = self._lines flags = list(reversed(self._lines_flags)) while rfrom > 0 and not (flags[rfrom - 1] & FL_IS_NEWLINE): rfrom -= 1 rmax = len(lines) - 1 while 0 < rto < rmax and not (flags[rto - 1] & FL_IS_NEWLINE): rto += 1 return max(0, rfrom), min(rmax, rto) def _shift_lines(self, direction, rows=None, old_cursor=None, from_undo=False): if self._selection_callback: if from_undo: self._selection_callback.cancel() else: return lines = self._lines 
flags = list(reversed(self._lines_flags)) labels = self._lines_labels rects = self._lines_rects orig_cursor = self.cursor sel = None if old_cursor is not None: self.cursor = old_cursor if not rows: sindex = self.selection_from eindex = self.selection_to if (sindex or eindex) and sindex != eindex: sindex, eindex = tuple(sorted((sindex, eindex))) sindex, eindex = self._expand_range(sindex, eindex) else: sindex, eindex = self._expand_range(self.cursor_index()) srow = self.get_cursor_from_index(sindex)[1] erow = self.get_cursor_from_index(eindex)[1] sel = sindex, eindex if direction < 0 and srow > 0: psrow, perow = self._expand_rows(srow - 1) rows = ((srow, erow), (psrow, perow)) elif direction > 0 and erow < len(lines) - 1: psrow, perow = self._expand_rows(erow) rows = ((srow, erow), (psrow, perow)) if rows: (srow, erow), (psrow, perow) = rows if direction < 0: m1srow, m1erow = psrow, perow m2srow, m2erow = srow, erow cdiff = psrow - perow xdiff = srow - erow else: m1srow, m1erow = srow, erow m2srow, m2erow = psrow, perow cdiff = perow - psrow xdiff = erow - srow self._lines_flags = list(reversed( flags[:m1srow] + flags[m2srow:m2erow] + flags[m1srow:m1erow] + flags[m2erow:])) self._lines = (lines[:m1srow] + lines[m2srow:m2erow] + lines[m1srow:m1erow] + lines[m2erow:]) self._lines_labels = (labels[:m1srow] + labels[m2srow:m2erow] + labels[m1srow:m1erow] + labels[m2erow:]) self._lines_rects = (rects[:m1srow] + rects[m2srow:m2erow] + rects[m1srow:m1erow] + rects[m2erow:]) self._trigger_update_graphics() csrow = srow + cdiff cerow = erow + cdiff sel = (self.cursor_index((0, csrow)), self.cursor_index((0, cerow))) self.cursor = self.cursor_col, self.cursor_row + cdiff if not from_undo: undo_rows = ((srow + cdiff, erow + cdiff), (psrow - xdiff, perow - xdiff)) self._undo.append({ 'undo_command': ('shiftln', direction * -1, undo_rows, self.cursor), 'redo_command': ('shiftln', direction, rows, orig_cursor), }) self._redo = [] if sel: def cb(dt): self.select_text(*sel) self._selection_callback = None self._selection_callback = Clock.schedule_once(cb) def do_cursor_movement(self, action, control=False, alt=False): '''Move the cursor relative to it's current position. Action can be one of : - cursor_left: move the cursor to the left - cursor_right: move the cursor to the right - cursor_up: move the cursor on the previous line - cursor_down: move the cursor on the next line - cursor_home: move the cursor at the start of the current line - cursor_end: move the cursor at the end of current line - cursor_pgup: move one "page" before - cursor_pgdown: move one "page" after In addition, the behavior of certain actions can be modified: - control + cursor_left: move the cursor one word to the left - control + cursor_right: move the cursor one word to the right - control + cursor_up: scroll up one line - control + cursor_down: scroll down one line - control + cursor_home: go to beginning of text - control + cursor_end: go to end of text - alt + cursor_up: shift line(s) up - alt + cursor_down: shift line(s) down .. 
versionchanged:: 1.9.1
        '''
        pgmove_speed = int(self.height /
                           (self.line_height + self.line_spacing) - 1)
        col, row = self.cursor
        if action == 'cursor_up':
            if self.multiline and control:
                self.scroll_y = max(0, self.scroll_y - self.line_height)
            elif not self.readonly and self.multiline and alt:
                self._shift_lines(-1)
                return
            else:
                row = max(row - 1, 0)
                col = min(len(self._lines[row]), col)
        elif action == 'cursor_down':
            if self.multiline and control:
                maxy = self.minimum_height - self.height
                self.scroll_y = max(0, min(maxy,
                                           self.scroll_y + self.line_height))
            elif not self.readonly and self.multiline and alt:
                self._shift_lines(1)
                return
            else:
                row = min(row + 1, len(self._lines) - 1)
                col = min(len(self._lines[row]), col)
        elif action == 'cursor_left':
            if not self.password and control:
                col, row = self._move_cursor_word_left()
            else:
                if col == 0:
                    if row:
                        row -= 1
                        col = len(self._lines[row])
                else:
                    col, row = col - 1, row
        elif action == 'cursor_right':
            if not self.password and control:
                col, row = self._move_cursor_word_right()
            else:
                if col == len(self._lines[row]):
                    if row < len(self._lines) - 1:
                        col = 0
                        row += 1
                else:
                    col, row = col + 1, row
        elif action == 'cursor_home':
            col = 0
            if control:
                row = 0
        elif action == 'cursor_end':
            if control:
                row = len(self._lines) - 1
            col = len(self._lines[row])
        elif action == 'cursor_pgup':
            row = max(0, row - pgmove_speed)
            col = min(len(self._lines[row]), col)
        elif action == 'cursor_pgdown':
            row = min(row + pgmove_speed, len(self._lines) - 1)
            col = min(len(self._lines[row]), col)
        self.cursor = (col, row)

    def get_cursor_from_xy(self, x, y):
        '''Return the (col, row) of the cursor from an (x, y) position.
        '''
        padding_left = self.padding[0]
        padding_top = self.padding[1]
        l = self._lines
        dy = self.line_height + self.line_spacing
        cx = x - self.x
        scrl_y = self.scroll_y
        scrl_x = self.scroll_x
        scrl_y = scrl_y / dy if scrl_y > 0 else 0
        cy = (self.top - padding_top + scrl_y * dy) - y
        cy = int(boundary(round(cy / dy - 0.5), 0, len(l) - 1))
        _get_text_width = self._get_text_width
        _tab_width = self.tab_width
        _label_cached = self._label_cached
        # convert the touch x position into a column index on row cy
        for i in range(0, len(l[cy])):
            if _get_text_width(l[cy][:i], _tab_width, _label_cached) + \
               _get_text_width(l[cy][i], _tab_width, _label_cached) * 0.6 + \
               padding_left > cx + scrl_x:
                cx = i
                break
        return cx, cy

    #
    # Selection control
    #
    def cancel_selection(self):
        '''Cancel current selection (if any).
        '''
        self._selection_from = self._selection_to = self.cursor_index()
        self._selection = False
        self._selection_finished = True
        self._selection_touch = None
        self._trigger_update_graphics()

    def delete_selection(self, from_undo=False):
        '''Delete the current text selection (if any).
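
        For example, a short sketch removing the first five characters of the
        current text::

            textinput.select_text(0, 5)
            textinput.delete_selection()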
        '''
        if self.readonly:
            return
        self._hide_handles(EventLoop.window)
        scrl_x = self.scroll_x
        scrl_y = self.scroll_y
        cc, cr = self.cursor
        if not self._selection:
            return
        v = self._get_text(encode=False)
        a, b = self._selection_from, self._selection_to
        if a > b:
            a, b = b, a
        self.cursor = cursor = self.get_cursor_from_index(a)
        start = cursor
        finish = self.get_cursor_from_index(b)
        cur_line = self._lines[start[1]][:start[0]] +\
            self._lines[finish[1]][finish[0]:]
        lines, lineflags = self._split_smart(cur_line)
        len_lines = len(lines)
        if start[1] == finish[1]:
            self._set_line_text(start[1], cur_line)
        else:
            self._refresh_text_from_property('del', start[1], finish[1],
                                             lines, lineflags, len_lines)
        self.scroll_x = scrl_x
        self.scroll_y = scrl_y
        # handle undo and redo for delete selection
        self._set_unredo_delsel(a, b, v[a:b], from_undo)
        self.cancel_selection()

    def _set_unredo_delsel(self, a, b, substring, from_undo):
        # handle undo and redo for delete selection
        if from_undo:
            return

        self._undo.append({
            'undo_command': ('delsel', a, substring),
            'redo_command': (a, b)})
        # reset redo when undo is appended to
        self._redo = []

    def _update_selection(self, finished=False):
        '''Update selection text and order of from/to if finished is True.
        Can be called multiple times until finished is True.
        '''
        a, b = self._selection_from, self._selection_to
        if a > b:
            a, b = b, a
        self._selection_finished = finished
        _selection_text = self._get_text(encode=False)[a:b]
        self.selection_text = ("" if not self.allow_copy else
                               ((self.password_mask * (b - a)) if
                                self.password else _selection_text))
        if not finished:
            self._selection = True
        else:
            self._selection = bool(len(_selection_text))
            self._selection_touch = None
        if a == 0:
            # update graphics only on new line
            # allows smoother scrolling, noticeably
            # faster when dealing with large text.
            self._update_graphics_selection()
            # self._trigger_update_graphics()

    #
    # Touch control
    #
    def long_touch(self, dt):
        if self._selection_to == self._selection_from:
            pos = self.to_local(*self._long_touch_pos, relative=True)
            self._show_cut_copy_paste(
                pos, EventLoop.window, mode='paste')

    def on_double_tap(self):
        '''This event is dispatched when a double tap happens
        inside TextInput. The default behavior is to select the
        word around the current cursor position. Override this to provide
        different behavior. Alternatively, you can bind to this
        event to provide additional functionality.
        '''
        ci = self.cursor_index()
        cc = self.cursor_col
        line = self._lines[self.cursor_row]
        len_line = len(line)
        start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1)
        end = line[cc:].find(u' ')
        end = end if end > -1 else (len_line - cc)
        Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end))

    def on_triple_tap(self):
        '''This event is dispatched when a triple tap happens
        inside TextInput. The default behavior is to select the
        line around current cursor position. Override this to provide
        different behavior. Alternatively, you can bind to this
        event to provide additional functionality.
        '''
        ci = self.cursor_index()
        sindex, eindex = self._expand_range(ci)
        Clock.schedule_once(lambda dt: self.select_text(sindex, eindex))

    def on_quad_touch(self):
        '''This event is dispatched when four fingers are touching
        inside TextInput. The default behavior is to select all text.
        Override this to provide different behavior. Alternatively,
        you can bind to this event to provide additional functionality.
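
        For example, to observe the gesture (the handler name is
        illustrative)::

            def on_quad(instance):
                print('four fingers down on', instance)

            textinput.bind(on_quad_touch=on_quad)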
''' Clock.schedule_once(lambda dt: self.select_all()) def on_touch_down(self, touch): if self.disabled: return touch_pos = touch.pos if not self.collide_point(*touch_pos): return False if super(TextInput, self).on_touch_down(touch): return True # Check for scroll wheel if 'button' in touch.profile and touch.button.startswith('scroll'): scroll_type = touch.button[6:] if scroll_type == 'down': if self.multiline: if self.scroll_y <= 0: return self.scroll_y -= self.line_height else: if self.scroll_x <= 0: return self.scroll_x -= self.line_height if scroll_type == 'up': if self.multiline: if (self._lines_rects[-1].pos[1] > self.y + self.line_height): return self.scroll_y += self.line_height else: if (self.scroll_x + self.width >= self._lines_rects[-1].texture.size[0]): return self.scroll_x += self.line_height touch.grab(self) self._touch_count += 1 if touch.is_double_tap: self.dispatch('on_double_tap') if touch.is_triple_tap: self.dispatch('on_triple_tap') if self._touch_count == 4: self.dispatch('on_quad_touch') self._hide_cut_copy_paste(EventLoop.window) # schedule long touch for paste self._long_touch_pos = touch.pos Clock.schedule_once(self.long_touch, .5) self.cursor = self.get_cursor_from_xy(*touch_pos) if not self._selection_touch: self.cancel_selection() self._selection_touch = touch self._selection_from = self._selection_to = self.cursor_index() self._update_selection() if CutBuffer and 'button' in touch.profile and touch.button == 'middle': self.insert_text(CutBuffer.get_cutbuffer()) return True return False def on_touch_move(self, touch): if touch.grab_current is not self: return if not self.focus: touch.ungrab(self) if self._selection_touch is touch: self._selection_touch = None return False if self._selection_touch is touch: self.cursor = self.get_cursor_from_xy(touch.x, touch.y) self._selection_to = self.cursor_index() self._update_selection() return True def on_touch_up(self, touch): if touch.grab_current is not self: return touch.ungrab(self) self._touch_count -= 1 # schedule long touch for paste Clock.unschedule(self.long_touch) if not self.focus: return False if self._selection_touch is touch: self._selection_to = self.cursor_index() self._update_selection(True) # show Bubble win = EventLoop.window if self._selection_to != self._selection_from: self._show_cut_copy_paste(touch.pos, win) elif self.use_handles: self._hide_handles() handle_middle = self._handle_middle if handle_middle is None: self._handle_middle = handle_middle = Selector( source=self.handle_image_middle, window=win, target=self, size_hint=(None, None), size=('45dp', '45dp')) handle_middle.bind(on_press=self._handle_pressed, on_touch_move=self._handle_move, on_release=self._handle_released) if not self._handle_middle.parent and self.text: EventLoop.window.add_widget(handle_middle, canvas='after') self._position_handles(mode='middle') return True def _handle_pressed(self, instance): self._hide_cut_copy_paste() sf, st = self._selection_from, self.selection_to if sf > st: self._selection_from, self._selection_to = st, sf def _handle_released(self, instance): sf, st = self._selection_from, self.selection_to if sf == st: return self._update_selection() self._show_cut_copy_paste( (instance.right if instance is self._handle_left else instance.x, instance.top + self.line_height), EventLoop.window) def _handle_move(self, instance, touch): if touch.grab_current != instance: return get_cursor = self.get_cursor_from_xy handle_right = self._handle_right handle_left = self._handle_left handle_middle = self._handle_middle try: 
touch.push() touch.apply_transform_2d(self.to_widget) x, y = touch.pos finally: touch.pop() cursor = get_cursor( x, y + instance._touch_diff + (self.line_height / 2)) if instance != touch.grab_current: return if instance == handle_middle: self.cursor = cursor self._position_handles(mode='middle') return ci = self.cursor_index(cursor=cursor) sf, st = self._selection_from, self.selection_to if instance == handle_left: self._selection_from = ci elif instance == handle_right: self._selection_to = ci self._trigger_update_graphics() self._trigger_position_handles() def _position_handles(self, *args, **kwargs): if not self.text: return mode = kwargs.get('mode', 'both') lh = self.line_height handle_middle = self._handle_middle if handle_middle: hp_mid = self.cursor_pos pos = self.to_local(*hp_mid, relative=True) handle_middle.x = pos[0] - handle_middle.width / 2 handle_middle.top = pos[1] - lh if mode[0] == 'm': return group = self.canvas.get_group('selection') if not group: return EventLoop.window.remove_widget(self._handle_middle) handle_left = self._handle_left if not handle_left: return hp_left = group[2].pos handle_left.pos = self.to_local(*hp_left, relative=True) handle_left.x -= handle_left.width handle_left.y -= handle_left.height handle_right = self._handle_right last_rect = group[-1] hp_right = last_rect.pos[0], last_rect.pos[1] x, y = self.to_local(*hp_right, relative=True) handle_right.x = x + last_rect.size[0] handle_right.y = y - handle_right.height def _hide_handles(self, win=None): win = win or EventLoop.window if win is None: return win.remove_widget(self._handle_right) win.remove_widget(self._handle_left) win.remove_widget(self._handle_middle) def _show_handles(self, dt): if not self.use_handles or not self.text: return win = EventLoop.window handle_right = self._handle_right handle_left = self._handle_left if self._handle_left is None: self._handle_left = handle_left = Selector( source=self.handle_image_left, target=self, window=win, size_hint=(None, None), size=('45dp', '45dp')) handle_left.bind(on_press=self._handle_pressed, on_touch_move=self._handle_move, on_release=self._handle_released) self._handle_right = handle_right = Selector( source=self.handle_image_right, target=self, window=win, size_hint=(None, None), size=('45dp', '45dp')) handle_right.bind(on_press=self._handle_pressed, on_touch_move=self._handle_move, on_release=self._handle_released) else: if self._handle_left.parent: self._position_handles() return if not self.parent: return self._trigger_position_handles() if self.selection_from != self.selection_to: self._handle_left.opacity = self._handle_right.opacity = 0 win.add_widget(self._handle_left, canvas='after') win.add_widget(self._handle_right, canvas='after') anim = Animation(opacity=1, d=.4) anim.start(self._handle_right) anim.start(self._handle_left) def _show_cut_copy_paste(self, pos, win, parent_changed=False, mode='', pos_in_window=False, *l): # Show a bubble with cut copy and paste buttons if not self.use_bubble: return bubble = self._bubble if bubble is None: self._bubble = bubble = TextInputCutCopyPaste(textinput=self) self.fbind('parent', self._show_cut_copy_paste, pos, win, True) win.bind( size=lambda *args: self._hide_cut_copy_paste(win)) self.bind(cursor_pos=lambda *args: self._hide_cut_copy_paste(win)) else: win.remove_widget(bubble) if not self.parent: return if parent_changed: return # Search the position from the touch to the window lh, ls = self.line_height, self.line_spacing x, y = pos t_pos = (x, y) if pos_in_window else self.to_window(x, 
y) bubble_size = bubble.size bubble_hw = bubble_size[0] / 2. win_size = win.size bubble_pos = (t_pos[0], t_pos[1] + inch(.25)) if (bubble_pos[0] - bubble_hw) < 0: # bubble beyond left of window if bubble_pos[1] > (win_size[1] - bubble_size[1]): # bubble above window height bubble_pos = (bubble_hw, (t_pos[1]) - (lh + ls + inch(.25))) bubble.arrow_pos = 'top_left' else: bubble_pos = (bubble_hw, bubble_pos[1]) bubble.arrow_pos = 'bottom_left' elif (bubble_pos[0] + bubble_hw) > win_size[0]: # bubble beyond right of window if bubble_pos[1] > (win_size[1] - bubble_size[1]): # bubble above window height bubble_pos = (win_size[0] - bubble_hw, (t_pos[1]) - (lh + ls + inch(.25))) bubble.arrow_pos = 'top_right' else: bubble_pos = (win_size[0] - bubble_hw, bubble_pos[1]) bubble.arrow_pos = 'bottom_right' else: if bubble_pos[1] > (win_size[1] - bubble_size[1]): # bubble above window height bubble_pos = (bubble_pos[0], (t_pos[1]) - (lh + ls + inch(.25))) bubble.arrow_pos = 'top_mid' else: bubble.arrow_pos = 'bottom_mid' bubble_pos = self.to_widget(*bubble_pos) bubble.center_x = bubble_pos[0] if bubble.arrow_pos[0] == 't': bubble.top = bubble_pos[1] else: bubble.y = bubble_pos[1] bubble.mode = mode Animation.cancel_all(bubble) bubble.opacity = 0 win.add_widget(bubble, canvas='after') Animation(opacity=1, d=.225).start(bubble) def _hide_cut_copy_paste(self, win=None): bubble = self._bubble if not bubble: return bubble.hide() # # Private # @staticmethod def _reload_remove_observer(wr): # called when the textinput is deleted if wr in _textinput_list: _textinput_list.remove(wr) def _on_textinput_focused(self, instance, value, *largs): self.focus = value win = EventLoop.window self.cancel_selection() self._hide_cut_copy_paste(win) if value: if (not (self.readonly or self.disabled) or _is_desktop and self._keyboard_mode == 'system'): Clock.schedule_interval(self._do_blink_cursor, 1 / 2.) self._editable = True else: self._editable = False else: Clock.unschedule(self._do_blink_cursor) self._hide_handles(win) def _ensure_clipboard(self): global Clipboard, CutBuffer if not Clipboard: from kivy.core.clipboard import Clipboard, CutBuffer def cut(self): ''' Copy current selection to clipboard then delete it from TextInput. .. versionadded:: 1.8.0 ''' self._cut(self.selection_text) def _cut(self, data): self._ensure_clipboard() Clipboard.copy(data) self.delete_selection() def copy(self, data=''): ''' Copy the value provided in argument `data` into current clipboard. If data is not of type string it will be converted to string. If no data is provided then current selection if present is copied. .. versionadded:: 1.8.0 ''' self._ensure_clipboard() if data: return Clipboard.copy(data) if self.selection_text: return Clipboard.copy(self.selection_text) def paste(self): ''' Insert text from system :class:`~kivy.core.clipboard.Clipboard` into the :class:`~kivy.uix.textinput.TextInput` at current cursor position. .. 
versionadded:: 1.8.0 ''' self._ensure_clipboard() data = Clipboard.paste() self.delete_selection() self.insert_text(data) def _update_cutbuffer(self, *args): CutBuffer.set_cutbuffer(self.selection_text) def _get_text_width(self, text, tab_width, _label_cached): # Return the width of a text, according to the current line options kw = self._get_line_options() try: cid = u'{}\0{}\0{}'.format(text, self.password, kw) except UnicodeDecodeError: cid = '{}\0{}\0{}'.format(text, self.password, kw) width = Cache_get('textinput.width', cid) if width: return width if not _label_cached: _label_cached = self._label_cached text = text.replace('\t', ' ' * tab_width) if not self.password: width = _label_cached.get_extents(text)[0] else: width = _label_cached.get_extents( self.password_mask * len(text))[0] Cache_append('textinput.width', cid, width) return width def _do_blink_cursor(self, dt): # Callback called by the timer to blink the cursor, according to the # last activity in the widget b = (Clock.get_time() - self._cursor_blink_time) self.cursor_blink = int(b * 2) % 2 def on_cursor(self, instance, value): # When the cursor is moved, reset the activity timer, and update all # the graphics. self._cursor_blink_time = Clock.get_time() self._trigger_update_graphics() def _delete_line(self, idx): # Delete current line, and fix cursor position assert(idx < len(self._lines)) self._lines_flags.pop(idx) self._lines_labels.pop(idx) self._lines.pop(idx) self.cursor = self.cursor def _set_line_text(self, line_num, text): # Set current line with other text than the default one. self._lines_labels[line_num] = self._create_line_label(text) self._lines[line_num] = text def _trigger_refresh_line_options(self, *largs): Clock.unschedule(self._refresh_line_options) Clock.schedule_once(self._refresh_line_options, 0) def _refresh_line_options(self, *largs): self._line_options = None self._get_line_options() self._refresh_text_from_property() self._refresh_hint_text() self.cursor = self.get_cursor_from_index(len(self.text)) def _trigger_refresh_text(self, *largs): if len(largs) and largs[0] == self: largs = () Clock.unschedule(lambda dt: self._refresh_text_from_property(*largs)) Clock.schedule_once(lambda dt: self._refresh_text_from_property(*largs)) def _update_text_options(self, *largs): Cache_remove('textinput.width') self._trigger_refresh_text() def _refresh_text_from_trigger(self, dt, *largs): self._refresh_text_from_property(*largs) def _refresh_text_from_property(self, *largs): self._refresh_text(self._get_text(encode=False), *largs) def _refresh_text(self, text, *largs): # Refresh all the lines from a new text. # By using cache in internal functions, this method should be fast. 
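        # When largs is given it is (mode, start, finish, lines, flags,
        # len_lines) as produced by _get_line_from_cursor(), narrowing the
        # refresh to the paragraph that actually changed.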
mode = 'all' if len(largs) > 1: mode, start, finish, _lines, _lines_flags, len_lines = largs #start = max(0, start) cursor = None else: cursor = self.cursor_index() _lines, self._lines_flags = self._split_smart(text) _lines_labels = [] _line_rects = [] _create_label = self._create_line_label for x in _lines: lbl = _create_label(x) _lines_labels.append(lbl) _line_rects.append(Rectangle(size=lbl.size)) if mode == 'all': self._lines_labels = _lines_labels self._lines_rects = _line_rects self._lines = _lines elif mode == 'del': if finish > start: self._insert_lines(start, finish if start == finish else (finish + 1), len_lines, _lines_flags, _lines, _lines_labels, _line_rects) elif mode == 'insert': self._insert_lines( start, finish if (start == finish and not len_lines) else (finish + 1), len_lines, _lines_flags, _lines, _lines_labels, _line_rects) min_line_ht = self._label_cached.get_extents('_')[1] # with markup texture can be of height `1` self.line_height = max(_lines_labels[0].height, min_line_ht) #self.line_spacing = 2 # now, if the text change, maybe the cursor is not at the same place as # before. so, try to set the cursor on the good place row = self.cursor_row self.cursor = self.get_cursor_from_index(self.cursor_index() if cursor is None else cursor) # if we back to a new line, reset the scroll, otherwise, the effect is # ugly if self.cursor_row != row: self.scroll_x = 0 # with the new text don't forget to update graphics again self._trigger_update_graphics() def _insert_lines(self, start, finish, len_lines, _lines_flags, _lines, _lines_labels, _line_rects): self_lines_flags = self._lines_flags _lins_flags = [] _lins_flags.extend(self_lines_flags[:start]) if len_lines: # if not inserting at first line then if start: # make sure line flags restored for first line # _split_smart assumes first line to be not a new line _lines_flags[0] = self_lines_flags[start] _lins_flags.extend(_lines_flags) _lins_flags.extend(self_lines_flags[finish:]) self._lines_flags = _lins_flags _lins_lbls = [] _lins_lbls.extend(self._lines_labels[:start]) if len_lines: _lins_lbls.extend(_lines_labels) _lins_lbls.extend(self._lines_labels[finish:]) self._lines_labels = _lins_lbls _lins_rcts = [] _lins_rcts.extend(self._lines_rects[:start]) if len_lines: _lins_rcts.extend(_line_rects) _lins_rcts.extend(self._lines_rects[finish:]) self._lines_rects = _lins_rcts _lins = [] _lins.extend(self._lines[:start]) if len_lines: _lins.extend(_lines) _lins.extend(self._lines[finish:]) self._lines = _lins def _trigger_update_graphics(self, *largs): Clock.unschedule(self._update_graphics) Clock.schedule_once(self._update_graphics, -1) def _update_graphics(self, *largs): # Update all the graphics according to the current internal values. # # This is a little bit complex, cause we have to : # - handle scroll_x # - handle padding # - create rectangle for the lines matching the viewport # - crop the texture coordinates to match the viewport # # This is the first step of graphics, the second is the selection. 
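        # Only lines intersecting the vertical viewport get their rectangle
        # added below; texture coordinates are cropped so partially visible
        # lines are clipped at the padding instead of overflowing it.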
self.canvas.clear() add = self.canvas.add lh = self.line_height dy = lh + self.line_spacing # adjust view if the cursor is going outside the bounds sx = self.scroll_x sy = self.scroll_y # draw labels if not self._lines or ( not self._lines[0] and len(self._lines) == 1): rects = self._hint_text_rects labels = self._hint_text_labels lines = self._hint_text_lines else: rects = self._lines_rects labels = self._lines_labels lines = self._lines padding_left, padding_top, padding_right, padding_bottom = self.padding x = self.x + padding_left y = self.top - padding_top + sy miny = self.y + padding_bottom maxy = self.top - padding_top for line_num, value in enumerate(lines): if miny <= y <= maxy + dy: texture = labels[line_num] size = list(texture.size) texc = texture.tex_coords[:] # calcul coordinate viewport_pos = sx, 0 vw = self.width - padding_left - padding_right vh = self.height - padding_top - padding_bottom tw, th = list(map(float, size)) oh, ow = tch, tcw = texc[1:3] tcx, tcy = 0, 0 # adjust size/texcoord according to viewport if viewport_pos: tcx, tcy = viewport_pos tcx = tcx / tw * (ow) tcy = tcy / th * oh if tw - viewport_pos[0] < vw: tcw = tcw - tcx size[0] = tcw * size[0] elif vw < tw: tcw = (vw / tw) * tcw size[0] = vw if vh < th: tch = (vh / th) * tch size[1] = vh # cropping mlh = lh if y > maxy: vh = (maxy - y + lh) tch = (vh / float(lh)) * oh tcy = oh - tch size[1] = vh if y - lh < miny: diff = miny - (y - lh) y += diff vh = lh - diff tch = (vh / float(lh)) * oh size[1] = vh texc = ( tcx, tcy + tch, tcx + tcw, tcy + tch, tcx + tcw, tcy, tcx, tcy) # add rectangle. r = rects[line_num] r.pos = int(x), int(y - mlh) r.size = size r.texture = texture r.tex_coords = texc add(r) y -= dy self._update_graphics_selection() def _update_graphics_selection(self): if not self._selection: return self.canvas.remove_group('selection') dy = self.line_height + self.line_spacing rects = self._lines_rects padding_top = self.padding[1] padding_bottom = self.padding[3] _top = self.top y = _top - padding_top + self.scroll_y miny = self.y + padding_bottom maxy = _top - padding_top draw_selection = self._draw_selection a, b = self._selection_from, self._selection_to if a > b: a, b = b, a get_cursor_from_index = self.get_cursor_from_index s1c, s1r = get_cursor_from_index(a) s2c, s2r = get_cursor_from_index(b) s2r += 1 # pass only the selection lines[] # passing all the lines can get slow when dealing with a lot of text y -= s1r * dy _lines = self._lines _get_text_width = self._get_text_width tab_width = self.tab_width _label_cached = self._label_cached width = self.width padding_left = self.padding[0] padding_right = self.padding[2] x = self.x canvas_add = self.canvas.add selection_color = self.selection_color for line_num, value in enumerate(_lines[s1r:s2r], start=s1r): if miny <= y <= maxy + dy: r = rects[line_num] draw_selection(r.pos, r.size, line_num, (s1c, s1r), (s2c, s2r - 1), _lines, _get_text_width, tab_width, _label_cached, width, padding_left, padding_right, x, canvas_add, selection_color) y -= dy self._position_handles('both') def _draw_selection(self, *largs): pos, size, line_num, (s1c, s1r), (s2c, s2r),\ _lines, _get_text_width, tab_width, _label_cached, width,\ padding_left, padding_right, x, canvas_add, selection_color = largs # Draw the current selection on the widget. 
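        # (s1c, s1r) and (s2c, s2r) are the (col, row) endpoints of the
        # selection. Only the first and last selected lines need a text-width
        # measurement; every line in between is selected edge to edge.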
if line_num < s1r or line_num > s2r: return x, y = pos w, h = size x1 = x x2 = x + w if line_num == s1r: lines = _lines[line_num] x1 -= self.scroll_x x1 += _get_text_width(lines[:s1c], tab_width, _label_cached) if line_num == s2r: lines = _lines[line_num] x2 = (x - self.scroll_x) + _get_text_width(lines[:s2c], tab_width, _label_cached) width_minus_padding = width - (padding_right + padding_left) maxx = x + width_minus_padding if x1 > maxx: return x1 = max(x1, x) x2 = min(x2, x + width_minus_padding) canvas_add(Color(*selection_color, group='selection')) canvas_add(Rectangle( pos=(x1, pos[1]), size=(x2 - x1, size[1]), group='selection')) def on_size(self, instance, value): # if the size change, we might do invalid scrolling / text split # size the text maybe be put after size_hint have been resolved. self._trigger_refresh_text() self._refresh_hint_text() self.scroll_x = self.scroll_y = 0 def _get_cursor_pos(self): # return the current cursor x/y from the row/col dy = self.line_height + self.line_spacing padding_left = self.padding[0] padding_top = self.padding[1] left = self.x + padding_left top = self.top - padding_top y = top + self.scroll_y y -= self.cursor_row * dy x, y = left + self.cursor_offset() - self.scroll_x, y if x < left: self.scroll_x = 0 x = left if y > top: y = top self.scroll_y = 0 return x, y def _get_line_options(self): # Get or create line options, to be used for Label creation if self._line_options is None: self._line_options = kw = { 'font_size': self.font_size, 'font_name': self.font_name, 'anchor_x': 'left', 'anchor_y': 'top', 'padding_x': 0, 'padding_y': 0, 'padding': (0, 0)} self._label_cached = Label(**kw) return self._line_options def _create_line_label(self, text, hint=False): # Create a label from a text, using line options ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width) if self.password and not hint: # Don't replace hint_text with * ntext = self.password_mask * len(ntext) kw = self._get_line_options() cid = '%s\0%s' % (ntext, str(kw)) texture = Cache_get('textinput.label', cid) if texture is None: # FIXME right now, we can't render very long line... # if we move on "VBO" version as fallback, we won't need to # do this. try to found the maximum text we can handle label = None label_len = len(ntext) ld = None # check for blank line if not ntext: texture = Texture.create(size=(1, 1)) Cache_append('textinput.label', cid, texture) return texture while True: try: label = Label(text=ntext[:label_len], **kw) label.refresh() if ld is not None and ld > 2: ld = int(ld / 2) label_len += ld else: break except: # exception happen when we tried to render the text # reduce it... if ld is None: ld = len(ntext) ld = int(ld / 2) if ld < 2 and label_len: label_len -= 1 label_len -= ld continue # ok, we found it. texture = label.texture Cache_append('textinput.label', cid, texture) return texture def _tokenize(self, text): # Tokenize a text string from some delimiters if text is None: return delimiters = u' ,\'".;:\n\r\t' oldindex = 0 for index, char in enumerate(text): if char not in delimiters: continue if oldindex != index: yield text[oldindex:index] yield text[index:index + 1] oldindex = index + 1 yield text[oldindex:] def _split_smart(self, text): # Do a "smart" split. If autowidth or autosize is set, # we are not doing smart split, just a split on line break. # Otherwise, we are trying to split as soon as possible, to prevent # overflow on the widget. 
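        # For example, with a width that fits u'hello ' but not
        # u'hello world', the input u'hello world\ngoodbye' yields
        #     lines       = [u'hello ', u'world', u'goodbye']
        #     lines_flags = [0, 0, FL_IS_NEWLINE]
        # i.e. the flag marks a line that starts a new hard paragraph.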
        # depending on the options, split the text on lines or words
        if not self.multiline:
            lines = text.split(u'\n')
            lines_flags = [0] + [FL_IS_NEWLINE] * (len(lines) - 1)
            return lines, lines_flags

        # no autosize, do wordwrap
        x = flags = 0
        line = []
        lines = []
        lines_flags = []
        _join = u''.join
        lines_append, lines_flags_append = lines.append, lines_flags.append
        padding_left = self.padding[0]
        padding_right = self.padding[2]
        width = self.width - padding_left - padding_right
        text_width = self._get_text_width
        _tab_width, _label_cached = self.tab_width, self._label_cached

        # try to add each word on current line.
        for word in self._tokenize(text):
            is_newline = (word == u'\n')
            w = text_width(word, _tab_width, _label_cached)
            # if we have more than the width, or if it's a newline,
            # push the current line, and create a new one
            if (x + w > width and line) or is_newline:
                lines_append(_join(line))
                lines_flags_append(flags)
                flags = 0
                line = []
                x = 0
            if is_newline:
                flags |= FL_IS_NEWLINE
            else:
                x += w
                line.append(word)
        if line or flags & FL_IS_NEWLINE:
            lines_append(_join(line))
            lines_flags_append(flags)

        return lines, lines_flags

    def _key_down(self, key, repeat=False):
        displayed_str, internal_str, internal_action, scale = key
        if internal_action is None:
            if self._selection:
                self.delete_selection()
            self.insert_text(displayed_str)
        elif internal_action in ('shift', 'shift_L', 'shift_R'):
            if not self._selection:
                self._selection_from = self._selection_to = \
                    self.cursor_index()
                self._selection = True
            self._selection_finished = False
        elif internal_action == 'ctrl_L':
            self._ctrl_l = True
        elif internal_action == 'ctrl_R':
            self._ctrl_r = True
        elif internal_action == 'alt_L':
            self._alt_l = True
        elif internal_action == 'alt_R':
            self._alt_r = True
        elif internal_action.startswith('cursor_'):
            cc, cr = self.cursor
            self.do_cursor_movement(internal_action,
                                    self._ctrl_l or self._ctrl_r,
                                    self._alt_l or self._alt_r)
            if self._selection and not self._selection_finished:
                self._selection_to = self.cursor_index()
                self._update_selection()
            else:
                self.cancel_selection()
        elif self._selection and internal_action in ('del', 'backspace'):
            self.delete_selection()
        elif internal_action == 'del':
            # Move cursor one char to the right. If that was successful,
            # do a backspace (effectively deleting char right of cursor)
            cursor = self.cursor
            self.do_cursor_movement('cursor_right')
            if cursor != self.cursor:
                self.do_backspace(mode='del')
        elif internal_action == 'backspace':
            self.do_backspace()
        elif internal_action == 'enter':
            if self.multiline:
                self.insert_text(u'\n')
            else:
                self.dispatch('on_text_validate')
                self.focus = False
        elif internal_action == 'escape':
            self.focus = False
        if internal_action != 'escape':
            # self._recalc_size()
            pass

    def _key_up(self, key, repeat=False):
        displayed_str, internal_str, internal_action, scale = key
        if internal_action in ('shift', 'shift_L', 'shift_R'):
            if self._selection:
                self._update_selection(True)
        elif internal_action == 'ctrl_L':
            self._ctrl_l = False
        elif internal_action == 'ctrl_R':
            self._ctrl_r = False
        elif internal_action == 'alt_L':
            self._alt_l = False
        elif internal_action == 'alt_R':
            self._alt_r = False

    def keyboard_on_key_down(self, window, keycode, text, modifiers):
        # Keycodes on OS X:
        ctrl, cmd = 64, 1024
        key, key_str = keycode
        win = EventLoop.window

        # This allows *either* ctrl *or* cmd, but not both.
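        # e.g. modifiers == ['ctrl', 'meta'] fails the exact-match test
        # below, so pressing ctrl and cmd together triggers no shortcut.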
is_shortcut = (modifiers == ['ctrl'] or ( _is_osx and modifiers == ['meta'])) is_interesting_key = key in (list(self.interesting_keys.keys()) + [27]) if not self.write_tab and super(TextInput, self).keyboard_on_key_down(window, keycode, text, modifiers): return True if not self._editable: # duplicated but faster testing for non-editable keys if text and not is_interesting_key: if is_shortcut and key == ord('c'): self.copy() elif key == 27: self.focus = False return True if text and not is_interesting_key: self._hide_handles(win) self._hide_cut_copy_paste(win) win.remove_widget(self._handle_middle) # check for command modes # we use \x01INFO\x02 to get info from IME on mobiles # pygame seems to pass \x01 as the unicode for ctrl+a # checking for modifiers ensures conflict resolution. first_char = ord(text[0]) if not modifiers and first_char == 1: self._command_mode = True self._command = '' if not modifiers and first_char == 2: self._command_mode = False self._command = self._command[1:] if self._command_mode: self._command += text return _command = self._command if _command and first_char == 2: from_undo = True _command, data = _command.split(':') self._command = '' if self._selection: self.delete_selection() if _command == 'DEL': count = int(data) if not count: self.delete_selection(from_undo=True) end = self.cursor_index() self._selection_from = max(end - count, 0) self._selection_to = end self._selection = True self.delete_selection(from_undo=True) return elif _command == 'INSERT': self.insert_text(data, from_undo) elif _command == 'INSERTN': from_undo = False self.insert_text(data, from_undo) elif _command == 'SELWORD': self.dispatch('on_double_tap') elif _command == 'SEL': if data == '0': Clock.schedule_once(lambda dt: self.cancel_selection()) elif _command == 'CURCOL': self.cursor = int(data), self.cursor_row return if is_shortcut: if key == ord('x'): # cut selection self._cut(self.selection_text) elif key == ord('c'): # copy selection self.copy() elif key == ord('v'): # paste selection self.paste() elif key == ord('a'): # select all self.select_all() elif key == ord('z'): # undo self.do_undo() elif key == ord('r'): # redo self.do_redo() else: if EventLoop.window.__class__.__module__ == \ 'kivy.core.window.window_sdl2': return if self._selection: self.delete_selection() self.insert_text(text) #self._recalc_size() return if is_interesting_key: self._hide_cut_copy_paste(win) self._hide_handles(win) if key == 27: # escape self.focus = False return True elif key == 9: # tab self.insert_text(u'\t') return True k = self.interesting_keys.get(key) if k: key = (None, None, k, 1) self._key_down(key) def keyboard_on_key_up(self, window, keycode): key, key_str = keycode k = self.interesting_keys.get(key) if k: key = (None, None, k, 1) self._key_up(key) def keyboard_on_textinput(self, window, text): if self._selection: self.delete_selection() self.insert_text(text, False) def on_hint_text(self, instance, value): self._refresh_hint_text() def _refresh_hint_text(self): _lines, self._hint_text_flags = self._split_smart(self.hint_text) _hint_text_labels = [] _hint_text_rects = [] _create_label = self._create_line_label for x in _lines: lbl = _create_label(x, hint=True) _hint_text_labels.append(lbl) _hint_text_rects.append(Rectangle(size=lbl.size)) self._hint_text_lines = _lines self._hint_text_labels = _hint_text_labels self._hint_text_rects = _hint_text_rects # Remember to update graphics self._trigger_update_graphics() # # Properties # _lines = ListProperty([]) _hint_text_lines = ListProperty([]) 
_editable = BooleanProperty(True) _insert_int_patu = re.compile(u'[^0-9]') _insert_int_patb = re.compile(b'[^0-9]') readonly = BooleanProperty(False) '''If True, the user will not be able to change the content of a textinput. .. versionadded:: 1.3.0 :attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' multiline = BooleanProperty(True) '''If True, the widget will be able show multiple lines of text. If False, the "enter" keypress will defocus the textinput instead of adding a new line. :attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' password = BooleanProperty(False) '''If True, the widget will display its characters as the character set in :attr:`password_mask`. .. versionadded:: 1.2.0 :attr:`password` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' password_mask = StringProperty('*') '''Sets the character used to mask the text when :attr:`password` is True. .. versionadded:: 1.9.2 :attr:`password_mask` is a :class:`~kivy.properties.StringProperty` and defaults to `'*'`. ''' keyboard_suggestions = BooleanProperty(True) '''If True provides auto suggestions on top of keyboard. This will only work if :attr:`input_type` is set to `text`. .. versionadded:: 1.8.0 :attr:`keyboard_suggestions` is a :class:`~kivy.properties.BooleanProperty` defaults to True. ''' cursor_blink = BooleanProperty(False) '''This property is used to blink the cursor graphic. The value of :attr:`cursor_blink` is automatically computed. Setting a value on it will have no impact. :attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' def _get_cursor(self): return self._cursor def _set_cursor(self, pos): if not self._lines: self._trigger_refresh_text() return l = self._lines cr = boundary(pos[1], 0, len(l) - 1) cc = boundary(pos[0], 0, len(l[cr])) cursor = cc, cr if self._cursor == cursor: return self._cursor = cursor # adjust scrollview to ensure that the cursor will be always inside our # viewport. padding_left = self.padding[0] padding_right = self.padding[2] viewport_width = self.width - padding_left - padding_right sx = self.scroll_x offset = self.cursor_offset() # if offset is outside the current bounds, reajust if offset > viewport_width + sx: self.scroll_x = offset - viewport_width if offset < sx: self.scroll_x = offset # do the same for Y # this algo try to center the cursor as much as possible dy = self.line_height + self.line_spacing offsety = cr * dy sy = self.scroll_y padding_top = self.padding[1] padding_bottom = self.padding[3] viewport_height = self.height - padding_top - padding_bottom - dy if offsety > viewport_height + sy: sy = offsety - viewport_height if offsety < sy: sy = offsety self.scroll_y = sy return True cursor = AliasProperty(_get_cursor, _set_cursor) '''Tuple of (row, col) values indicating the current cursor position. You can set a new (row, col) if you want to move the cursor. The scrolling area will be automatically updated to ensure that the cursor is visible inside the viewport. :attr:`cursor` is an :class:`~kivy.properties.AliasProperty`. ''' def _get_cursor_col(self): return self._cursor[0] cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', )) '''Current column of the cursor. :attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to cursor[0], read-only. ''' def _get_cursor_row(self): return self._cursor[1] cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', )) '''Current row of the cursor. 
:attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to cursor[1], read-only. ''' cursor_pos = AliasProperty(_get_cursor_pos, None, bind=( 'cursor', 'padding', 'pos', 'size', 'focus', 'scroll_x', 'scroll_y')) '''Current position of the cursor, in (x, y). :attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`, read-only. ''' cursor_color = ListProperty([1, 0, 0, 1]) '''Current color of the cursor, in (r, g, b, a) format. .. versionadded:: 1.9.0 :attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 0, 0, 1]. ''' line_height = NumericProperty(1) '''Height of a line. This property is automatically computed from the :attr:`font_name`, :attr:`font_size`. Changing the line_height will have no impact. .. note:: :attr:`line_height` is the height of a single line of text. Use :attr:`minimum_height`, which also includes padding, to get the height required to display the text properly. :attr:`line_height` is a :class:`~kivy.properties.NumericProperty`, read-only. ''' tab_width = NumericProperty(4) '''By default, each tab will be replaced by four spaces on the text input widget. You can set a lower or higher value. :attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and defaults to 4. ''' padding_x = VariableListProperty([0, 0], length=2) '''Horizontal padding of the text: [padding_left, padding_right]. padding_x also accepts a one argument form [padding_horizontal]. :attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. This might be changed by the current theme. .. deprecated:: 1.7.0 Use :attr:`padding` instead. ''' def on_padding_x(self, instance, value): self.padding[0] = value[0] self.padding[2] = value[1] padding_y = VariableListProperty([0, 0], length=2) '''Vertical padding of the text: [padding_top, padding_bottom]. padding_y also accepts a one argument form [padding_vertical]. :attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. This might be changed by the current theme. .. deprecated:: 1.7.0 Use :attr:`padding` instead. ''' def on_padding_y(self, instance, value): self.padding[1] = value[0] self.padding[3] = value[1] padding = VariableListProperty([6, 6, 6, 6]) '''Padding of the text: [padding_left, padding_top, padding_right, padding_bottom]. padding also accepts a two argument form [padding_horizontal, padding_vertical] and a one argument form [padding]. .. versionchanged:: 1.7.0 Replaced AliasProperty with VariableListProperty. :attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and defaults to [6, 6, 6, 6]. ''' scroll_x = NumericProperty(0) '''X scrolling value of the viewport. The scrolling is automatically updated when the cursor is moved or text changed. If there is no user input, the scroll_x and scroll_y properties may be changed. :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' scroll_y = NumericProperty(0) '''Y scrolling value of the viewport. See :attr:`scroll_x` for more information. :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' selection_color = ListProperty([0.1843, 0.6549, 0.8313, .5]) '''Current color of the selection, in (r, g, b, a) format. .. warning:: The color should always have an "alpha" component less than 1 since the selection is drawn after the text. :attr:`selection_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0.1843, 0.6549, 0.8313, .5]. 
''' border = ListProperty([4, 4, 4, 4]) '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage` graphics instruction. Used with :attr:`background_normal` and :attr:`background_active`. Can be used for a custom background. .. versionadded:: 1.4.1 It must be a list of four values: (top, right, bottom, left). Read the BorderImage instruction for more information about how to use it. :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to (4, 4, 4, 4). ''' background_normal = StringProperty( 'atlas://data/images/defaulttheme/textinput') '''Background image of the TextInput when it's not in focus. .. versionadded:: 1.4.1 :attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput'. ''' background_disabled_normal = StringProperty( 'atlas://data/images/defaulttheme/textinput_disabled') '''Background image of the TextInput when disabled. .. versionadded:: 1.8.0 :attr:`background_disabled_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_disabled'. ''' background_active = StringProperty( 'atlas://data/images/defaulttheme/textinput_active') '''Background image of the TextInput when it's in focus. .. versionadded:: 1.4.1 :attr:`background_active` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_active'. ''' background_disabled_active = StringProperty( 'atlas://data/images/defaulttheme/textinput_disabled_active') '''Background image of the TextInput when it's in focus and disabled. .. versionadded:: 1.8.0 :attr:`background_disabled_active` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_disabled_active'. ''' background_color = ListProperty([1, 1, 1, 1]) '''Current color of the background, in (r, g, b, a) format. .. versionadded:: 1.2.0 :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1] (white). ''' foreground_color = ListProperty([0, 0, 0, 1]) '''Current color of the foreground, in (r, g, b, a) format. .. versionadded:: 1.2.0 :attr:`foreground_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0, 0, 0, 1] (black). ''' disabled_foreground_color = ListProperty([0, 0, 0, .5]) '''Current color of the foreground when disabled, in (r, g, b, a) format. .. versionadded:: 1.8.0 :attr:`disabled_foreground_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0, 0, 0, 5] (50% transparent black). ''' use_bubble = BooleanProperty(not _is_desktop) '''Indicates whether the cut/copy/paste bubble is used. .. versionadded:: 1.7.0 :attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty` and defaults to True on mobile OS's, False on desktop OS's. ''' use_handles = BooleanProperty(not _is_desktop) '''Indicates whether the selection handles are displayed. .. versionadded:: 1.8.0 :attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty` and defaults to True on mobile OS's, False on desktop OS's. ''' suggestion_text = StringProperty('') '''Shows a suggestion text/word from currentcursor position onwards, that can be used as a possible completion. Usefull for suggesting completion text. This can also be used by the IME to setup the current word being edited .. 
versionadded:: 1.9.0

    :attr:`suggestion_text` is a :class:`~kivy.properties.StringProperty`
    defaults to `''`
    '''

    def on_suggestion_text(self, instance, value):
        global MarkupLabel
        if not MarkupLabel:
            from kivy.core.text.markup import MarkupLabel

        cursor_pos = self.cursor_pos
        txt = self._lines[self.cursor_row]
        cr = self.cursor_row
        kw = self._get_line_options()
        rct = self._lines_rects[cr]

        lbl = text = None
        if value:
            lbl = MarkupLabel(
                text=txt + "[b]{}[/b]".format(value), **kw)
        else:
            lbl = Label(**kw)
            text = txt

        lbl.refresh()

        self._lines_labels[cr] = lbl.texture
        rct.size = lbl.size
        self._update_graphics()

    def get_sel_from(self):
        return self._selection_from

    selection_from = AliasProperty(get_sel_from, None)
    '''If a selection is in progress or complete, this property will represent
    the cursor index where the selection started.

    .. versionchanged:: 1.4.0
        :attr:`selection_from` is an :class:`~kivy.properties.AliasProperty`
        and defaults to None, readonly.
    '''

    def get_sel_to(self):
        return self._selection_to

    selection_to = AliasProperty(get_sel_to, None)
    '''If a selection is in progress or complete, this property will represent
    the cursor index where the selection ends.

    .. versionchanged:: 1.4.0
        :attr:`selection_to` is an :class:`~kivy.properties.AliasProperty`
        and defaults to None, readonly.
    '''

    selection_text = StringProperty(u'')
    '''Current content selection.

    :attr:`selection_text` is a :class:`~kivy.properties.StringProperty` and
    defaults to '', readonly.
    '''

    def on_selection_text(self, instance, value):
        if value:
            if self.use_handles:
                self._trigger_show_handles()
            if CutBuffer and not self.password:
                self._trigger_update_cutbuffer()

    def _get_text(self, encode=False):
        lf = self._lines_flags
        l = self._lines
        len_l = len(l)

        if len(lf) < len_l:
            lf.append(1)

        text = u''.join([(u'\n' if (lf[i] & FL_IS_NEWLINE) else u'') + l[i]
                         for i in range(len_l)])

        if encode and not isinstance(text, bytes):
            text = text.encode('utf8')
        return text

    def _set_text(self, text):
        if isinstance(text, bytes):
            text = text.decode('utf8')

        if self.replace_crlf:
            text = text.replace(u'\r\n', u'\n')

        if self._get_text(encode=False) == text:
            return

        self._refresh_text(text)
        self.cursor = self.get_cursor_from_index(len(text))

    text = AliasProperty(_get_text, _set_text, bind=('_lines', ))
    '''Text of the widget.

    Creation of a simple hello world::

        widget = TextInput(text='Hello world')

    If you want to create the widget with a unicode string, use::

        widget = TextInput(text=u'My unicode string')

    :attr:`text` is an :class:`~kivy.properties.AliasProperty`.
    '''

    font_name = StringProperty('Roboto')
    '''Filename of the font to use. The path can be absolute or relative.
    Relative paths are resolved by the :func:`~kivy.resources.resource_find`
    function.

    .. warning::

        Depending on your text provider, the font file may be ignored. However,
        you can mostly use this without problems.

        If the font used lacks the glyphs for the particular language/symbols
        you are using, you will see '[]' blank box characters instead of the
        actual glyphs. The solution is to use a font that has the glyphs you
        need to display. For example, to display |unicodechar|, use a font like
        freesans.ttf that has the glyph.

        .. |unicodechar| image:: images/unicode-char.png

    :attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'Roboto'.
    '''

    font_size = NumericProperty('15sp')
    '''Font size of the text in pixels.

    :attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '15sp'.
    '''

    hint_text = StringProperty('')
    '''Hint text of the widget.
Shown if text is '' and focus is False. .. versionadded:: 1.6.0 :attr:`hint_text` a :class:`~kivy.properties.StringProperty` and defaults to ''. ''' hint_text_color = ListProperty([0.5, 0.5, 0.5, 1.0]) '''Current color of the hint_text text, in (r, g, b, a) format. .. versionadded:: 1.6.0 :attr:`hint_text_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0.5, 0.5, 0.5, 1.0] (grey). ''' auto_indent = BooleanProperty(False) '''Automatically indent multiline text. .. versionadded:: 1.7.0 :attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' replace_crlf = BooleanProperty(True) '''Automatically replace CRLF with LF. .. versionadded:: 1.9.1 :attr:`replace_crlf` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' allow_copy = BooleanProperty(True) '''Decides whether to allow copying the text. .. versionadded:: 1.8.0 :attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' def _get_min_height(self): return (len(self._lines) * (self.line_height + self.line_spacing) + self.padding[1] + self.padding[3]) minimum_height = AliasProperty(_get_min_height, None, bind=('_lines', 'line_spacing', 'padding', 'font_size', 'font_name', 'password', 'hint_text', 'line_height')) '''Minimum height of the content inside the TextInput. .. versionadded:: 1.8.0 :attr:`minimum_height` is a readonly :class:`~kivy.properties.AliasProperty`. .. warning:: :attr:`minimum_width` is calculated based on :attr:`width` therefore code like this will lead to an infinite loop:: <FancyTextInput>: height: self.minimum_height width: self.height ''' line_spacing = NumericProperty(0) '''Space taken up between the lines. .. versionadded:: 1.8.0 :attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' input_filter = ObjectProperty(None, allownone=True) ''' Filters the input according to the specified mode, if not None. If None, no filtering is applied. .. versionadded:: 1.9.0 :attr:`input_filter` is an :class:`~kivy.properties.ObjectProperty` and defaults to `None`. Can be one of `None`, `'int'` (string), or `'float'` (string), or a callable. If it is `'int'`, it will only accept numbers. If it is `'float'` it will also accept a single period. Finally, if it is a callable it will be called with two parameter; the string to be added and a bool indicating whether the string is a result of undo (True). The callable should return a new substring that will be used instead. ''' handle_image_middle = StringProperty( 'atlas://data/images/defaulttheme/selector_middle') '''Image used to display the middle handle on the TextInput for cursor positioning. .. versionadded:: 1.8.0 :attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_middle'. ''' def on_handle_image_middle(self, instance, value): if self._handle_middle: self._handle_middle.source = value handle_image_left = StringProperty( 'atlas://data/images/defaulttheme/selector_left') '''Image used to display the Left handle on the TextInput for selection. .. versionadded:: 1.8.0 :attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_left'. 
''' def on_handle_image_left(self, instance, value): if self._handle_left: self._handle_left.source = value handle_image_right = StringProperty( 'atlas://data/images/defaulttheme/selector_right') '''Image used to display the Right handle on the TextInput for selection. .. versionadded:: 1.8.0 :attr:`handle_image_right` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_right'. ''' def on_handle_image_right(self, instance, value): if self._handle_right: self._handle_right.source = value write_tab = BooleanProperty(True) '''Whether the tab key should move focus to the next widget or if it should enter a tab in the :class:`TextInput`. If `True` a tab will be written, otherwise, focus will move to the next widget. .. versionadded:: 1.9.0 :attr:`write_tab` is a :class:`~kivy.properties.BooleanProperty` and defaults to `True`. ''' if __name__ == '__main__': from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.lang import Builder class TextInputApp(App): def build(self): Builder.load_string(''' <TextInput> on_text: self.suggestion_text = '' self.suggestion_text = 'ion_text' ''') root = BoxLayout(orientation='vertical') textinput = TextInput(multiline=True, use_bubble=True, use_handles=True) #textinput.text = __doc__ root.add_widget(textinput) textinput2 = TextInput(multiline=False, text='monoline textinput', size_hint=(1, None), height=30) root.add_widget(textinput2) return root TextInputApp().run()
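
# A minimal sketch of the callable form of `input_filter` documented above:
# the callable receives the substring about to be inserted and a bool saying
# whether it comes from an undo, and returns the substring actually inserted.
# The name `digits_only` is illustrative, not part of Kivy's API.
def digits_only(substring, from_undo):
    # Keep only decimal digits from typed or pasted text.
    return u''.join(ch for ch in substring if ch.isdigit())

# Usage sketch: TextInput(input_filter=digits_only, multiline=False)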
mit
6,712,873,367,837,512,000
34.263362
80
0.549193
false
3.997723
false
false
false
robcarver17/systematictradingexamples
plots_for_perhaps/compareoptmethods.py
1
22426
import numpy as np import matplotlib.pyplot as plt from matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, text, bar, subplots import Image def file_process(filename): fig = plt.gcf() fig.set_size_inches(18.5,10.5) fig.savefig("/home/rob/%s.png" % filename,dpi=300) fig.savefig("/home/rob/%sLOWRES.png" % filename,dpi=50) Image.open("/home/rob/%s.png" % filename).convert('L').save("/home/rob/%s.jpg" % filename) Image.open("/home/rob/%sLOWRES.png" % filename).convert('L').save("/home/rob/%sLOWRES.jpg" % filename) """ compare: handcrafting bootstrapped one shot equal weights market cap weights """ import pandas as pd from datetime import datetime as dt def read_ts_csv(fname, dindex="Date"): data=pd.read_csv(fname) dateindex=[dt.strptime(dx, "%d/%m/%y") for dx in list(data[dindex])] data.index=dateindex del(data[dindex]) return data def calc_asset_returns(rawdata, tickers): asset_returns=pd.concat([get_monthly_tr(tickname, rawdata) for tickname in tickers], axis=1) asset_returns.columns=tickers return asset_returns def get_monthly_tr(tickname, rawdata): total_returns=rawdata[tickname+"_TR"] return (total_returns / total_returns.shift(1)) - 1.0 def portfolio_return(asset_returns, cash_weights): index_returns=asset_returns.cumsum().ffill().diff() cash_align = cash_weights.reindex(asset_returns.index, method="ffill") cash_align[np.isnan(index_returns)]=0.0 cash_align[np.isnan(cash_align)]=0.0 vols=pd.ewmstd(asset_returns, span=100, min_periods=1) riskweights=pd.DataFrame(cash_align.values / vols.values, index=vols.index) riskweights.columns=asset_returns.columns riskweights[np.isnan(riskweights)]=0.0 def _rowfix(x): if all([y==0.0 for y in x]): return x sumx=sum(x) return [y/sumx for y in x] riskweights = riskweights.apply(_rowfix, axis=1) portfolio_returns=asset_returns*riskweights portfolio_returns[np.isnan(portfolio_returns)]=0.0 portfolio_returns=portfolio_returns.sum(axis=1) return portfolio_returns import matplotlib.pyplot as plt from scipy import stats import pandas as pd import numpy as np from datetime import datetime as dt import datetime from scipy.optimize import minimize from copy import copy import random def correlation_matrix(returns): """ Calcs a correlation matrix using weekly returns from a pandas time series We use weekly returns because otherwise end of day effects, especially over time zones, give unrealistically low correlations """ asset_index=returns.cumsum().ffill() asset_index=asset_index.resample('1W') ## Only want index, fill method is irrelevant asset_index = asset_index - asset_index.shift(1) return asset_index.corr().values def create_dull_pd_matrix(dullvalue=0.0, dullname="A", startdate=pd.datetime(1970,1,1).date(), enddate=datetime.datetime.now().date(), index=None): """ create a single valued pd matrix """ if index is None: index=pd.date_range(startdate, enddate) dullvalue=np.array([dullvalue]*len(index)) ans=pd.DataFrame(dullvalue, index, columns=[dullname]) return ans def addem(weights): ## Used for constraints return 1.0 - sum(weights) def variance(weights, sigma): ## returns the variance (NOT standard deviation) given weights and sigma return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0] def neg_SR(weights, sigma, mus): ## Returns minus the Sharpe Ratio (as we're minimising) """ estreturn=250.0*((np.matrix(x)*mus)[0,0]) variance=(variance(x,sigma)**.5)*16.0 """ estreturn=(np.matrix(weights)*mus)[0,0] std_dev=(variance(weights,sigma)**.5) return -estreturn/std_dev def 
sigma_from_corr(std, corr): sigma=std*corr*std return sigma def basic_opt(std,corr,mus): number_assets=mus.shape[0] sigma=sigma_from_corr(std, corr) start_weights=[1.0/number_assets]*number_assets ## Constraints - positive weights, adding to 1.0 bounds=[(0.0,1.0)]*number_assets cdict=[{'type':'eq', 'fun':addem}] return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001) def neg_SR_riskfree(weights, sigma, mus, riskfree=0.005): ## Returns minus the Sharpe Ratio (as we're minimising) """ estreturn=250.0*((np.matrix(x)*mus)[0,0]) variance=(variance(x,sigma)**.5)*16.0 """ estreturn=(np.matrix(weights)*mus)[0,0] - riskfree std_dev=(variance(weights,sigma)**.5) return -estreturn/std_dev def equalise_vols(returns, default_vol): """ Normalises returns so they have the in sample vol of defaul_vol (annualised) Assumes daily returns """ factors=(default_vol/16.0)/returns.std(axis=0) facmat=create_dull_pd_matrix(dullvalue=factors, dullname=returns.columns, index=returns.index) norm_returns=returns*facmat norm_returns.columns=returns.columns return norm_returns def offdiag_matrix(offvalue, nlength): identity=np.diag([1.0]*nlength) for x in range(nlength): for y in range(nlength): if x!=y: identity[x][y]=offvalue return identity def get_avg_corr(sigma): new_sigma=copy(sigma) np.fill_diagonal(new_sigma,np.nan) return np.nanmean(new_sigma) def nearest_to_listvals(x, lvalues=[0.0, 0.25, 0.5, 0.75, 0.9]): ## return x rounded to nearest of lvalues if len(lvalues)==1: return lvalues[0] d1=abs(x - lvalues[0]) d2=abs(x - lvalues[1]) if d1<d2: return lvalues[0] newlvalues=lvalues[1:] return nearest_to_listvals(x, newlvalues) def handcrafted(returns, equalisevols=True, default_vol=0.2): """ Handcrafted optimiser """ count_assets=len(returns.columns) try: assert equalisevols is True assert count_assets<=3 except: raise Exception("Handcrafting only works with equalised vols and 3 or fewer assets") if count_assets<3: ## Equal weights return [1.0/count_assets]*count_assets est_corr=returns.corr().values c1=nearest_to_listvals(est_corr[0][1]) c2=nearest_to_listvals(est_corr[0][2]) c3=nearest_to_listvals(est_corr[1][2]) wts_to_use=HANDCRAFTED_WTS[(HANDCRAFTED_WTS.c1==c1) & (HANDCRAFTED_WTS.c2==c2) & (HANDCRAFTED_WTS.c3==c3)].irow(0) return [wts_to_use.w1, wts_to_use.w2, wts_to_use.w3] def opt_shrinkage(returns, shrinkage_factors, equalisevols=True, default_vol=0.2): """ Returns the optimal portfolio for the dataframe returns using shrinkage shrinkage_factors is a tuple, shrinkage of mean and correlation If equalisevols=True then normalises returns to have same standard deviation; the weights returned will be 'risk weightings' """ if equalisevols: use_returns=equalise_vols(returns, default_vol) else: use_returns=returns (shrinkage_mean, shrinkage_corr)=shrinkage_factors ## Sigma matrix ## Use correlation and then convert back to variance est_corr=use_returns.corr().values avg_corr=get_avg_corr(est_corr) prior_corr=offdiag_matrix(avg_corr, est_corr.shape[0]) sigma_corr=shrinkage_corr*prior_corr+(1-shrinkage_corr)*est_corr cov_vector=use_returns.std().values sigma=cov_vector*sigma_corr*cov_vector ## mus vector avg_return=np.mean(use_returns.mean()) est_mus=np.array([use_returns[asset_name].mean() for asset_name in use_returns.columns], ndmin=2).transpose() prior_mus=np.array([avg_return for asset_name in use_returns.columns], ndmin=2).transpose() mus=shrinkage_mean*prior_mus+(1-shrinkage_mean)*est_mus ## Starting weights number_assets=use_returns.shape[1] 
start_weights=[1.0/number_assets]*number_assets ## Constraints - positive weights, adding to 1.0 bounds=[(0.0,1.0)]*number_assets cdict=[{'type':'eq', 'fun':addem}] ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001) return ans['x'] def handcraft_equal(returns): """ dynamic handcrafting, equal weights only """ ## RETURNS Correlation matrix use_returns=equalise_vols(returns, default_vol=16.0) ## Sigma matrix = correlations sigma=use_returns.cov() sigma[sigma<0.0]=0.0 ungroupedreturns=dict([(x,returns[x]) for x in returns.columns]) tree_data=hc_sigma(sigma, ungroupedreturns) tree_data=grouping_tree(tree_data) weights=tree_to_weights(tree_data) return weights def hc_sigma(ungrouped_sigma, ungroupedreturns, groupdata=None): """ handcraft weights from sigma matrix Algo: - Find pair of assets with highest correlation - Form them into a new group with equal weights - The group becomes like a new asset - Once we only have two assets left, stop. Need to """ if len(ungroupedreturns)==1: return groupdata[1] if groupdata is None: ## first run ## groupdata stores grouping information ## To begin with each group just consists of one asset groupdata=[[],list(ungrouped_sigma.columns)] groupedreturns=dict() ## iteration while len(ungroupedreturns)>0: ## current_sigma consists of the correlation of things we currently have if len(ungroupedreturns)==1: idx_list=[0] else: idx_list=find_highest_corr(ungrouped_sigma) name_list=tuple([ungrouped_sigma.columns[idx] for idx in idx_list]) ## pair those things up (ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata)=group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list) new_returns=pd.concat(groupedreturns, axis=1) new_sigma=new_returns.corr() ## recursive return hc_sigma(new_sigma, groupedreturns, groupdata=[[],groupdata[0]]) def find_highest_corr(sigmat): new_sigmat=copy(sigmat.values) np.fill_diagonal(new_sigmat, -100.0) (i,j)=np.unravel_index(new_sigmat.argmax(), new_sigmat.shape) return (i,j) def group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list): """ Group assets """ todelete=[] names=[] grouping=[] group_returns=[] weights=[1.0/len(idx_list)]*len(idx_list) ## could have more complex thing here... 
for (itemweight,idx, iname) in zip(weights,idx_list, name_list): gi=groupdata[1][idx] grouping.append(gi) gri=ungroupedreturns.pop(iname) group_returns.append(gri*itemweight) names.append(gri.name) ungrouped_sigma=ungrouped_sigma.drop(iname, axis=0) ungrouped_sigma=ungrouped_sigma.drop(iname, axis=1) todelete.append(idx) groupdata[0].append(grouping) gr_returns=pd.concat(group_returns, axis=1) gr_returns=gr_returns.sum(axis=1) gr_returns.name="[%s]" % "+".join(names) print "Pairing %s" % ", ".join(names) groupedreturns[gr_returns.name]=gr_returns groupdata[1]=[element for eindex, element in enumerate(groupdata[1]) if eindex not in todelete] return (ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata) def grouping_tree(tree_data, sigma): """ Group branches of 2 into larger if possible """ pass def corrs_in_group(group, sigma): asset_list=sum(group, []) littlesigma=sigma.loc[asset_list, asset_list] def corr_from_leaf(leaf, sigma): return sigma[leaf[0]][leaf[1]] def tree_to_weights(tree_data): """ convert a tree into weights """ pass def markosolver(returns, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0): """ Returns the optimal portfolio for the dataframe returns If equalisemeans=True then assumes all assets have same return if False uses the asset means If equalisevols=True then normalises returns to have same standard deviation; the weights returned will be 'risk weightings' Note if usemeans=True and equalisevols=True effectively assumes all assets have same sharpe ratio """ if equalisevols: use_returns=equalise_vols(returns, default_vol) else: use_returns=returns ## Sigma matrix sigma=use_returns.cov().values ## Expected mean returns est_mus=[use_returns[asset_name].mean() for asset_name in use_returns.columns] missingvals=[np.isnan(x) for x in est_mus] if equalisemeans: ## Don't use the data - Set to the average Sharpe Ratio mus=[default_vol*default_SR]*returns.shape[1] else: mus=est_mus mus=np.array(mus, ndmin=2).transpose() ## Starting weights number_assets=use_returns.shape[1] start_weights=[1.0/number_assets]*number_assets ## Constraints - positive weights, adding to 1.0 bounds=[(0.0,1.0)]*number_assets cdict=[{'type':'eq', 'fun':addem}] ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001) wts=ans['x'] return wts def bootstrap_portfolio(returns_to_bs, monte_carlo=200, monte_length=250, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0): """ Given dataframe of returns; returns_to_bs, performs a bootstrap optimisation We run monte_carlo numbers of bootstraps Each one contains monte_length days drawn randomly, with replacement (so *not* block bootstrapping) The other arguments are passed to the optimisation function markosolver Note - doesn't deal gracefully with missing data. Will end up downweighting stuff depending on how much data is missing in each boostrap. You'll need to think about how to solve this problem. """ weightlist=[] for unused_index in range(monte_carlo): bs_idx=[int(random.uniform(0,1)*len(returns_to_bs)) for i in range(monte_length)] returns=returns_to_bs.iloc[bs_idx,:] weight=markosolver(returns, equalisemeans=equalisemeans, equalisevols=equalisevols, default_vol=default_vol, default_SR=default_SR) weightlist.append(weight) ### We can take an average here; only because our weights always add up to 1. 
If that isn't true ### then you will need to some kind of renormalisation theweights_mean=list(np.mean(weightlist, axis=0)) return theweights_mean def optimise_over_periods(data, date_method, fit_method, rollyears=20, equalisemeans=False, equalisevols=True, monte_carlo=100, monte_length=None, shrinkage_factors=(0.5, 0.5), weightdf=None): """ Do an optimisation Returns data frame of weights Note if fitting in sample weights will be somewhat boring Doesn't deal with eg missing data in certain subperiods """ if monte_length is None: monte_length=int(len(data.index)*.1) ## Get the periods fit_periods=generate_fitting_dates(data, date_method, rollyears=rollyears) ## Do the fitting ## Build up a list of weights, which we'll concat weight_list=[] for fit_tuple in fit_periods: ## Fit on the slice defined by first two parts of the tuple period_subset_data=data[fit_tuple[0]:fit_tuple[1]] ## Can be slow, if bootstrapping, so indicate where we are print "Fitting data for %s to %s" % (str(fit_tuple[2]), str(fit_tuple[3])) if fit_method=="one_period": weights=markosolver(period_subset_data, equalisemeans=equalisemeans, equalisevols=equalisevols) elif fit_method=="bootstrap": weights=bootstrap_portfolio(period_subset_data, equalisemeans=equalisemeans, equalisevols=equalisevols, monte_carlo=monte_carlo, monte_length=monte_length) elif fit_method=="shrinkage": weights=opt_shrinkage(period_subset_data, shrinkage_factors=shrinkage_factors, equalisevols=equalisevols) elif fit_method=="fixed": weights=[float(weightdf[weightdf.Country==ticker].Weight.values) for ticker in list(period_subset_data.columns)] else: raise Exception("Fitting method %s unknown" % fit_method) ## We adjust dates slightly to ensure no overlaps dindex=[fit_tuple[2]+datetime.timedelta(seconds=1), fit_tuple[3]-datetime.timedelta(seconds=1)] ## create a double row to delineate start and end of test period weight_row=pd.DataFrame([weights]*2, index=dindex, columns=data.columns) weight_list.append(weight_row) weight_df=pd.concat(weight_list, axis=0) return weight_df """ Now we need to do this with expanding or rolling window """ """ Generate the date tuples """ def generate_fitting_dates(data, date_method, rollyears=20): """ generate a list 4 tuples, one element for each year in the data each tuple contains [fit_start, fit_end, period_start, period_end] datetime objects the last period will be a 'stub' if we haven't got an exact number of years date_method can be one of 'in_sample', 'expanding', 'rolling' if 'rolling' then use rollyears variable """ start_date=data.index[0] end_date=data.index[-1] ## generate list of dates, one year apart, including the final date yearstarts=list(pd.date_range(start_date, end_date, freq="12M"))+[end_date] ## loop through each period periods=[] for tidx in range(len(yearstarts))[1:-1]: ## these are the dates we test in period_start=yearstarts[tidx] period_end=yearstarts[tidx+1] ## now generate the dates we use to fit if date_method=="in_sample": fit_start=start_date elif date_method=="expanding": fit_start=start_date elif date_method=="rolling": yearidx_to_use=max(0, tidx-rollyears) fit_start=yearstarts[yearidx_to_use] else: raise Exception("don't recognise date_method %s" % date_method) if date_method=="in_sample": fit_end=end_date elif date_method in ['rolling', 'expanding']: fit_end=period_start else: raise Exception("don't recognise date_method %s " % date_method) periods.append([fit_start, fit_end, period_start, period_end]) ## give the user back the list of periods return periods 
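
## Illustrative check of the period generator above, guarded so it doesn't
## change this script's behaviour. The synthetic index is arbitrary (column
## name "x", daily dates from 2000); it only demonstrates the
## [fit_start, fit_end, period_start, period_end] shape of each element.
if False:
    _demo=pd.DataFrame(dict(x=[0.0]*1500), index=pd.date_range(pd.datetime(2000,1,1), periods=1500))
    for _p in generate_fitting_dates(_demo, "expanding")[:2]:
        ## with an expanding window fit_end equals period_start, so we never
        ## fit on data from the period we are about to test in
        print _p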
rawdata=read_ts_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_data.csv")
refdata=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_ref.csv")

tickers=list(refdata[(refdata.EmorDEV=="DEV") & (refdata.Type=="Country")].Country.values) #mom 12bp
#tickers=list(refdata[refdata.Type=="Country"].Country.values) #mom 12bp

fix_hcweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devhcweights.csv")
fix_capweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devcapweights.csv")
fix_eqweights=pd.DataFrame(dict(Country=tickers, Weight=[1.0/len(tickers)]*len(tickers)))

data=calc_asset_returns(rawdata, tickers)

### IDEA: to bootstrap the results
### Repeatedly draw from 'data' to make new pseudo series

oneperiodweights=optimise_over_periods(data, "expanding", "one_period", equalisemeans=False, equalisevols=True)
#bootstrapweights=optimise_over_periods(data, "expanding", "bootstrap", equalisemeans=True, equalisevols=True)
exposthcweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_hcweights, equalisemeans=True, equalisevols=True)
equalweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_eqweights, equalisemeans=True, equalisevols=True)
marketcapweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_capweights, equalisemeans=True, equalisevols=True)

index_returns=(1.0+data).cumprod().ffill()
last_return=index_returns.irow(-1).values
last_return=pd.DataFrame(np.array([last_return]*len(data)), data.index)
last_return.columns=data.columns
index_returns = index_returns / last_return

marketcapweights = marketcapweights.reindex(index_returns.index, method="ffill")
marketcapweights=marketcapweights*index_returns
marketcapweights=marketcapweights.ffill()

## portfolio, take out missing weights
p1=portfolio_return(data, oneperiodweights)[pd.datetime(1994,1,1):]
#p2=portfolio_return(data, bootstrapweights)
p3=portfolio_return(data, exposthcweights)[pd.datetime(1994,1,1):]
p4=portfolio_return(data, equalweights)[pd.datetime(1994,1,1):]
p5=portfolio_return(data, marketcapweights)[pd.datetime(1994,1,1):]

drag1=p3 - p1
drag2=p4 - p5

def stats(x):
    ann_mean=x.mean()*12
    ann_std = x.std()*(12**.5)
    geo_mean = ann_mean - (ann_std**2)/2.0
    sharpe = geo_mean / ann_std

    return (ann_mean, ann_std, geo_mean, sharpe)

print stats(p1)
print stats(p3)
print stats(p4)
print stats(p5)

toplot=pd.concat([p1, p3, p4, p5], axis=1)
toplot.columns=["Optimised", "Handcraft", "Equal", "Market Cap"]

toplot.cumsum().plot()
show()

p1.cumsum().plot(color="black", ls="solid")
p3.cumsum().plot(color="gray", ls="solid")
p4.cumsum().plot(color="black", ls="dashed")
p5.cumsum().plot(color="gray", ls="dashed")

legend( ["Optimised", "Handcraft", "Equal", "Market Cap"], loc="upper left")

frame=plt.gca()

#frame.get_yaxis().set_visible(False)

rcParams.update({'font.size': 18})

file_process("compareoptmethods")

show()

drag1.cumsum().plot(color="gray", ls="solid")
legend( [ "Handcraft vs MktCap"], loc="upper left")

frame=plt.gca()

#frame.get_yaxis().set_visible(False)

rcParams.update({'font.size': 18})

file_process("compareoptmethodstracking")

show()
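
## Hedged worked example of the approximation inside stats() above: with a 10%
## annualised arithmetic mean and 20% annualised vol, the geometric mean is
## roughly 0.10 - (0.20**2)/2.0 = 0.08, i.e. about 2% a year of volatility
## drag. The numbers are illustrative, not outputs of this script.
assert abs((0.10 - (0.20**2)/2.0) - 0.08) < 1e-12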
gpl-2.0
4,701,163,412,939,915,000
30.408964
147
0.66463
false
3.352669
false
false
false
bqbn/addons-server
src/olympia/users/tests/test_user_utils.py
1
1451
# -*- coding: utf-8 -*- import pytest from olympia.amo.tests import user_factory from olympia.users.utils import ( UnsubscribeCode, system_addon_submission_allowed) def test_email_unsubscribe_code_parse(): email = u'nobody@mozîlla.org' token, hash_ = UnsubscribeCode.create(email) r_email = UnsubscribeCode.parse(token, hash_) assert email == r_email # A bad token or hash raises ValueError with pytest.raises(ValueError): UnsubscribeCode.parse(token, hash_[:-5]) with pytest.raises(ValueError): UnsubscribeCode.parse(token[5:], hash_) system_guids = pytest.mark.parametrize('guid', [ 'foø@mozilla.org', '[email protected]', '[email protected]', 'blâ[email protected]', 'foø@Mozilla.Org', '[email protected]', '[email protected]', '[email protected]', 'blâ[email protected]', '[email protected]', '[email protected]', '[email protected]' ]) @system_guids @pytest.mark.django_db def test_system_addon_submission_allowed_mozilla_allowed(guid): user = user_factory(email='[email protected]') data = {'guid': guid} assert system_addon_submission_allowed(user, data) @system_guids @pytest.mark.django_db def test_system_addon_submission_allowed_not_mozilla_not_allowed(guid): user = user_factory(email='[email protected]') data = {'guid': guid} assert not system_addon_submission_allowed(user, data)
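
# A small extension sketch of the round-trip test above (not part of the
# original suite): the token/hash pair is tied to the exact email address,
# including non-ASCII ones, so parse() must return it unchanged.
@pytest.mark.parametrize('email', [u'[email protected]', u'nobody@mozîlla.org'])
def test_unsubscribe_code_roundtrip_sketch(email):
    token, hash_ = UnsubscribeCode.create(email)
    assert UnsubscribeCode.parse(token, hash_) == email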
bsd-3-clause
2,683,786,618,169,446,000
31.133333
76
0.707469
false
3.031447
true
false
false
vst/normalazy
normalazy.py
1
27657
import copy import datetime from collections import OrderedDict from decimal import Decimal from functools import wraps from six import add_metaclass #: Defines the version of the `normalazy` library. __version__ = "0.0.3" def iffnotnull(func): """ Wraps a function, returns None if the first argument is None, invokes the method otherwise. :param func: The function to be wrapped. :return: None or the result of the function. >>> test1 = iffnotnull(lambda x: x) >>> test1(None) >>> test1(1) 1 """ @wraps(func) def wrapper(value, *args, **kwargs): return None if value is None else func(value, *args, **kwargs) return wrapper def iffnotblank(func): """ Wraps a function, returns None if the first argument is empty, invokes the method otherwise. :param func: The function to be wrapped. :return: Empty string or the result of the function. >>> test1 = iffnotblank(lambda x: x) >>> test1("") '' >>> test1(1) 1 """ @wraps(func) def wrapper(value, *args, **kwargs): return value if value == "" else func(value, *args, **kwargs) return wrapper def identity(x): """ Defines an identity function. :param x: value :return: value >>> identity(None) >>> identity(1) 1 """ return x @iffnotnull def as_string(x): """ Converts the value to a trimmed string. :param x: Value. :return: Trimmed string value. >>> as_string(None) >>> as_string("") '' >>> as_string("a") 'a' >>> as_string(" a ") 'a' """ return str(x).strip() @iffnotnull def as_factor(x): """ Converts the value to a factor string. :param x: Value. :return: Trimmed, up-cased string value. >>> as_factor(None) >>> as_factor("") '' >>> as_factor("a") 'A' >>> as_factor(" a ") 'A' """ return as_string(x).upper() @iffnotnull @iffnotblank def as_number(x): """ Converts the value to a decimal value. :param x: The value to be converted to a decimal value. :return: A Decimal instance. >>> as_number(None) >>> as_number(1) Decimal('1') >>> as_number("1") Decimal('1') >>> as_number(" 1 ") Decimal('1') """ return Decimal(as_string(x)) def as_boolean(x, predicate=None): """ Converts the value to a boolean value. :param x: The value to be converted to a boolean value. :param predicate: The predicate function if required. :return: Boolean >>> as_boolean(None) False >>> as_boolean("") False >>> as_boolean(" ") True >>> as_boolean(1) True >>> as_boolean(0) False >>> as_boolean("1") True >>> as_boolean("0") True >>> as_boolean("1", predicate=lambda x: int(x) != 0) True >>> as_boolean("0", predicate=lambda x: int(x) != 0) False >>> as_boolean("1", predicate=int) True >>> as_boolean("0", predicate=int) False >>> as_boolean("1", int) True >>> as_boolean("0", int) False """ return bool(x if predicate is None else predicate(x)) @iffnotnull @iffnotblank def as_datetime(x, fmt=None): """ Converts the value to a datetime value. :param x: The value to be converted to a datetime value. :param fmt: The format of the date/time string. :return: A datetime.date instance. >>> as_datetime(None) >>> as_datetime("") '' >>> as_datetime("2015-01-01 00:00:00") datetime.datetime(2015, 1, 1, 0, 0) >>> as_datetime("2015-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S") datetime.datetime(2015, 1, 1, 0, 0) >>> as_datetime("2015-01-01T00:00:00", fmt="%Y-%m-%dT%H:%M:%S") datetime.datetime(2015, 1, 1, 0, 0) """ return datetime.datetime.strptime(x, fmt or "%Y-%m-%d %H:%M:%S") @iffnotnull @iffnotblank def as_date(x, fmt=None): """ Converts the value to a date value. :param x: The value to be converted to a date value. :param fmt: The format of the date string. :return: A datetime.date instance. 
>>> as_date(None) >>> as_date('') '' >>> as_date("2015-01-01") datetime.date(2015, 1, 1) >>> as_date("Date: 2015-01-01", "Date: %Y-%m-%d") datetime.date(2015, 1, 1) >>> as_date("Date: 2015-01-01", fmt="Date: %Y-%m-%d") datetime.date(2015, 1, 1) """ return datetime.datetime.strptime(x, fmt or "%Y-%m-%d").date() class Value: """ Defines an immutable *[sic.]* boxed value with message, status and extra data as payload if required. >>> value = Value(value=42, message=None, status=Value.Status.Success, extras="41 + 1") >>> value.value 42 >>> value.message >>> value.status == Value.Status.Success True >>> value.extras '41 + 1' >>> value = Value.success(42, date="2015-01-01") >>> value.value 42 >>> value.status == Value.Status.Success True >>> value.date '2015-01-01' >>> value = Value.warning(value="fortytwo", message="Failed to convert to integer.", date="2015-01-01") >>> value.value 'fortytwo' >>> value.status == Value.Status.Warning True >>> value.date '2015-01-01' >>> value.message 'Failed to convert to integer.' >>> value = Value.error(message="Failed to compute the value.", date="2015-01-01") >>> value.value >>> value.status == Value.Status.Error True >>> value.date '2015-01-01' >>> value.message 'Failed to compute the value.' """ class Status: """ Defines an enumeration for value status. """ #: Indicates that value is mapped successfully. Success = 1 #: Indicates that value is mapped successfully with warnings. Warning = 2 #: Indicates that value could not be mapped successfully. Error = 3 def __init__(self, value=None, message=None, status=None, **kwargs): """ Constructs an immutable Value class instance. Note that the classmethods `success`, `warning` and `error` should be preferred over this constructor. :param value: The atomic value. :param message: Any messages if required. :param status: The value status. :param kwargs: Extra payload for the value. """ self.__value = value self.__status = status or self.Status.Success self.__message = message self.__payload = kwargs @property def value(self): return self.__value @property def status(self): return self.__status @property def message(self): return self.__message @property def payload(self): return self.__payload def __getattr__(self, item): """ Provides access to payload through attributes. :param item: The name of the attribute. :return: The value for the attribute if the attribute name is in payload. """ ## Check if the item is in the payload: if item in self.payload: ## Yes, return it. return self.payload.get(item) ## Nope, escalate: return super(Value, self).__getattr__(item) @classmethod def success(cls, value=None, message=None, **kwargs): """ Provides a convenience constructor for successful Value instances. :param value: The value of the Value instance to be constructed. :param message: The message, if any. :param kwargs: Extra payload for the value. :return: A successful Value instance. """ return cls(value=value, message=message, status=cls.Status.Success, **kwargs) @classmethod def warning(cls, value=None, message=None, **kwargs): """ Provides a convenience constructor for Values instances with warnings. :param value: The value of the Value instance to be constructed. :param message: The message, if any. :param kwargs: Extra payload for the value. :return: A Value instance with warnings. """ return cls(value=value, message=message, status=cls.Status.Warning, **kwargs) @classmethod def error(cls, value=None, message=None, **kwargs): """ Provides a convenience constructor for Values instances with errors. 
:param value: The value of the Value instance to be constructed. :param message: The message, if any. :param kwargs: Extra payload for the value. :return: A Value instance with errors. """ return cls(value=value, message=message, status=cls.Status.Error, **kwargs) class Field(object): """ Provides a concrete mapper field. >>> field = Field() >>> field.map(None, dict()).value >>> field.map(None, dict()).status == Value.Status.Success True >>> field = Field(null=False) >>> field.map(None, dict()).value >>> field.map(None, dict()).status == Value.Status.Error True >>> field = Field(func=lambda i, r: r.get("a", None)) >>> field.map(None, dict(a="")).value '' >>> field.map(None, dict(a="")).status == Value.Status.Success True >>> field = Field(func=lambda i, r: r.get("a", None), blank=False) >>> field.map(None, dict(a="")).value '' >>> field.map(None, dict(a="")).status == Value.Status.Error True >>> field = Field(func=lambda i, r: r.get("a", None)) >>> field.map(None, dict()).value >>> field.map(None, dict(a=1)).value 1 >>> field.map(None, dict(a=1)).status == Value.Status.Success True """ def __init__(self, name=None, func=None, blank=True, null=True): """ Constructs a mapper field with the given argument. :param name: The name of the field. :param func: The function which is to be used to map the value. :param blank: Boolean indicating if blank values are allowed. :param null: Boolean indicating if null values are allowed. """ self.__name = name self.__func = func self.__blank = blank self.__null = null @property def name(self): """ Returns the name of the field. :return: The name of the field. """ return self.__name @property def func(self): """ Returns the mapping function of the field. :return: The mapping function of the field. """ return self.__func @property def blank(self): """ Indicates if the value is allowed to be blank. :return: Boolean indicating if the value is allowed to be blank. """ return self.__blank @property def null(self): """ Indicates if the value is allowed to be null. :return: Boolean indicating if the value is allowed to be null. """ return self.__null def rename(self, name): """ Renames the field. :param name: The new name of the field. """ self.__name = name def treat_value(self, value): """ Treats the value and return. :param value: The value to be treated. :return: A Value instance. """ ## By now we have a value. If it is an instance of Value ## class, return it as is: if isinstance(value, Value): return value ## If the value is string and empty, but is not allowed to be so, return ## with error: if not self.blank and isinstance(value, str) and value == "": return Value.error(value="", message="Value is not allowed to be blank.") ## If the value is None but is not allowed to be so, return ## with error: if not self.null and value is None: return Value.error(message="Value is not allowed to be None.") ## OK, we have a value to be boxed and returned successfully: return Value.success(value=value) def map(self, instance, record): """ Returns the value of for field as a Value instance. :param instance: The instance for which the value will be retrieved. :param record: The raw record. :return: A Value instance. """ ## Check if we have a function: if self.func is None: ## OK, value shall be None: value = None ## Check if the function is a callable or the name of an attribute of the instance: elif hasattr(self.func, "__call__"): ## The function is a callable. 
Call it directly on the ## instance and the record and get the raw value: value = self.func(instance, record) else: ## The function is not a callable. We assume that it is ## the name of a method of the instance. Apply the ## instance method on the record and get the raw value: value = getattr(instance, self.func)(record) ## Treat the value and return: return self.treat_value(value) class KeyField(Field): """ Provides a mapper field for a given key which belongs to the record. The record can be an object which has `__getitem__` method or a simple object just with attribute access. The method starts reading the source value using the key provided checking `__getitem__` method (for iterables such as `dict` or `list`), then checks the attribute for simple object attribute access. >>> field = KeyField(key="a") >>> field.map(None, dict(a="")).value '' >>> field.map(None, dict(a="")).status == Value.Status.Success True >>> field = KeyField(key="a", blank=False) >>> field.map(None, dict(a="")).value '' >>> field.map(None, dict(a="")).status == Value.Status.Error True >>> field = KeyField(key="a", func=lambda i, r, v: as_number(v)) >>> field.map(None, dict(a="12")).value Decimal('12') >>> field.map(None, dict(a="12")).status == Value.Status.Success True >>> field = KeyField(key="a", cast=as_number) >>> field.map(None, dict(a="12")).value Decimal('12') >>> field.map(None, dict(a="12")).status == Value.Status.Success True >>> class Student: ... def __init__(self, name): ... self.name = name >>> field = KeyField(key="name") >>> field.map(None, Student("Sinan")).value 'Sinan' """ def __init__(self, key=None, cast=None, **kwargs): """ Constructs a mapper field with the given argument. :param key: The key of the property of the record to be mapped. :param cast: The function to be applied to the value. :param **kwargs: Keyword arguments to `Field`. """ super(KeyField, self).__init__(**kwargs) self.__key = key self.__cast = cast @property def key(self): """ Returns the key of for the field mapping. """ return self.__key def rename(self, name): """ Renames the field. :param name: The new name of the field. """ ## Call the super: super(KeyField, self).rename(name) ## If the key is None, set it with joy: if self.__key is None: self.__key = name def map(self, instance, record): """ Returns the value of for field as a Value instance. :param instance: The instance for which the value will be retrieved. :param record: The raw record. :return: A Value instance. """ ## Does the record have __getitem__ method (Indexable) and key exist? if hasattr(record, "__getitem__") and self.key in record: ## Yes, get the value: value = record.get(self.key) ## Nope, let's check if the record has such an attribute: elif hasattr(record, self.key): ## Yes, get the value using attribute access: value = getattr(record, self.key) ## We can't access such a value in the record. else: ## OK, Value shall be None: value = None ## Do we have a function: if self.func is None: ## Nope, skip: pass ## Check if the function is a callable or the name of an attribute of the instance: elif hasattr(self.func, "__call__"): ## The function is a callable. Call it directly on the ## instance, the record and the raw value: value = self.func(instance, record, value) else: ## The function is not a callable. We assume that it is ## the name of a method on the instance. 
Apply the ## instance method on the record and the raw value: value = getattr(instance, self.func)(record, value) ## OK, now we will cast if required: if self.__cast is not None: ## Is it a Value instance? if isinstance(value, Value): value = Value(value=self.__cast(value.value), status=value.status, message=value.message) else: value = self.__cast(value) ## Done, treat the value and return: return self.treat_value(value) class ChoiceKeyField(KeyField): """ Defines a choice mapper for the index of the record provided. >>> field = ChoiceKeyField(key="a", choices=dict(a=1, b=2)) >>> field.map(None, dict(a="a")).value 1 >>> field = ChoiceKeyField(key="a", choices=dict(a=1, b=2), func=lambda i, r, v: Decimal(str(v))) >>> field.map(None, dict(a="a")).value Decimal('1') """ def __init__(self, *args, **kwargs): ## Choices? choices = kwargs.pop("choices", {}) ## Get the function: functmp = kwargs.pop("func", None) ## Compute the func if functmp is not None: func = lambda i, r, v: functmp(i, r, choices.get(v, None)) else: func = lambda i, r, v: choices.get(v, None) ## Add the func back: kwargs["func"] = func ## OK, proceed as usual: super(ChoiceKeyField, self).__init__(*args, **kwargs) class RecordMetaclass(type): """ Provides a record metaclass. """ def __new__(mcs, name, bases, attrs, **kwargs): ## Pop all fields: fields = dict([(key, attrs.pop(key)) for key in list(attrs.keys()) if isinstance(attrs.get(key), Field)]) ## Check fields and make sure that names are added: for name, field in fields.items(): if field.name is None: field.rename(name) ## Get the record class as usual: record_cls = super(RecordMetaclass, mcs).__new__(mcs, name, bases, attrs, **kwargs) ## Attach fields to the class: record_cls._fields = {} ## Now, process the fields: record_cls._fields.update(fields) ## Done, return the record class: return record_cls @add_metaclass(RecordMetaclass) class Record(object): """ Provides a record normalizer base class. >>> class Test1Record(Record): ... a = KeyField() >>> record1 = Test1Record(dict(a=1)) >>> record1.a 1 >>> class Test2Record(Record): ... a = KeyField() ... b = ChoiceKeyField(choices={1: "Bir", 2: "Iki"}) >>> record2 = Test2Record(dict(a=1, b=2)) >>> record2.a 1 >>> record2.b 'Iki' We can get the dictionary representation of records: >>> record1.as_dict() OrderedDict([('a', 1)]) >>> record2.as_dict() OrderedDict([('a', 1), ('b', 'Iki')]) Or detailed: >>> record1.as_dict(detailed=True) OrderedDict([('a', OrderedDict([('value', '1'), ('status', 1), ('message', None)]))]) >>> record2.as_dict(detailed=True) OrderedDict([('a', OrderedDict([('value', '1'), ('status', 1), ('message', None)])), \ ('b', OrderedDict([('value', 'Iki'), ('status', 1), ('message', None)]))]) We can also create a new record from an existing record or dictionary: >>> class Test3Record(Record): ... a = KeyField() ... 
b = KeyField() >>> record3 = Test3Record.new(record2) >>> record3.a 1 >>> record3.b 'Iki' >>> record3.a == record2.a True >>> record3.b == record2.b True With dictionary: >>> record4 = Test3Record.new({"a": 1, "b": "Iki"}) >>> record4.a 1 >>> record4.b 'Iki' >>> record4.a == record2.a True >>> record4.b == record2.b True Or even override some fields: >>> record5 = Test3Record.new(record3, b="Bir") >>> record5.a 1 >>> record5.b 'Bir' """ ## TODO: [Improvement] Rename _fields -> __fields, _values -> __value def __init__(self, record): ## Save the record slot: self.__record = record ## Declare the values map: self._values = {} def __getattr__(self, item): """ Returns the value of the attribute named `item`, particularly from within the fields set or pre-calculated field values set. :param item: The name of the attribute, in particular the field name. :return: The value (value attribute of the Value). """ return self.getval(item).value def hasval(self, name): """ Indicates if we have a value slot called ``name``. :param name: The name of the value slot. :return: ``True`` if we have a value slot called ``name``, ``False`` otherwise. """ return name in self._fields def getval(self, name): """ Returns the value slot identified by the ``name``. :param name: The name of the value slot. :return: The value slot, ie. the boxed value instance of class :class:`Value`. """ ## Did we compute this before? if name in self._values: ## Yes, return the value slot: return self._values.get(name) ## Do we have such a value slot? if not self.hasval(name): raise AttributeError("Record does not have value slot named '{}'".format(name)) ## Apparently, we have never computed the value. Let's compute the value slot and return: return self.setval(name, self._fields.get(name).map(self, self.__record)) def setval(self, name, value, status=None, message=None, **kwargs): """ Sets a value to the value slot. :param name: The name of the value slot. :param value: The value to be set (Either a Python value or a :class:`Value` instance.) :param status: The status of the value slot if any. :param message: The message of the value slot if any. :param kwargs: Additional named values as payload to value. :return: The :class:`Value` instance set. """ ## Do we have such a value slot? if not self.hasval(name): raise AttributeError("Record does not have value slot named '{}'".format(name)) ## Create a value instance: if isinstance(value, Value): ## Get a copy of payload if any: payload = copy.deepcopy(value.payload) ## Update the payload with kwargs: payload.update(kwargs.copy()) ## Create the new value: value = Value(value=value.value, status=status or value.status, message=message or value.message, **payload) else: value = Value(value=value, status=status or Value.Status.Success, message=message, **kwargs) ## Save the slot: self._values[name] = value ## Done, return the value set: return value def delval(self, name): """ Deletes a stored value. :param name: The name of the value. """ if name in self._values: del self._values[name] def allvals(self): """ Returns all the value slots. :return: A dictionary of all computed value slots. """ return {field: self.getval(field) for field in self._fields} def val_none(self, name): """ Indicates if the value is None. :param name: The name of the value slot. :return: Boolean indicating if the value is None. """ return self.getval(name).value is None def val_blank(self, name): """ Indicates if the value is blank. :param name: The name of the value slot. 
:return: Boolean indicating if the value is blank. """ return self.getval(name).value == "" def val_some(self, name): """ Indicates if the value is something other than None or blank. :param name: The name of the value slot. :return: Boolean indicating if the value is something other than None or blank. """ return not self.val_none(name) and not self.val_blank(name) def val_success(self, name): """ Indicates if the value is success. :param name: The name of the value slot. :return: Boolean indicating if the value is success. """ return self.getval(name).status == Value.Status.Success def val_warning(self, name): """ Indicates if the value is warning. :param name: The name of the value slot. :return: Boolean indicating if the value is warning. """ return self.getval(name).status == Value.Status.Warning def val_error(self, name): """ Indicates if the value is error. :param name: The name of the value slot. :return: Boolean indicating if the value is error. """ return self.getval(name).status == Value.Status.Error def as_dict(self, detailed=False): """ Provides a JSON representation of the record instance. :param detailed: Indicates if we need detailed result, ie. with status and message for each field. :return: A JSON representation of the record instance. """ ## We have the fields and values saved in the `_fields` and `_values` attributes respectively. We will ## simply iterate over these fields and their respective values. ## ## Let's start with defining the data dictionary: retval = OrderedDict([]) ## Iterate over fields and get their values: for key in sorted(self._fields): ## Add the field to return value: retval[key] = getattr(self, key, None) ## If detailed, override with real Value instance: if detailed: ## Get the value: value = self._values.get(key, None) ## Add the value: retval[key] = OrderedDict([("value", str(value.value)), ("status", value.status), ("message", value.message)]) ## Done, return the value: return retval @classmethod def new(cls, record, **kwargs): """ Creates a new record from the provided record or dictionary and overriding values from the provided additional named arguments. :param record: The record or dictionary to be copied from. :param kwargs: Named arguments to override. :return: New record. """ ## First of all, get the record as value dictionary: base = copy.deepcopy(record.as_dict() if isinstance(record, Record) else record) ## Update the dictionary: base.update(kwargs) ## Done, create the new record and return: return cls(base)
bsd-2-clause
2,155,762,320,283,130,600
28.674893
120
0.576599
false
4.025764
false
false
false
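A minimal usage sketch for the field/record framework above, mirroring its own doctests; the TradeRecord model, its field names and the sample data are hypothetical.

## Hypothetical record model built on the framework above (fields use
## their attribute names as lookup keys, as in the Test*Record doctests):
class TradeRecord(Record):
    ## Cast the raw quantity to int via the func hook (same signature
    ## as in the ChoiceKeyField doctest: instance, record, value):
    quantity = KeyField(func=lambda i, r, v: int(v))
    ## Map a coded side ("B"/"S") to a readable label:
    side = ChoiceKeyField(choices={"B": "Buy", "S": "Sell"})

trade = TradeRecord({"quantity": "10", "side": "B"})
print(trade.quantity)    ## -> 10
print(trade.side)        ## -> 'Buy'
print(trade.as_dict())   ## -> OrderedDict([('quantity', 10), ('side', 'Buy')])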
CIECODE-Madrid/tipi-engine
stats/process_stats.py
1
6001
from tipi_data.models.stats import Stats from tipi_data.models.topic import Topic from tipi_data.models.initiative import Initiative class GenerateStats(object): def __init__(self): self.topics = Topic.objects() self.subtopics = self.topics.distinct('tags.subtopic') self.stats = Stats() def generate(self): Stats.objects().delete() self.overall() self.deputies_by_topics() self.deputies_by_subtopics() self.parliamentarygroups_by_topics() self.parliamentarygroups_by_subtopics() self.places_by_topics() self.places_by_subtopics() self.stats.save() def overall(self): self.stats['overall'] = { 'initiatives': Initiative.objects.count(), 'allinitiatives': Initiative.all.count(), 'topics': list(), 'subtopics': list() } pipeline = [ {'$match': {'topics': {'$exists': True, '$not': {'$size': 0}}}}, {'$unwind': '$topics'}, {'$group': {'_id': '$topics', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}} ] result = Initiative.objects().aggregate(*pipeline) for item in result: self.stats['overall']['topics'].append(item) for subtopic in self.subtopics: pipeline = [ {'$match': {'tags.subtopic': subtopic}}, {'$group': {'_id': subtopic, 'initiatives': {'$sum': 1}}} ] result = Initiative.objects().aggregate(*pipeline) if result._has_next(): self.stats['overall']['subtopics'].append(result.next()) self.stats['overall']['subtopics'].sort(key=lambda x: x['initiatives'], reverse=True) def deputies_by_topics(self): self.stats['deputiesByTopics'] = list() for topic in self.topics: pipeline = [ {'$match': {'topics': topic['name']}}, {'$unwind': '$author_deputies'}, {'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}, {'$limit': 10} ] result = list(Initiative.objects().aggregate(*pipeline)) if len(result) > 0: self.stats['deputiesByTopics'].append({ '_id': topic['name'], 'deputies': result }) def parliamentarygroups_by_topics(self): self.stats['parliamentarygroupsByTopics'] = list() for topic in self.topics: pipeline = [ {'$match': {'topics': topic['name']}}, {'$unwind': '$author_parliamentarygroups'}, {'$group': {'_id': '$author_parliamentarygroups', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}} ] result = list(Initiative.objects().aggregate(*pipeline)) if len(result) > 0: self.stats['parliamentarygroupsByTopics'].append({ '_id': topic['name'], 'parliamentarygroups': result }) def places_by_topics(self): self.stats['placesByTopics'] = list() for topic in self.topics: pipeline = [ {'$match': {'topics': topic['name'], 'place': {'$not': {'$eq': ""}, '$exists': True}}}, {'$group': {'_id': '$place', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}, {'$limit': 5} ] result = list(Initiative.objects().aggregate(*pipeline)) if len(result) > 0: self.stats['placesByTopics'].append({ '_id': topic['name'], 'places': result }) def deputies_by_subtopics(self): self.stats['deputiesBySubtopics'] = list() for subtopic in self.subtopics: pipeline = [ {'$match': { 'tags.subtopic': subtopic } }, {'$unwind': '$author_deputies'}, {'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}, {'$limit': 10} ] result = list(Initiative.objects().aggregate(*pipeline)) if len(result) > 0: self.stats['deputiesBySubtopics'].append({ '_id': subtopic, 'deputies': result }) def parliamentarygroups_by_subtopics(self): self.stats['parliamentarygroupsBySubtopics'] = list() for subtopic in self.subtopics: pipeline = [ {'$match': { 'tags.subtopic': subtopic } }, {'$unwind': '$author_parliamentarygroups'}, {'$group': {'_id': 
'$author_parliamentarygroups', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}} ] result = list(Initiative.objects().aggregate(*pipeline)) if len(result) > 0: self.stats['parliamentarygroupsBySubtopics'].append({ '_id': subtopic, 'parliamentarygroups': result }) def places_by_subtopics(self): self.stats['placesBySubtopics'] = list() for subtopic in self.subtopics: pipeline = [ {'$match': { 'tags.subtopic': subtopic, 'place': {'$not': {'$eq': ""}, '$exists': True}}}, {'$group': {'_id': '$place', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}, {'$limit': 5} ] result = list(Initiative.objects().aggregate(*pipeline)) if len(result) > 0: self.stats['placesBySubtopics'].append({ '_id': subtopic, 'places': result }) if __name__ == "__main__": GenerateStats().generate()
gpl-3.0
3,043,947,720,192,462,300
41.864286
130
0.481753
false
4.193571
false
false
false
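A sketch of the aggregation-pipeline shape GenerateStats builds per topic, run here with pymongo directly instead of the project's mongoengine models; the connection string, database/collection names and the topic value are assumptions.

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # hypothetical deployment
initiatives = client["tipi"]["initiative"]         # hypothetical db/collection names

# Same shape as deputies_by_topics: filter by topic, unwind the deputies
# array, count initiatives per deputy, keep the top 10.
pipeline = [
    {'$match': {'topics': 'Sanidad'}},             # illustrative topic name
    {'$unwind': '$author_deputies'},
    {'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}},
    {'$sort': {'initiatives': -1}},
    {'$limit': 10}
]
for row in initiatives.aggregate(pipeline):
    print("%s %d" % (row['_id'], row['initiatives']))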
mercycorps/tola-activity
htdocs/activitydb/migrations/0037_auto_20151028_1631.py
1
1470
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('activitydb', '0036_auto_20151028_1519'), ] operations = [ migrations.AlterField( model_name='siteprofile', name='avg_landholding_size', field=models.DecimalField(decimal_places=14, max_digits=25, blank=True, help_text='In hectares/jeribs', null=True, verbose_name='Average Landholding Size'), ), migrations.AlterField( model_name='siteprofile', name='populations_owning_land', field=models.IntegerField(help_text='(%)', null=True, verbose_name='Households Owning Land', blank=True), ), migrations.AlterField( model_name='siteprofile', name='literacy_rate', field=models.IntegerField(help_text='%', null=True, verbose_name='Literacy Rate (%)', blank=True), ), migrations.AlterField( model_name='siteprofile', name='literate_females', field=models.IntegerField(help_text='%', null=True, verbose_name='% of Literate Females', blank=True), ), migrations.AlterField( model_name='siteprofile', name='literate_males', field=models.IntegerField(help_text='%', null=True, verbose_name='% of Literate Males', blank=True), ), ]
gpl-2.0
-7,011,314,465,358,522,000
35.75
168
0.6
false
3.972973
false
false
false
srajag/contrail-controller
src/config/device-manager/device_manager/db.py
1
14129
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of data model for physical router configuration manager """ from vnc_api.common.exceptions import NoIdError from physical_router_config import PhysicalRouterConfig from sandesh.dm_introspect import ttypes as sandesh from cfgm_common.vnc_db import DBBase import copy class BgpRouterDM(DBBase): _dict = {} obj_type = 'bgp_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.bgp_routers = {} self.physical_router = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj['bgp_router_parameters'] self.update_single_ref('physical_router', obj) new_peers = {} for ref in obj.get('bgp_router_refs', []): new_peers[ref['uuid']] = ref['attr'] for peer_id in set(self.bgp_routers.keys()) - set(new_peers.keys()): peer = BgpRouterDM.get(peer_id) if self.uuid in peer.bgp_routers: del peer.bgp_routers[self.uuid] for peer_id, attrs in new_peers.items(): peer = BgpRouterDM.get(peer_id) if peer: peer.bgp_routers[self.uuid] = attrs self.bgp_routers = new_peers def sandesh_build(self): return sandesh.BgpRouter(name=self.name, uuid=self.uuid, peers=self.bgp_routers, physical_router=self.physical_router) @classmethod def sandesh_request(cls, req): # Return the list of BGP routers resp = sandesh.BgpRouterListResp(bgp_routers=[]) if req.name_or_uuid is None: for router in cls.values(): sandesh_router = router.sandesh_build() resp.bgp_routers.extend(sandesh_router) else: router = cls.find_by_name_or_uuid(req.name_or_uuid) if router: sandesh_router = router.sandesh_build() resp.bgp_routers.extend(sandesh_router) resp.response(req.context()) # end class BgpRouterDM class PhysicalRouterDM(DBBase): _dict = {} obj_type = 'physical_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_networks = set() self.bgp_router = None self.update(obj_dict) self.config_manager = PhysicalRouterConfig( self.management_ip, self.user_credentials, self._logger) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.management_ip = obj.get('physical_router_management_ip') self.vendor = obj.get('physical_router_vendor_name') self.user_credentials = obj.get('physical_router_user_credentials') self.update_single_ref('bgp_router', obj) self.update_multiple_refs('virtual_network', obj) self.physical_interfaces = set([pi['uuid'] for pi in obj.get('physical_interfaces', [])]) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.config_manager.delete_bgp_config() obj.update_single_ref('bgp_router', {}) obj.update_multiple_refs('virtual_network', {}) del cls._dict[uuid] # end delete def push_config(self): self.config_manager.reset_bgp_config() bgp_router = BgpRouterDM.get(self.bgp_router) if bgp_router: for peer_uuid, params in bgp_router.bgp_routers.items(): peer = BgpRouterDM.get(peer_uuid) if peer is None: continue external = (bgp_router.params['autonomous_system'] == peer.params['autonomous_system']) self.config_manager.add_bgp_peer(peer.params['address'], params, external) self.config_manager.set_bgp_config(bgp_router.params) vn_dict = {} for vn_id in self.virtual_networks: vn_dict[vn_id] = [] li_set = self.logical_interfaces for pi_uuid in self.physical_interfaces: pi = PhysicalInterfaceDM.get(pi_uuid) if pi is 
None:
                continue
            li_set |= pi.logical_interfaces
        for li_uuid in li_set:
            li = LogicalInterfaceDM.get(li_uuid)
            if li is None:
                continue
            vmi_id = li.virtual_machine_interface
            vmi = VirtualMachineInterfaceDM.get(vmi_id)
            if vmi is None:
                continue
            vn_id = vmi.virtual_network
            if vn_id in vn_dict:
                vn_dict[vn_id].append(li.name)
            else:
                vn_dict[vn_id] = [li.name]

        for vn_id, interfaces in vn_dict.items():
            vn_obj = VirtualNetworkDM.get(vn_id)
            if vn_obj is None:
                continue
            for ri_id in vn_obj.routing_instances:
                # Find the primary RI by matching the name
                ri_obj = RoutingInstanceDM.get(ri_id)
                if ri_obj is None:
                    continue
                if ri_obj.fq_name[-1] == vn_obj.fq_name[-1]:
                    vrf_name = ':'.join(vn_obj.fq_name)
                    export_set = copy.copy(ri_obj.export_targets)
                    import_set = copy.copy(ri_obj.import_targets)
                    for ri2_id in ri_obj.routing_instances:
                        ri2 = RoutingInstanceDM.get(ri2_id)
                        if ri2 is None:
                            continue
                        import_set |= ri2.export_targets
                        export_set |= ri2.import_targets
                    self.config_manager.add_routing_instance(vrf_name,
                                                             import_set,
                                                             export_set,
                                                             vn_obj.prefixes,
                                                             vn_obj.gateways,
                                                             vn_obj.router_external,
                                                             interfaces,
                                                             vn_obj.vxlan_vni)
                    break

        self.config_manager.send_bgp_config()
    # end push_config

# end PhysicalRouterDM


class PhysicalInterfaceDM(DBBase):
    _dict = {}
    obj_type = 'physical_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.update(obj_dict)
        pr = PhysicalRouterDM.get(self.physical_router)
        if pr:
            pr.physical_interfaces.add(self.uuid)
    # end __init__

    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.physical_router = self.get_parent_uuid(obj)
        self.logical_interfaces = set([li['uuid'] for li in
                                       obj.get('logical_interfaces', [])])
    # end update

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        pr = PhysicalRouterDM.get(obj.physical_router)
        if pr:
            pr.physical_interfaces.discard(obj.uuid)
        del cls._dict[uuid]
    # end delete
# end PhysicalInterfaceDM


class LogicalInterfaceDM(DBBase):
    _dict = {}
    obj_type = 'logical_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interface = None
        self.update(obj_dict)
        if self.physical_interface:
            parent = PhysicalInterfaceDM.get(self.physical_interface)
        elif self.physical_router:
            parent = PhysicalRouterDM.get(self.physical_router)
        if parent:
            parent.logical_interfaces.add(self.uuid)
    # end __init__

    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        if obj['parent_type'] == 'physical-router':
            self.physical_router = self.get_parent_uuid(obj)
            self.physical_interface = None
        else:
            self.physical_interface = self.get_parent_uuid(obj)
            self.physical_router = None

        self.update_single_ref('virtual_machine_interface', obj)
        self.name = obj['fq_name'][-1]
    # end update

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        if obj.physical_interface:
            parent = PhysicalInterfaceDM.get(obj.physical_interface)
        elif obj.physical_router:
            # A physical_router parent lives in the router cache, not the
            # interface cache:
            parent = PhysicalRouterDM.get(obj.physical_router)
        if parent:
            parent.logical_interfaces.discard(obj.uuid)
        obj.update_single_ref('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete
# end LogicalInterfaceDM


class VirtualMachineInterfaceDM(DBBase):
    _dict = {}
    obj_type = 'virtual_machine_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_network = None
        self.logical_interface = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.update_single_ref('logical_interface', obj)
self.update_single_ref('virtual_network', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('logical_interface', {}) obj.update_single_ref('virtual_network', {}) del cls._dict[uuid] # end delete # end VirtualMachineInterfaceDM class VirtualNetworkDM(DBBase): _dict = {} obj_type = 'virtual_network' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.physical_routers = set() self.router_external = False self.vxlan_configured = False self.vxlan_vni = None self.gateways = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.update_multiple_refs('physical_router', obj) self.fq_name = obj['fq_name'] try: self.router_external = obj['router_external'] except KeyError: self.router_external = False try: prop = obj['virtual_network_properties'] if prop['vxlan_network_identifier'] is not None: self.vxlan_configured = True self.vxlan_vni = prop['vxlan_network_identifier'] except KeyError: self.vxlan_configured = False self.vxlan_vni = None self.routing_instances = set([ri['uuid'] for ri in obj.get('routing_instances', [])]) self.virtual_machine_interfaces = set( [vmi['uuid'] for vmi in obj.get('virtual_machine_interface_back_refs', [])]) self.prefixes = set() self.gateways = set() for ipam_ref in obj.get('network_ipam_refs', []): for subnet in ipam_ref['attr'].get('ipam_subnets', []): self.prefixes.add('%s/%d' % (subnet['subnet']['ip_prefix'], subnet['subnet']['ip_prefix_len']) ) self.gateways.add(subnet['default_gateway']) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('physical_router', {}) del cls._dict[uuid] # end delete # end VirtualNetworkDM class RoutingInstanceDM(DBBase): _dict = {} obj_type = 'routing_instance' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_network = None self.import_targets = set() self.export_targets = set() self.routing_instances = set() self.update(obj_dict) vn = VirtualNetworkDM.get(self.virtual_network) if vn: vn.routing_instances.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.fq_name = obj['fq_name'] self.virtual_network = self.get_parent_uuid(obj) self.import_targets = set() self.export_targets = set() for rt_ref in obj.get('route_target_refs', []): rt_name = rt_ref['to'][0] exim = rt_ref.get('attr').get('import_export') if exim == 'export': self.export_targets.add(rt_name) elif exim == 'import': self.import_targets.add(rt_name) else: self.import_targets.add(rt_name) self.export_targets.add(rt_name) self.update_multiple_refs('routing_instance', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] vn = VirtualNetworkDM.get(obj.virtual_network) if vn: vn.routing_instances.discard(obj.uuid) del cls._dict[uuid] # end delete # end RoutingInstanceDM DBBase._OBJ_TYPE_MAP = { 'bgp_router': BgpRouterDM, 'physical_router': PhysicalRouterDM, 'physical_interface': PhysicalInterfaceDM, 'logical_interface': LogicalInterfaceDM, 'virtual_machine_interface': VirtualMachineInterfaceDM, 'virtual_network': VirtualNetworkDM, 'routing_instance': RoutingInstanceDM, }
apache-2.0
-7,415,153,349,394,149,000
33.972772
84
0.544978
false
3.946648
true
false
false
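A minimal usage sketch for the data-model cache above, assuming the DM dictionaries have already been populated elsewhere (e.g. by the device manager's sync loop); the module path and UUID are illustrative.

from device_manager.db import PhysicalRouterDM, BgpRouterDM  # path per this repo layout (assumption)

pr_uuid = "8d0a8f40-0000-4a8b-0000-000000000001"  # hypothetical router UUID
pr = PhysicalRouterDM.get(pr_uuid)
if pr is not None:
    bgp = BgpRouterDM.get(pr.bgp_router)
    if bgp is not None:
        print("regenerating config for BGP router %s" % bgp.name)
    # Rebuild BGP peers and routing instances, then push to the device:
    pr.push_config()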
globocom/database-as-a-service
dbaas/drivers/tests/test_driver_pymongo.py
1
9370
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import os from mock import patch, MagicMock from drivers import DriverFactory from physical.tests import factory as factory_physical from logical.tests import factory as factory_logical from logical.models import Database from drivers.mongodb import MongoDB, MongoDBReplicaSet from drivers.tests.base import (BaseMongoDriverTestCase, FakeDriverClient, BaseSingleInstanceUpdateSizesTest, BaseHAInstanceUpdateSizesTest) from physical.models import Instance @patch('drivers.mongodb.MongoDB.pymongo', new=FakeDriverClient) @patch('physical.models.DiskOffering.size_bytes', new=MagicMock(return_value=90)) class MongoSingleUpdateSizesTestCase( BaseSingleInstanceUpdateSizesTest, BaseMongoDriverTestCase): pass @patch('drivers.mongodb.MongoDB.pymongo', new=FakeDriverClient) @patch('physical.models.DiskOffering.size_bytes', new=MagicMock(return_value=90)) class MongoReplicaSetUpdateSizesTestCase( BaseMongoDriverTestCase, BaseHAInstanceUpdateSizesTest): driver_class = MongoDBReplicaSet secondary_instance_quantity = 2 secondary_instance_type = Instance.MONGODB_ARBITER class MongoUsedAndTotalTestCase(BaseMongoDriverTestCase): """ Tests Mongo total and used """ def test_masters_single_instance(self): """ Test validates return total and used size when has single instance """ self.instance.total_size_in_bytes = 105 self.instance.used_size_in_bytes = 55 self.instance.save() self.assertEqual(self.driver.masters_total_size_in_bytes, 105) expected_total_size_in_gb = 105 * self.GB_FACTOR self.assertEqual( self.driver.get_master_instance_total_size_in_gb(), expected_total_size_in_gb ) self.assertEqual(self.driver.masters_used_size_in_bytes, 55) def test_masters_replicaset_instance(self): """ Test validates return total and used size when has single instance """ self.driver = MongoDBReplicaSet(databaseinfra=self.databaseinfra) self.driver.check_instance_is_master = MagicMock( side_effect=self.instance_helper.check_instance_is_master ) self.instance_helper.create_instances_by_quant( infra=self.databaseinfra, base_address='131', instance_type=self.instance_type, total_size_in_bytes=35, used_size_in_bytes=10 ) self.instance.total_size_in_bytes = 35 self.instance.used_size_in_bytes = 10 self.instance.save() self.assertEqual(self.driver.masters_total_size_in_bytes, 35) expected_total_size_in_gb = 35 * self.GB_FACTOR self.assertEqual( self.driver.get_master_instance_total_size_in_gb(), expected_total_size_in_gb ) self.assertEqual(self.driver.masters_used_size_in_bytes, 10) class MongoDBEngineTestCase(BaseMongoDriverTestCase): """ Tests MongoDB Engine """ def test_mongodb_app_installed(self): self.assertTrue(DriverFactory.is_driver_available("mongodb_single")) self.assertTrue( DriverFactory.is_driver_available("mongodb_replica_set") ) # test mongo methods def test_instantiate_mongodb_using_engine_factory(self): self.assertEqual(MongoDB, type(self.driver)) self.assertEqual(self.databaseinfra, self.driver.databaseinfra) def test_connection_string(self): self.assertEqual( "mongodb://<user>:<password>@{}".format( self.instance_endpoint), self.driver.get_connection() ) def test_get_user(self): self.assertEqual(self.databaseinfra.user, self.driver.get_user()) def test_get_password(self): self.assertEqual( self.databaseinfra.password, self.driver.get_password()) def test_get_default_port(self): self.assertEqual(27017, self.driver.default_port) @patch.object(MongoDB, 'get_replica_name') def test_connection_string_when_in_replica_set(self, 
get_replica_name): self.instance = factory_physical.InstanceFactory( databaseinfra=self.databaseinfra, address='127.0.0.2', port=27018) get_replica_name.return_value = 'my_repl' expected_conn = ("mongodb://<user>:<password>" "@{},127.0.0.2:27018" "?replicaSet=my_repl").format(self.instance_endpoint) self.assertEqual(expected_conn, self.driver.get_connection()) def test_connection_with_database(self): self.database = factory_logical.DatabaseFactory( name="my_db_url_name", databaseinfra=self.databaseinfra) expected_conn = ("mongodb://<user>:<password>" "@{}/my_db_url_name").format(self.instance_endpoint) self.assertEqual( expected_conn, self.driver.get_connection(database=self.database) ) @patch.object(MongoDB, 'get_replica_name') def test_connection_with_database_and_replica(self, get_replica_name): self.instance = factory_physical.InstanceFactory( databaseinfra=self.databaseinfra, address='127.0.0.2', port=27018) get_replica_name.return_value = 'my_repl' self.database = factory_logical.DatabaseFactory( name="my_db_url_name", databaseinfra=self.databaseinfra) expected_conn = ("mongodb://<user>:<password>" "@{},127.0.0.2:27018/my_db_url_name" "?replicaSet=my_repl").format(self.instance_endpoint) self.assertEqual( expected_conn, self.driver.get_connection(database=self.database) ) class ManageDatabaseMongoDBTestCase(BaseMongoDriverTestCase): """ Test case to managing database in mongodb engine """ def setUp(self): super(ManageDatabaseMongoDBTestCase, self).setUp() self.database = factory_logical.DatabaseFactory( databaseinfra=self.databaseinfra) self.instance.address = os.getenv('TESTS_MONGODB_HOST', '127.0.0.1') self.instance.save() # ensure database is dropped self.driver_client.drop_database(self.database.name) def tearDown(self): if not Database.objects.filter(databaseinfra_id=self.databaseinfra.id): self.database.delete() super(ManageDatabaseMongoDBTestCase, self).tearDown() def test_mongodb_create_database(self): self.assertFalse( self.database.name in self.driver_client.database_names()) self.driver.create_database(self.database) self.assertTrue( self.database.name in self.driver_client.database_names()) def test_mongodb_remove_database(self): self.driver.create_database(self.database) self.assertTrue( self.database.name in self.driver_client.database_names()) self.driver.remove_database(self.database) self.assertFalse( self.database.name in self.driver_client.database_names()) class ManageCredentialsMongoDBTestCase(BaseMongoDriverTestCase): """ Test case to managing credentials in mongodb engine """ def setUp(self): super(ManageCredentialsMongoDBTestCase, self).setUp() self.database = factory_logical.DatabaseFactory( databaseinfra=self.databaseinfra) self.credential = factory_logical.CredentialFactory( database=self.database) self.instance.address = os.getenv('TESTS_MONGODB_HOST', '127.0.0.1') # self.instance.address = '127.0.0.1' self.instance.save() self.driver.create_database(self.database) def tearDown(self): self.driver.remove_database(self.database) self.credential.delete() self.database.delete() super(ManageCredentialsMongoDBTestCase, self).tearDown() def __find_user__(self, credential): v = self.driver_client.server_info()['version'] if v < '2.6': return getattr( self.driver_client, credential.database.name ).system.users.find_one({"user": credential.user}) else: return getattr( self.driver_client, "admin" ).system.users.find_one( {"user": credential.user, "db": credential.database.name} ) def test_mongodb_create_credential(self): self.assertIsNone( 
self.__find_user__(self.credential), "User %s already exists. Invalid test" % self.credential ) self.driver.create_user(self.credential) user = self.__find_user__(self.credential) self.assertIsNotNone(user) self.assertEquals(self.credential.user, user['user']) self.driver.remove_user(self.credential) def test_mongodb_remove_credential(self): self.driver.create_user(self.credential) self.assertIsNotNone( self.__find_user__(self.credential), "Error creating user %s. Invalid test" % self.credential ) self.driver.remove_user(self.credential) self.assertIsNone(self.__find_user__(self.credential))
bsd-3-clause
-8,187,081,293,294,255,000
36.48
79
0.649733
false
4.056277
true
false
false
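A sketch of the patching pattern the suite above relies on, so a test can run without a live MongoDB; the test case below is hypothetical.

from mock import patch
from drivers.tests.base import BaseMongoDriverTestCase, FakeDriverClient

# Swap the real pymongo client for the fake, as the size-update tests do:
@patch('drivers.mongodb.MongoDB.pymongo', new=FakeDriverClient)
class ExtraMongoDriverTest(BaseMongoDriverTestCase):  # hypothetical test case
    def test_default_port(self):
        # self.driver is built by the base class setUp()
        self.assertEqual(27017, self.driver.default_port)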
Dzess/ALFIRT
alfirt.runner/src/readers/tests/TagReaderX3DUnitTests.py
1
4002
'''
Created on Jun 9, 2011

@author: Piotr
'''
import unittest
import os

from readers.TagReaderX3D import TagReaderX3D


class TagReaderX3DUnitTests(unittest.TestCase):

    def setUp(self):
        # Setting up the X3D string with ALFIRT namespace tags
        x3dString = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Interchange" version="3.2"
 xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"
 xmlns:alfirt="ALFIRT"
 xsd:noNamespaceSchemaLocation=" http://www.web3d.org/specifications/x3d-3.2.xsd ">
<Scene>
 <Viewpoint description='Rear View' orientation='0 1 0 3.14159' position='0 0 -10'/>
 <Shape alfirt:anchor_translate="0 1 2" alfirt:anchor_rotate="0.4 0.2 0.3">
 <IndexedFaceSet coordIndex="0 1 2">
 <Coordinate point="0 0 0 1 0 0 0.5 1 0"/>
 </IndexedFaceSet>
 </Shape>
</Scene>
</X3D>
"""
        # Creating file
        self.fileName = "test_file_name"
        with open(self.fileName, 'w') as fileStream:
            fileStream.write(x3dString)
            fileStream.close()

    def tearDown(self):
        # Removing file after test
        os.remove(self.fileName)

    def test_reading_none_results_in_exception(self):
        x3dReader = TagReaderX3D()
        with self.assertRaises(ValueError):
            x3dReader.readScene(None)
        with self.assertRaises(ValueError):
            x3dReader.readScene("some non-existing file")

    def test_reading_file_with_no_anchor_results_in_exception(self):
        '''
        The anchor is required for the polar transformations around the object.
        '''
        # Setting up the X3D string with ALFIRT namespace tags
        x3dString = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Interchange" version="3.2"
 xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"
 xmlns:alfirt="ALFIRT"
 xsd:noNamespaceSchemaLocation=" http://www.web3d.org/specifications/x3d-3.2.xsd ">
<Scene>
 <Viewpoint description='Rear View' orientation='0 1 0 3.14159' position='0 0 -10'/>
 <Shape>
 <IndexedFaceSet coordIndex="0 1 2">
 <Coordinate point="0 0 0 1 0 0 0.5 1 0"/>
 </IndexedFaceSet>
 </Shape>
</Scene>
</X3D>
"""
        # Write this file into the data
        fileName = "test_file_without_anchor"
        with open(fileName, 'w') as fileStream:
            fileStream.write(x3dString)
            fileStream.close()

        # Get reader
        x3dReader = TagReaderX3D()
        try:
            x3dReader.readScene(fileName)
        except RuntimeError:
            return
        finally:
            os.remove(fileName)
        self.fail("The exception should have been thrown")

    def test_reading_file_with_alfirt_tags(self):
        '''
        Checks if the elements passed in X3D string are correct.
        '''
        x3dReader = TagReaderX3D()
        results = x3dReader.readScene(self.fileName)

        # assert the values
        translateCamera = results.camera.translate
        rotateCamera = results.camera.rotate

        translateAnchor = results.anchor.translate
        rotateAnchor = results.anchor.rotate

        self.assertEqual(translateAnchor, [0.0, 1.0, 2.0], 'Translate of the anchor should be 0 1 2')
        self.assertEqual(rotateAnchor, [0.4, 0.2, 0.3], "Rotate of the anchor should be 0.4 0.2 0.3")
        self.assertEqual(translateCamera, [0.0, -10, 0], "The position of the camera should be 0 0 -10")
        self.assertEqual(rotateCamera, [1.5707963705062866, 1.7340079025429667e-13, 3.1415903568267822],
                         "The rotation of the camera should be 0 1 0 3.14")

#===============================================================================
# Test runner
#===============================================================================
if __name__ == '__main__':
    unittest.main(verbosity=2)
mit
5,748,462,383,747,537,000
32.07438
155
0.615942
false
3.417592
true
false
false
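A minimal sketch of the reader API exercised by these tests; scene.x3d is a hypothetical file carrying the alfirt:anchor_* attributes.

from readers.TagReaderX3D import TagReaderX3D

reader = TagReaderX3D()
scene = reader.readScene("scene.x3d")   # hypothetical annotated scene file
# The result exposes the camera and anchor transforms read from the tags:
print("%s %s" % (scene.camera.translate, scene.camera.rotate))
print("%s %s" % (scene.anchor.translate, scene.anchor.rotate))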
Russell-IO/ansible
lib/ansible/plugins/action/gather_facts.py
1
1434
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from collections import MutableMapping

from ansible import constants as C
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        ''' handler for fact gathering operations '''

        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        result['ansible_facts'] = {}

        for fact_module in C.config.get_config_value('FACTS_MODULES', variables=task_vars):

            mod_args = task_vars.get('ansible_facts_modules', {}).get(fact_module, {})
            if isinstance(mod_args, MutableMapping):
                mod_args.update(self._task.args.copy())
            else:
                mod_args = self._task.args.copy()

            if fact_module != 'setup':
                del mod_args['gather_subset']

            self._display.vvvv("Running %s" % fact_module)
            result.update(self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars,
                                               wrap_async=self._task.async_val))

        # tell executor facts were gathered
        result['ansible_facts']['_ansible_facts_gathered'] = True

        return result
gpl-3.0
-5,338,959,078,966,732,000
33.97561
148
0.640167
false
3.875676
false
false
false
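A standalone sketch of the argument-merge rule the action plugin applies per fact module: per-module defaults are overlaid by the task's own args, and gather_subset is dropped for everything but setup; all values below are synthetic.

task_args = {'gather_subset': ['network'], 'filter': 'ansible_*'}  # synthetic task args
per_module_defaults = {'ohai': {'timeout': 10}}  # synthetic ansible_facts_modules entry

for fact_module in ('setup', 'ohai'):
    mod_args = dict(per_module_defaults.get(fact_module, {}))
    mod_args.update(task_args)          # task args win over per-module defaults
    if fact_module != 'setup':
        del mod_args['gather_subset']   # only the setup module understands it
    print("%s -> %s" % (fact_module, mod_args))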
SCPR/firetracker
calfire_tracker/utilities.py
1
3930
from django.conf import settings from django.db import models from django.utils.encoding import smart_str from django.utils import timezone from django.template.defaultfilters import slugify from geopy import geocoders import pytz import time import datetime import requests import logging logger = logging.getLogger("firetracker") def search_assethost_for_image(kpcc_image_token, **kwargs): ''' model save function to query kpcc image api given an asset_host_id ''' if kwargs['image_id'] is not None: url_prefix = 'https://a.scpr.org/api/assets/' url_suffix = '.json?auth_token=' search_url = '%s%s%s%s' % (url_prefix, kwargs['image_id'], url_suffix, kpcc_image_token) kpcc_query_api = requests.get(search_url, verify=False, headers={"From": "[email protected]","User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19"}) kpcc_image_asset = kpcc_query_api.json() try: kpcc_image_data = {'asset_url_link': kpcc_image_asset['urls']['full'], 'asset_photo_credit': kpcc_image_asset['owner'], 'asset_host_image_id': kwargs['image_id']} except: kpcc_image_data = {'asset_url_link': None, 'asset_photo_credit': None, 'asset_host_image_id': None} else: kpcc_image_data = {'asset_url_link': None, 'asset_photo_credit': None, 'asset_host_image_id': None} return kpcc_image_data def fill_air_quality_data(location_latitude, location_longitude): try: air_quality_url = 'http://www.airnowapi.org/aq/observation/latLong/current/?format=application/json&latitude=%s&longitude=%s&distance=30&API_KEY=AABE5F75-6C5A-47C2-AB74-2D138C9055B2' % (location_latitude, location_longitude) air_quality_query = requests.get(air_quality_url, headers= {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19"}) air_quality_json = air_quality_query.json() if len(air_quality_json) == 0: air_quality_rating = None air_quality_parameter = None elif len(air_quality_json) >= 1: for data in air_quality_json: if data["ParameterName"] == "PM2.5": air_quality_rating = data["AQI"] air_quality_parameter = "Fine particles (PM2.5)" elif data["ParameterName"] == "O3": air_quality_rating = data["AQI"] air_quality_parameter = "Ozone (O3)" else: air_quality_rating = None air_quality_parameter = None else: air_quality_rating = None air_quality_parameter = None except: air_quality_rating = None air_quality_parameter = None print "exception for %s, %s\n" % (location_latitude, location_longitude) return {"air_quality_rating": air_quality_rating, "air_quality_parameter": air_quality_parameter} def fill_geocode_data(computed_location): if computed_location is not None: try: g = geocoders.GoogleV3() address = smart_str(computed_location) computed_location, (location_latitude, location_longitude) = g.geocode(address) geolocation_data = { 'computed_location': str(computed_location), 'location_latitude': location_latitude, 'location_longitude': location_longitude, 'location_geocode_error': False, } except (UnboundLocalError, ValueError,geocoders.google.GQueryError): geolocation_data = { 'computed_location': str(computed_location), 'location_latitude': None, 'location_longitude': None, 'location_geocode_error': True, } return geolocation_data
gpl-2.0
-4,657,212,522,448,050,000
48.125
236
0.622901
false
3.575978
false
false
false
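A minimal usage sketch chaining the helpers above, assuming Django settings are configured and the external geocoding/air-quality services are reachable; the address is illustrative.

from calfire_tracker.utilities import fill_geocode_data, fill_air_quality_data

geo = fill_geocode_data("Griffith Park, Los Angeles, CA")  # illustrative address
if geo and not geo["location_geocode_error"]:
    aq = fill_air_quality_data(geo["location_latitude"], geo["location_longitude"])
    print("%s AQI=%s" % (geo["computed_location"], aq["air_quality_rating"]))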
tinyms/ArchiveX
tinyms/controller/setting.py
1
3046
__author__ = 'tinyms'
#coding=UTF8

from sqlalchemy import func
from tinyms.core.common import Utils
from tinyms.core.web import IAuthRequest
from tinyms.core.entity import Account
from tinyms.core.orm import SessionFactory
from tinyms.core.annotation import ObjectPool, route, setting, api
from tinyms.core.setting import UserSettingHelper, AppSettingHelper


@route("/workbench/setting")
class SettingPage(IAuthRequest):
    def get(self, *args, **kwargs):
        return self.render("workbench/setting.html", items=ObjectPool.setting)


@api("tinyms.core.setting")
class SettingApi():
    def load(self):
        usr = self.request.current_user
        level_u = UserSettingHelper(usr)
        level_u_ = level_u.load()
        level_s = AppSettingHelper.load()
        level_all = dict(level_u_, **level_s)
        return level_all

    def save(self):
        kv = self.request.wrap_params_to_dict()
        level_user = dict()
        level_system = dict()
        for k in kv:
            if k.startswith("u_"):
                level_user[k] = kv[k]
            elif k.startswith("s_"):
                level_system[k] = kv[k]
        AppSettingHelper.set(level_system)
        u = UserSettingHelper("%s" % self.request.current_user)
        u.set(level_user)

        # Allow the user to make further data changes after the settings are saved
        items = ObjectPool.setting
        for k in items.keys():
            obj = items[k].cls()
            if hasattr(obj, "save"):
                msg = obj.save(kv, self.request)
                if msg:
                    return msg

        AppSettingHelper.reload()
        return "success"


@setting("tinyms_core_setting_sys", "workbench/sys_setting_page.html", "基本", "tinyms.entity.setting.system")
class SystemSetting():
    def save(self, kv, http_req):
        return ""

    def form_submit_javascript(self, http_req):
        pass

    def form_fill_javascript(self, http_req):
        pass


@setting("tinyms_core_setting_user", "workbench/user_setting_page.html", "个人", "tinyms.entity.setting.user")
class UserSetting():
    def save(self, kv, http_req):
        _usr_old_pwd = kv.get("_usr_old_pwd")
        _usr_new_pwd = kv.get("_usr_new_pwd")
        _usr_new_repwd = kv.get("_usr_new_repwd")
        if _usr_old_pwd and _usr_new_pwd:
            if _usr_new_pwd == _usr_new_repwd:
                usr_id = http_req.current_user
                sf = SessionFactory.new()
                num = sf.query(func.count(Account.id)).filter(Account.id == usr_id) \
                    .filter(Account.login_pwd == Utils.md5(_usr_old_pwd)).scalar()
                if num > 0:
                    a = sf.query(Account).get(usr_id)
                    a.login_pwd = Utils.md5(_usr_new_pwd)
                    sf.commit()
                    return ""
                else:
                    return "PasswordError"
            else:
                return "PasswordNotSame"

    def form_submit_javascript(self, req):
        pass

    def form_fill_javascript(self, req):
        pass
bsd-3-clause
-2,948,846,204,067,711,000
30.925532
108
0.576
false
3.516999
false
false
false
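A sketch of registering an additional settings panel through the same @setting annotation, following the hook contract of the classes above; the key, template path, label and permission string are hypothetical.

from tinyms.core.annotation import setting

@setting("myapp_setting_mail", "workbench/mail_setting_page.html", "Mail", "tinyms.entity.setting.mail")
class MailSetting():
    def save(self, kv, http_req):
        # Return "" on success; any non-empty token is surfaced as an error
        # by SettingApi.save() above:
        if not kv.get("s_smtp_host"):
            return "SmtpHostRequired"
        return ""

    def form_submit_javascript(self, http_req):
        pass

    def form_fill_javascript(self, http_req):
        pass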
jcnix/shade
social/auth.py
1
2295
from django.contrib import auth from django.contrib.auth.models import User from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import render, render_to_response from django.template import RequestContext import forms as myforms def login(request): if not request.user.is_authenticated(): form = myforms.LoginForm() if request.method == 'POST': form = myforms.LoginForm(request.POST) if form.is_valid(): e = form.cleaned_data['email'] p = form.cleaned_data['password'] user = auth.authenticate(username=e, password=p) if user is not None: auth.login(request, user) return HttpResponseRedirect('/dashboard/') else: form._errors['email'] = [u'Unable to authenticate'] return render(request, 'registration/login.html', {'form': form}) return render(request, 'registration/login.html', {'form': form}) else: return HttpResponseRedirect('/') def logout(request): auth.logout(request) return HttpResponseRedirect('/login/') def register(request): if not request.user.is_authenticated(): if request.method == 'POST': form = myforms.RegisterForm(request.POST) if form.is_valid(): e = form.cleaned_data['email'] p = form.cleaned_data['password'] fn = form.cleaned_data['first_name'] ln = form.cleaned_data['last_name'] user = User.objects.create_user( username=e, email=e, password=p ) user.first_name = fn user.last_name= ln user.save() return HttpResponseRedirect('/') else: return render_to_response('register.html', {'form': form}, context_instance=RequestContext(request)) else: form = myforms.RegisterForm() return render_to_response('register.html', {'form': form}, context_instance=RequestContext(request)) else: return HttpResponseRedirect('/')
gpl-3.0
-197,508,900,974,582,820
37.898305
85
0.554684
false
4.862288
false
false
false
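A sketch of URL wiring for the views above, assuming the Django 1.x patterns() style contemporary with this code; the URL regexes are illustrative and the dotted paths follow this repo's social/auth.py layout.

from django.conf.urls import patterns, url

urlpatterns = patterns('',
    url(r'^login/$', 'social.auth.login'),
    url(r'^logout/$', 'social.auth.logout'),
    url(r'^register/$', 'social.auth.register'),
)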
pierrelux/mathnotes
mathnotes/views/auth.py
1
1936
from flask_oauthlib.client import OAuth

from mathnotes.models import db, ZoteroAuthorization

from flask import Flask, redirect, url_for, render_template, jsonify, Response, current_app, Blueprint, request, flash
from flask.ext.login import login_required, current_user

oauth = OAuth()

auth = Blueprint('auth', __name__, url_prefix='/auth')

zotero = oauth.remote_app(
    'zotero',
    base_url='https://api.zotero.org',
    request_token_url='https://www.zotero.org/oauth/request',
    access_token_url='https://www.zotero.org/oauth/access',
    authorize_url='https://www.zotero.org/oauth/authorize',
    app_key='ZOTERO'
)


@zotero.tokengetter
def get_zotero_token():
    auth = current_user.authorizations.first()
    if auth is not None:
        return auth.oauth_token, auth.oauth_secret
    return None


@auth.route('/oauth/zotero')
@login_required
def zotero_auth():
    callback_url = url_for('auth.zotero_authorized', next=request.args.get('next'))
    return zotero.authorize(callback=callback_url or request.referrer or None)


@auth.route('/oauth/zotero/authorized')
@login_required
@zotero.authorized_handler
def zotero_authorized(resp):
    if resp is not None:
        auth = ZoteroAuthorization(oauth_token=resp['oauth_token'],
                                   oauth_secret=resp['oauth_token_secret'],
                                   userID=resp['userID'],
                                   username=resp['username'],
                                   user_id=current_user.id)
        db.session.add(auth)
        db.session.commit()
    else:
        flash("Remote authentication to Zotero failed")

    return redirect(request.args.get("next") or url_for("frontend.index"))


@auth.route('/oauth/zotero/disconnect')
@login_required
def zotero_disconnect():
    auth = current_user.authorizations.first()
    db.session.delete(auth)
    db.session.commit()
    return redirect(request.args.get("next") or url_for("frontend.index"))
bsd-3-clause
237,288,060,501,950,180
32.964912
111
0.668388
false
3.482014
false
false
false
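A minimal app-factory sketch, assuming flask-oauthlib's lazy configuration: with app_key='ZOTERO' the consumer credentials are resolved from app.config['ZOTERO']; the credential values are placeholders.

from flask import Flask
from mathnotes.views.auth import auth, oauth

app = Flask(__name__)
# flask-oauthlib resolves app_key='ZOTERO' against this config entry:
app.config['ZOTERO'] = dict(consumer_key='xxx', consumer_secret='yyy')  # placeholders
oauth.init_app(app)
app.register_blueprint(auth)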
malaterre/dicom-private-dicts
re/pms/dump1.py
1
3032
#!/usr/bin/env python """ dump 1 """ import sys, json from struct import * array=[] def doit(f): chunk = f.read(0x2) l0 = unpack('>H', chunk) assert l0[0] == 50 chunk = f.read(l0[0]) s = unpack('>%ds' % l0[0], chunk) chunk = f.read(0x1) l2 = unpack('>B', chunk) #assert l2[0] == 0 chunk = f.read(0x2) l1 = unpack('>H', chunk) #print l1[0] # wotsit ? #print l0[0],s[0].decode('utf-16'),l1[0],l2[0] #print l0[0],s[0].decode('utf-16'),l1[0]+l2[0] #print s[0].decode('utf-16'),l1[0] el = {} el['name'] = s[0].decode('utf-16') el['index'] = l1[0]+l2[0] array.append( el ) def extract_name(i,f): chunk = f.read(0x1) o = unpack('>B', chunk) assert o[0] == 1 chunk = f.read(0x1) l0 = unpack('>B', chunk) chunk = f.read(l0[0]) s = unpack('>%ds' % l0[0], chunk) #print s[0] array[i]['value']=s[0] array[i]['len']=l0[0] def isnull(instr): for c in instr: assert ord(c) == 0 def extract_dad_file(i,f): print f.tell() corr = 1 # old (orig file) corr = 0 # already aligned ??? assert (f.tell() - corr) % 8 == 0 # 8bytes alignement # read length: chunk = f.read(0x4) z = unpack('<I', chunk) fl = z[0] chunk = f.read(fl) with open("output_%03d.dad" % i, "wb") as binfile: binfile.write( chunk ) # trailing stuff handling: pad = (f.tell() - corr) % 8 if pad != 0: chunk = f.read(8 - pad) isnull(chunk) # no digital trash, must be an in-memory representation # the intersting stuff lie in: # $ dd if=PmsDView.DMP of=dummy2.exe skip=104921721 count=1802240 bs=1 # as a side note we also have: # $ dd if=PmsDView.DMP of=dummy3.exe skip=106723961 count=1802240 bs=1 # $ md5sum dummy2.exe dummy3.exe # 6a58cd8dc039b2cfbeb4529b4fd13106 dummy2.exe # 6a58cd8dc039b2cfbeb4529b4fd13106 dummy3.exe if __name__ == "__main__": filename = sys.argv[1] with open(filename,'rb') as f: # MZ starts at 0x640FA79 #f.seek( 104932524 ) # 0x64124ac # orig file f.seek( 0x12F86F3 ) # new # file type 1: #print "start:", f.tell() chunk = f.read(0x2) d = unpack('>H', chunk) assert d[0] == 120 # number of elements (x2)? chunk = f.read(0x2) d = unpack('>H', chunk) print d # wotsit ? assert d[0] == 0x0f00 for i in range(0,60): doit(f) chunk = f.read(0x1) z = unpack('>B', chunk) assert z[0] == 0 #print (f.tell() - 1) % 4 for i in range(0,60): extract_name(i,f) #print "end:", f.tell() # file type dad/dotd: chunk = f.read(5) for i in range(0,153): # i > 153 is junk... extract_dad_file(i,f) print format(f.tell(), '08x') chunk = f.read(2000) # Some .NET stuff (BSJB) # The initials correspond to Brian Harry, Susan Radke-Sproull, Jason # Zander, and Bill Evans who were part of the team in 1998 that worked on # the CLR. with open("general_metadata_header.bin" , "wb") as binfile: binfile.write( chunk ) #print array #print json.dumps(array, sort_keys=True, indent=4)
bsd-3-clause
-5,247,748,825,108,255,000
26.315315
77
0.581464
false
2.539363
false
false
false
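A standalone sketch of the record layout doit() parses: a big-endian uint16 byte length, that many bytes of UTF-16 text, then a uint8 and a uint16 that sum to the element index; the bytes below are synthetic and the big-endian text encoding is an assumption.

from struct import pack, unpack
from io import BytesIO

name = u"Name".encode("utf-16-be")
blob = BytesIO(pack(">H", len(name)) + name + pack(">B", 0) + pack(">H", 7))

(length,) = unpack(">H", blob.read(2))      # uint16 byte length of the string
raw = blob.read(length)                     # UTF-16 payload
(l2,) = unpack(">B", blob.read(1))          # uint8 part of the index
(l1,) = unpack(">H", blob.read(2))          # uint16 part of the index
print("%s %d" % (raw.decode("utf-16-be"), l1 + l2))   # -> Name 7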
parksandwildlife/wastd
taxonomy/migrations/0003_auto_20181022_1156.py
1
3205
# Generated by Django 2.0.8 on 2018-10-22 03:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('taxonomy', '0002_auto_20180926_1147'), ] operations = [ migrations.AlterField( model_name='hbvfamily', name='class_name', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Class'), ), migrations.AlterField( model_name='hbvfamily', name='division_name', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Division'), ), migrations.AlterField( model_name='hbvfamily', name='family_name', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Family Name'), ), migrations.AlterField( model_name='hbvfamily', name='kingdom_name', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Kingdom'), ), migrations.AlterField( model_name='hbvfamily', name='order_name', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Order Name'), ), migrations.AlterField( model_name='hbvfamily', name='supra_code', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'), ), migrations.AlterField( model_name='hbvgroup', name='class_id', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'), ), migrations.AlterField( model_name='hbvparent', name='class_id', field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='WACensus ClassID'), ), migrations.AlterField( model_name='hbvspecies', name='consv_code', field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Conservation Code'), ), migrations.AlterField( model_name='hbvspecies', name='naturalised', field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Naturalised'), ), migrations.AlterField( model_name='hbvspecies', name='ranking', field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Ranking'), ), migrations.AlterField( model_name='hbvvernacular', name='name', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Name'), ), migrations.AlterField( model_name='hbvvernacular', name='vernacular', field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Vernacular Name'), ), ]
mit
3,855,156,913,284,721,700
40.089744
132
0.582527
false
3.991283
false
false
false
jamespcole/home-assistant
homeassistant/components/openuv/binary_sensor.py
1
3751
"""Support for OpenUV binary sensors.""" import logging from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.util.dt import as_local, parse_datetime, utcnow from . import ( BINARY_SENSORS, DATA_OPENUV_CLIENT, DATA_PROTECTION_WINDOW, DOMAIN, TOPIC_UPDATE, TYPE_PROTECTION_WINDOW, OpenUvEntity) _LOGGER = logging.getLogger(__name__) ATTR_PROTECTION_WINDOW_ENDING_TIME = 'end_time' ATTR_PROTECTION_WINDOW_ENDING_UV = 'end_uv' ATTR_PROTECTION_WINDOW_STARTING_TIME = 'start_time' ATTR_PROTECTION_WINDOW_STARTING_UV = 'start_uv' DEPENDENCIES = ['openuv'] async def async_setup_platform( hass, config, async_add_entities, discovery_info=None): """Set up an OpenUV sensor based on existing config.""" pass async def async_setup_entry(hass, entry, async_add_entities): """Set up an OpenUV sensor based on a config entry.""" openuv = hass.data[DOMAIN][DATA_OPENUV_CLIENT][entry.entry_id] binary_sensors = [] for sensor_type in openuv.binary_sensor_conditions: name, icon = BINARY_SENSORS[sensor_type] binary_sensors.append( OpenUvBinarySensor( openuv, sensor_type, name, icon, entry.entry_id)) async_add_entities(binary_sensors, True) class OpenUvBinarySensor(OpenUvEntity, BinarySensorDevice): """Define a binary sensor for OpenUV.""" def __init__(self, openuv, sensor_type, name, icon, entry_id): """Initialize the sensor.""" super().__init__(openuv) self._async_unsub_dispatcher_connect = None self._entry_id = entry_id self._icon = icon self._latitude = openuv.client.latitude self._longitude = openuv.client.longitude self._name = name self._sensor_type = sensor_type self._state = None @property def icon(self): """Return the icon.""" return self._icon @property def is_on(self): """Return the status of the sensor.""" return self._state @property def should_poll(self): """Disable polling.""" return False @property def unique_id(self) -> str: """Return a unique, HASS-friendly identifier for this entity.""" return '{0}_{1}_{2}'.format( self._latitude, self._longitude, self._sensor_type) async def async_added_to_hass(self): """Register callbacks.""" @callback def update(): """Update the state.""" self.async_schedule_update_ha_state(True) self._async_unsub_dispatcher_connect = async_dispatcher_connect( self.hass, TOPIC_UPDATE, update) async def async_will_remove_from_hass(self): """Disconnect dispatcher listener when removed.""" if self._async_unsub_dispatcher_connect: self._async_unsub_dispatcher_connect() async def async_update(self): """Update the state.""" data = self.openuv.data[DATA_PROTECTION_WINDOW] if not data: return if self._sensor_type == TYPE_PROTECTION_WINDOW: self._state = parse_datetime( data['from_time']) <= utcnow() <= parse_datetime( data['to_time']) self._attrs.update({ ATTR_PROTECTION_WINDOW_ENDING_TIME: as_local(parse_datetime(data['to_time'])), ATTR_PROTECTION_WINDOW_ENDING_UV: data['to_uv'], ATTR_PROTECTION_WINDOW_STARTING_UV: data['from_uv'], ATTR_PROTECTION_WINDOW_STARTING_TIME: as_local(parse_datetime(data['from_time'])), })
apache-2.0
5,144,571,123,839,647,000
32.491071
72
0.625966
false
3.952582
false
false
false
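A standalone sketch of the protection-window comparison performed in async_update above, using the same Home Assistant datetime helpers; the window timestamps are synthetic.

from homeassistant.util.dt import parse_datetime, utcnow

data = {
    'from_time': '2019-01-01T10:00:00+00:00',   # synthetic protection window
    'to_time': '2019-01-01T16:00:00+00:00',
}
is_on = parse_datetime(data['from_time']) <= utcnow() <= parse_datetime(data['to_time'])
print(is_on)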
gokmen/Rasta
rasta_lib/model.py
1
1631
#!/usr/bin/python # -*- coding: utf-8 -*- ''' Rasta RST Editor 2010 - Gökmen Göksel <gokmeng:gmail.com> ''' # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as Published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. from PyQt4.QtCore import Qt from PyQt4.QtCore import QVariant from PyQt4.QtCore import QAbstractTableModel # i18n Support import gettext _ = gettext.translation('rasta', fallback=True).ugettext class LogTableModel(QAbstractTableModel): ''' Log table model for showing the logs in a proper way ''' def __init__(self, logs, parent=None, *args): QAbstractTableModel.__init__(self, parent, *args) self.arraydata = logs self.headerdata = [_('Line'), _('Message')] def rowCount(self, parent): ''' Return number of logs ''' return len(self.arraydata) def columnCount(self, parent): ''' It always returns 2 for now: Line and Message ''' return len(self.headerdata) def data(self, index, role): ''' Return data for given index and role ''' if not index.isValid(): return QVariant() elif role != Qt.DisplayRole: return QVariant() return QVariant(self.arraydata[index.row()][index.column()]) def headerData(self, col, orientation, role): ''' Return Header data for given column ''' if orientation == Qt.Horizontal and role == Qt.DisplayRole: return QVariant(self.headerdata[col]) return QVariant()
gpl-2.0
9,176,064,816,522,145,000
32.9375
79
0.654389
false
4.022222
false
false
false
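A minimal sketch wiring the model above into a Qt view; PyQt4 is required and the sample log rows are illustrative.

import sys
from PyQt4.QtGui import QApplication, QTableView
from rasta_lib.model import LogTableModel

app = QApplication(sys.argv)
logs = [(12, 'Title underline too short.'), (40, 'Unknown directive type.')]  # sample rows
view = QTableView()
view.setModel(LogTableModel(logs, view))
view.show()
sys.exit(app.exec_())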
mdavoodi/konkourse-python
messages/views.py
1
2496
# Create your views here. from django.http import HttpResponse from django.template import loader from django.template.context import Context from django.template import RequestContext from django.shortcuts import redirect from django.shortcuts import render from conversation.models import ConvoWall, ConversationPost def messages(request): if request.user.is_authenticated(): first_name = request.user.first_name last_name = request.user.last_name username = request.user.username messageWall = request.user.get_profile().messages messages = ConversationPost.objects.filter(wall=request.user.get_profile().messages) variables_for_template = { 'first_name': first_name, 'last_name': last_name, 'username': username, 'messages': messages, 'messageWall': messageWall, } return render(request, 'website/messages.html', variables_for_template, context_instance=RequestContext(request)) def messages_compose(request): if request.user.is_authenticated(): first_name = request.user.first_name last_name = request.user.last_name username = request.user.username messageWall = request.user.get_profile().messages messages = ConversationPost.objects.filter(wall=request.user.get_profile().messages) variables_for_template = { 'first_name': first_name, 'last_name': last_name, 'username': username, 'messages': messages, 'messageWall': messageWall, } return render(request, 'website/messages_compose.html', variables_for_template, context_instance=RequestContext(request)) def messages_view(request): if request.user.is_authenticated(): first_name = request.user.first_name last_name = request.user.last_name username = request.user.username messageWall = request.user.get_profile().messages messages = ConversationPost.objects.filter(wall=request.user.get_profile().messages) variables_for_template = { 'first_name': first_name, 'last_name': last_name, 'username': username, 'messages': messages, 'messageWall': messageWall, } return render(request, 'website/messages_view.html', variables_for_template, context_instance=RequestContext(request))
mit
-7,466,661,102,449,948,000
37.4
92
0.648638
false
4.441281
false
false
false
jelly/calibre
src/calibre/gui2/tweak_book/ui.py
1
39312
#!/usr/bin/env python2 # vim:fileencoding=utf-8 from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import os from functools import partial from itertools import product from future_builtins import map from PyQt5.Qt import ( QDockWidget, Qt, QLabel, QIcon, QAction, QApplication, QWidget, QEvent, QVBoxLayout, QStackedWidget, QTabWidget, QImage, QPixmap, pyqtSignal, QMenu, QHBoxLayout, QTimer, QUrl, QSize) from calibre import prints from calibre.constants import __appname__, get_version, isosx, DEBUG from calibre.gui2 import elided_text, open_url from calibre.gui2.dbus_export.widgets import factory from calibre.gui2.keyboard import Manager as KeyboardManager from calibre.gui2.main_window import MainWindow from calibre.gui2.throbber import ThrobbingButton from calibre.gui2.tweak_book import ( current_container, tprefs, actions, capitalize, toolbar_actions, editors, update_mark_text_action) from calibre.gui2.tweak_book.file_list import FileListWidget from calibre.gui2.tweak_book.job import BlockingJob from calibre.gui2.tweak_book.boss import Boss from calibre.gui2.tweak_book.undo import CheckpointView from calibre.gui2.tweak_book.preview import Preview from calibre.gui2.tweak_book.plugin import create_plugin_actions from calibre.gui2.tweak_book.search import SearchPanel from calibre.gui2.tweak_book.check import Check from calibre.gui2.tweak_book.check_links import CheckExternalLinks from calibre.gui2.tweak_book.spell import SpellCheck from calibre.gui2.tweak_book.search import SavedSearches from calibre.gui2.tweak_book.text_search import TextSearch from calibre.gui2.tweak_book.toc import TOCViewer from calibre.gui2.tweak_book.char_select import CharSelect from calibre.gui2.tweak_book.live_css import LiveCSS from calibre.gui2.tweak_book.reports import Reports from calibre.gui2.tweak_book.manage_fonts import ManageFonts from calibre.gui2.tweak_book.function_replace import DebugOutput from calibre.gui2.tweak_book.editor.widget import register_text_editor_actions from calibre.gui2.tweak_book.editor.insert_resource import InsertImage from calibre.utils.icu import character_name, sort_key from calibre.utils.localization import localize_user_manual_link def open_donate(): open_url(QUrl('https://calibre-ebook.com/donate')) class Central(QStackedWidget): # {{{ ' The central widget, hosts the editors ' current_editor_changed = pyqtSignal() close_requested = pyqtSignal(object) def __init__(self, parent=None): QStackedWidget.__init__(self, parent) self.welcome = w = QLabel('<p>'+_( 'Double click a file in the left panel to start editing' ' it.')) self.addWidget(w) w.setWordWrap(True) w.setAlignment(Qt.AlignTop | Qt.AlignHCenter) self.container = c = QWidget(self) self.addWidget(c) l = c.l = QVBoxLayout(c) c.setLayout(l) l.setContentsMargins(0, 0, 0, 0) self.editor_tabs = t = QTabWidget(c) l.addWidget(t) t.setDocumentMode(True) t.setTabsClosable(True) t.setMovable(True) pal = self.palette() if pal.color(pal.WindowText).lightness() > 128: i = QImage(I('modified.png')) i.invertPixels() self.modified_icon = QIcon(QPixmap.fromImage(i)) else: self.modified_icon = QIcon(I('modified.png')) self.editor_tabs.currentChanged.connect(self.current_editor_changed) self.editor_tabs.tabCloseRequested.connect(self._close_requested) self.search_panel = SearchPanel(self) l.addWidget(self.search_panel) self.restore_state() self.editor_tabs.tabBar().installEventFilter(self) def _close_requested(self, index): 
editor = self.editor_tabs.widget(index) self.close_requested.emit(editor) def add_editor(self, name, editor): fname = name.rpartition('/')[2] index = self.editor_tabs.addTab(editor, fname) self.editor_tabs.setTabToolTip(index, _('Full path:') + ' ' + name) editor.modification_state_changed.connect(self.editor_modified) @property def tab_order(self): ans = [] rmap = {v:k for k, v in editors.iteritems()} for i in xrange(self.editor_tabs.count()): name = rmap.get(self.editor_tabs.widget(i)) if name is not None: ans.append(name) return ans def rename_editor(self, editor, name): for i in xrange(self.editor_tabs.count()): if self.editor_tabs.widget(i) is editor: fname = name.rpartition('/')[2] self.editor_tabs.setTabText(i, fname) self.editor_tabs.setTabToolTip(i, _('Full path:') + ' ' + name) def show_editor(self, editor): self.setCurrentIndex(1) self.editor_tabs.setCurrentWidget(editor) def close_editor(self, editor): for i in xrange(self.editor_tabs.count()): if self.editor_tabs.widget(i) is editor: self.editor_tabs.removeTab(i) if self.editor_tabs.count() == 0: self.setCurrentIndex(0) return True return False def editor_modified(self, *args): tb = self.editor_tabs.tabBar() for i in xrange(self.editor_tabs.count()): editor = self.editor_tabs.widget(i) modified = getattr(editor, 'is_modified', False) tb.setTabIcon(i, self.modified_icon if modified else QIcon()) def close_current_editor(self): ed = self.current_editor if ed is not None: self.close_requested.emit(ed) def close_all_but_current_editor(self): self.close_all_but(self.current_editor) def close_all_but(self, ed): close = [] if ed is not None: for i in xrange(self.editor_tabs.count()): q = self.editor_tabs.widget(i) if q is not None and q is not ed: close.append(q) for q in close: self.close_requested.emit(q) @property def current_editor(self): return self.editor_tabs.currentWidget() def save_state(self): tprefs.set('search-panel-visible', self.search_panel.isVisible()) self.search_panel.save_state() for ed in editors.itervalues(): ed.save_state() if self.current_editor is not None: self.current_editor.save_state() # Ensure the current editor saves it state last def restore_state(self): self.search_panel.setVisible(tprefs.get('search-panel-visible', False)) self.search_panel.restore_state() def show_find(self): self.search_panel.show_panel() def pre_fill_search(self, text): self.search_panel.pre_fill(text) def eventFilter(self, obj, event): base = super(Central, self) if obj is not self.editor_tabs.tabBar() or event.type() != QEvent.MouseButtonPress or event.button() not in (Qt.RightButton, Qt.MidButton): return base.eventFilter(obj, event) index = self.editor_tabs.tabBar().tabAt(event.pos()) if index < 0: return base.eventFilter(obj, event) if event.button() == Qt.MidButton: self._close_requested(index) ed = self.editor_tabs.widget(index) if ed is not None: menu = QMenu(self) menu.addAction(actions['close-current-tab'].icon(), _('Close tab'), partial(self.close_requested.emit, ed)) menu.addSeparator() menu.addAction(actions['close-all-but-current-tab'].icon(), _('Close other tabs'), partial(self.close_all_but, ed)) menu.exec_(self.editor_tabs.tabBar().mapToGlobal(event.pos())) return True # }}} class CursorPositionWidget(QWidget): # {{{ def __init__(self, parent): QWidget.__init__(self, parent) self.l = QHBoxLayout(self) self.setLayout(self.l) self.la = QLabel('') self.l.addWidget(self.la) self.l.setContentsMargins(0, 0, 0, 0) f = self.la.font() f.setBold(False) self.la.setFont(f) def update_position(self, line=None, col=None, 
character=None): if line is None: self.la.setText('') else: try: name = character_name(character) if character and tprefs['editor_show_char_under_cursor'] else None except Exception: name = None text = _('Line: {0} : {1}').format(line, col) if not name: name = {'\t':'TAB'}.get(character, None) if name and tprefs['editor_show_char_under_cursor']: text = name + ' : ' + text self.la.setText(text) # }}} class Main(MainWindow): APP_NAME = _('Edit book') STATE_VERSION = 0 def __init__(self, opts, notify=None): MainWindow.__init__(self, opts, disable_automatic_gc=True) self.setWindowTitle(self.APP_NAME) self.boss = Boss(self, notify=notify) self.setWindowIcon(QIcon(I('tweak.png'))) self.opts = opts self.path_to_ebook = None self.container = None self.current_metadata = None self.blocking_job = BlockingJob(self) self.keyboard = KeyboardManager(self, config_name='shortcuts/tweak_book') self.central = Central(self) self.setCentralWidget(self.central) self.check_book = Check(self) self.spell_check = SpellCheck(parent=self) self.toc_view = TOCViewer(self) self.text_search = TextSearch(self) self.saved_searches = SavedSearches(self) self.image_browser = InsertImage(self, for_browsing=True) self.reports = Reports(self) self.check_external_links = CheckExternalLinks(self) self.insert_char = CharSelect(self) self.manage_fonts = ManageFonts(self) self.sr_debug_output = DebugOutput(self) self.create_actions() self.create_toolbars() self.create_docks() self.create_menubar() self.status_bar = self.statusBar() self.status_bar.addPermanentWidget(self.boss.save_manager.status_widget) self.cursor_position_widget = CursorPositionWidget(self) self.status_bar.addPermanentWidget(self.cursor_position_widget) self.status_bar_default_msg = la = QLabel(' ' + _('{0} {1} created by {2}').format(__appname__, get_version(), 'Kovid Goyal')) la.base_template = unicode(la.text()) self.status_bar.addWidget(la) f = self.status_bar.font() f.setBold(True) self.status_bar.setFont(f) self.boss(self) g = QApplication.instance().desktop().availableGeometry(self) self.resize(g.width()-50, g.height()-50) self.restore_state() self.apply_settings() def apply_settings(self): self.keyboard.finalize() self.setDockNestingEnabled(tprefs['nestable_dock_widgets']) for v, h in product(('top', 'bottom'), ('left', 'right')): p = 'dock_%s_%s' % (v, h) pref = tprefs[p] or tprefs.defaults[p] area = getattr(Qt, '%sDockWidgetArea' % capitalize({'vertical':h, 'horizontal':v}[pref])) self.setCorner(getattr(Qt, '%s%sCorner' % tuple(map(capitalize, (v, h)))), area) self.preview.apply_settings() self.live_css.apply_theme() for bar in (self.global_bar, self.tools_bar, self.plugins_bar): bar.setIconSize(QSize(tprefs['toolbar_icon_size'], tprefs['toolbar_icon_size'])) def show_status_message(self, msg, timeout=5): self.status_bar.showMessage(msg, int(timeout*1000)) def elided_text(self, text, width=300): return elided_text(text, font=self.font(), width=width) @property def editor_tabs(self): return self.central.editor_tabs def create_actions(self): group = _('Global actions') def reg(icon, text, target, sid, keys, description, toolbar_allowed=False): if not isinstance(icon, QIcon): icon = QIcon(I(icon)) ac = actions[sid] = QAction(icon, text, self) if icon else QAction(text, self) ac.setObjectName('action-' + sid) if toolbar_allowed: toolbar_actions[sid] = ac if target is not None: ac.triggered.connect(target) if isinstance(keys, type('')): keys = (keys,) self.keyboard.register_shortcut( sid, unicode(ac.text()).replace('&', ''), default_keys=keys, 
description=description, action=ac, group=group) self.addAction(ac) return ac def treg(icon, text, target, sid, keys, description): return reg(icon, text, target, sid, keys, description, toolbar_allowed=icon is not None) self.action_new_file = treg('document-new.png', _('&New file (images/fonts/HTML/etc.)'), self.boss.add_file, 'new-file', (), _('Create a new file in the current book')) self.action_import_files = treg('document-import.png', _('&Import files into book'), self.boss.add_files, 'new-files', (), _('Import files into book')) self.action_open_book = treg('document_open.png', _('&Open book'), self.boss.open_book, 'open-book', 'Ctrl+O', _('Open a new book')) self.action_open_book_folder = treg('mimetypes/dir.png', _('Open &folder (unzipped EPUB) as book'), partial(self.boss.open_book, open_folder=True), 'open-folder-as-book', (), _('Open a folder (unzipped EPUB) as a book')) # Qt does not generate shortcut overrides for cmd+arrow on os x which # means these shortcuts interfere with editing self.action_global_undo = treg('back.png', _('&Revert to before'), self.boss.do_global_undo, 'global-undo', () if isosx else 'Ctrl+Left', _('Revert book to before the last action (Undo)')) self.action_global_redo = treg('forward.png', _('&Revert to after'), self.boss.do_global_redo, 'global-redo', () if isosx else 'Ctrl+Right', _('Revert book state to after the next action (Redo)')) self.action_save = treg('save.png', _('&Save'), self.boss.save_book, 'save-book', 'Ctrl+S', _('Save book')) self.action_save.setEnabled(False) self.action_save_copy = treg('save.png', _('Save a &copy'), self.boss.save_copy, 'save-copy', 'Ctrl+Alt+S', _('Save a copy of the book')) self.action_quit = treg('window-close.png', _('&Quit'), self.boss.quit, 'quit', 'Ctrl+Q', _('Quit')) self.action_preferences = treg('config.png', _('&Preferences'), self.boss.preferences, 'preferences', 'Ctrl+P', _('Preferences')) self.action_new_book = treg('plus.png', _('Create &new, empty book'), self.boss.new_book, 'new-book', (), _('Create a new, empty book')) self.action_import_book = treg('add_book.png', _('&Import an HTML or DOCX file as a new book'), self.boss.import_book, 'import-book', (), _('Import an HTML or DOCX file as a new book')) self.action_quick_edit = treg('modified.png', _('&Quick open a file to edit'), self.boss.quick_open, 'quick-open', ('Ctrl+T'), _( 'Quickly open a file from the book to edit it')) # Editor actions group = _('Editor actions') self.action_editor_undo = reg('edit-undo.png', _('&Undo'), self.boss.do_editor_undo, 'editor-undo', 'Ctrl+Z', _('Undo typing')) self.action_editor_redo = reg('edit-redo.png', _('R&edo'), self.boss.do_editor_redo, 'editor-redo', 'Ctrl+Y', _('Redo typing')) self.action_editor_cut = reg('edit-cut.png', _('Cut &text'), self.boss.do_editor_cut, 'editor-cut', ('Ctrl+X', 'Shift+Delete', ), _('Cut text')) self.action_editor_copy = reg('edit-copy.png', _('&Copy to clipboard'), self.boss.do_editor_copy, 'editor-copy', ('Ctrl+C', 'Ctrl+Insert'), _('Copy to clipboard')) self.action_editor_paste = reg('edit-paste.png', _('P&aste from clipboard'), self.boss.do_editor_paste, 'editor-paste', ('Ctrl+V', 'Shift+Insert', ), _('Paste from clipboard')) self.action_editor_cut.setEnabled(False) self.action_editor_copy.setEnabled(False) self.action_editor_undo.setEnabled(False) self.action_editor_redo.setEnabled(False) # Tool actions group = _('Tools') self.action_toc = treg('toc.png', _('&Edit Table of Contents'), self.boss.edit_toc, 'edit-toc', (), _('Edit Table of Contents')) self.action_inline_toc = 
treg('chapters.png', _('&Insert inline Table of Contents'), self.boss.insert_inline_toc, 'insert-inline-toc', (), _('Insert inline Table of Contents')) self.action_fix_html_current = reg('html-fix.png', _('&Fix HTML'), partial(self.boss.fix_html, True), 'fix-html-current', (), _('Fix HTML in the current file')) self.action_fix_html_all = treg('html-fix.png', _('&Fix HTML - all files'), partial(self.boss.fix_html, False), 'fix-html-all', (), _('Fix HTML in all files')) self.action_pretty_current = reg('beautify.png', _('&Beautify current file'), partial(self.boss.pretty_print, True), 'pretty-current', (), _('Beautify current file')) self.action_pretty_all = treg('beautify.png', _('&Beautify all files'), partial(self.boss.pretty_print, False), 'pretty-all', (), _('Beautify all files')) self.action_insert_char = treg('character-set.png', _('&Insert special character'), self.boss.insert_character, 'insert-character', (), _('Insert special character')) self.action_rationalize_folders = treg('mimetypes/dir.png', _('&Arrange into folders'), self.boss.rationalize_folders, 'rationalize-folders', (), _('Arrange into folders')) self.action_set_semantics = treg('tags.png', _('Set &semantics'), self.boss.set_semantics, 'set-semantics', (), _('Set semantics')) self.action_filter_css = treg('filter.png', _('&Filter style information'), self.boss.filter_css, 'filter-css', (), _('Filter style information')) self.action_manage_fonts = treg('font.png', _('&Manage fonts'), self.boss.manage_fonts, 'manage-fonts', (), _('Manage fonts in the book')) self.action_add_cover = treg('default_cover.png', _('Add &cover'), self.boss.add_cover, 'add-cover', (), _('Add a cover to the book')) self.action_reports = treg( 'reports.png', _('&Reports'), self.boss.show_reports, 'show-reports', ('Ctrl+Shift+R',), _('Show a report on various aspects of the book')) self.action_check_external_links = treg('insert-link.png', _('Check &external links'), self.boss.check_external_links, 'check-external-links', (), _( 'Check external links in the book')) self.action_compress_images = treg('compress-image.png', _('C&ompress images losslessly'), self.boss.compress_images, 'compress-images', (), _( 'Compress images losslessly')) self.action_transform_styles = treg('wizard.png', _('Transform &styles'), self.boss.transform_styles, 'transform-styles', (), _( 'Transform styles used in the book')) self.action_get_ext_resources = treg('download-metadata.png', _('Download external &resources'), self.boss.get_external_resources, 'get-external-resources', (), _( 'Download external resources in the book (images/stylesheets/etc/ that are not included in the book)')) def ereg(icon, text, target, sid, keys, description): return reg(icon, text, partial(self.boss.editor_action, target), sid, keys, description) register_text_editor_actions(ereg, self.palette()) # Polish actions group = _('Polish book') self.action_subset_fonts = treg( 'subset-fonts.png', _('&Subset embedded fonts'), partial( self.boss.polish, 'subset', _('Subset fonts')), 'subset-fonts', (), _('Subset embedded fonts')) self.action_embed_fonts = treg( 'embed-fonts.png', _('&Embed referenced fonts'), partial( self.boss.polish, 'embed', _('Embed fonts')), 'embed-fonts', (), _('Embed referenced fonts')) self.action_smarten_punctuation = treg( 'smarten-punctuation.png', _('&Smarten punctuation (works best for English)'), partial( self.boss.polish, 'smarten_punctuation', _('Smarten punctuation')), 'smarten-punctuation', (), _('Smarten punctuation')) self.action_remove_unused_css = treg( 
'edit-clear.png', _('Remove &unused CSS rules'), partial( self.boss.polish, 'remove_unused_css', _('Remove unused CSS rules')), 'remove-unused-css', (), _('Remove unused CSS rules')) # Preview actions group = _('Preview') self.action_auto_reload_preview = reg('auto-reload.png', _('Auto reload preview'), None, 'auto-reload-preview', (), _('Auto reload preview')) self.action_auto_sync_preview = reg('sync-right.png', _('Sync preview position to editor position'), None, 'sync-preview-to-editor', (), _( 'Sync preview position to editor position')) self.action_reload_preview = reg('view-refresh.png', _('Refresh preview'), None, 'reload-preview', ('F5',), _('Refresh preview')) self.action_split_in_preview = reg('document-split.png', _('Split this file'), None, 'split-in-preview', (), _( 'Split file in the preview panel')) self.action_find_next_preview = reg('arrow-down.png', _('Find next'), None, 'find-next-preview', (), _('Find next in preview')) self.action_find_prev_preview = reg('arrow-up.png', _('Find previous'), None, 'find-prev-preview', (), _('Find previous in preview')) # Search actions group = _('Search') self.action_find = treg('search.png', _('&Find/replace'), self.boss.show_find, 'find-replace', ('Ctrl+F',), _('Show the Find/replace panel')) def sreg(name, text, action, overrides={}, keys=(), description=None, icon=None): return reg(icon, text, partial(self.boss.search_action_triggered, action, overrides), name, keys, description or text.replace('&', '')) self.action_find_next = sreg('find-next', _('Find &next'), 'find', {'direction':'down'}, ('F3', 'Ctrl+G'), _('Find next match')) self.action_find_previous = sreg('find-previous', _('Find &previous'), 'find', {'direction':'up'}, ('Shift+F3', 'Shift+Ctrl+G'), _('Find previous match')) self.action_replace = sreg('replace', _('&Replace'), 'replace', keys=('Ctrl+R'), description=_('Replace current match')) self.action_replace_next = sreg('replace-next', _('&Replace and find next'), 'replace-find', {'direction':'down'}, ('Ctrl+]'), _('Replace current match and find next')) self.action_replace_previous = sreg('replace-previous', _('R&eplace and find previous'), 'replace-find', {'direction':'up'}, ('Ctrl+['), _('Replace current match and find previous')) self.action_replace_all = sreg('replace-all', _('Replace &all'), 'replace-all', keys=('Ctrl+A'), description=_('Replace all matches')) self.action_count = sreg('count-matches', _('&Count all'), 'count', keys=('Ctrl+N'), description=_('Count number of matches')) self.action_mark = reg(None, _('&Mark selected text'), self.boss.mark_selected_text, 'mark-selected-text', ('Ctrl+Shift+M',), _('Mark selected text or unmark already marked text')) self.action_mark.default_text = self.action_mark.text() self.action_go_to_line = reg(None, _('Go to &line'), self.boss.go_to_line_number, 'go-to-line-number', ('Ctrl+.',), _('Go to line number')) self.action_saved_searches = treg('folder_saved_search.png', _('Sa&ved searches'), self.boss.saved_searches, 'saved-searches', (), _('Show the saved searches dialog')) self.action_text_search = treg('view.png', _('&Search ignoring HTML markup'), self.boss.show_text_search, 'text-search', (), _('Show the text search panel')) # Check Book actions group = _('Check book') self.action_check_book = treg('debug.png', _('&Check book'), self.boss.check_requested, 'check-book', ('F7'), _('Check book for errors')) self.action_spell_check_book = treg('spell-check.png', _('Check &spelling'), self.boss.spell_check_requested, 'spell-check-book', ('Alt+F7'), _( 'Check book for 
spelling errors')) self.action_check_book_next = reg('forward.png', _('&Next error'), partial( self.check_book.next_error, delta=1), 'check-book-next', ('Ctrl+F7'), _('Show next error')) self.action_check_book_previous = reg('back.png', _('&Previous error'), partial( self.check_book.next_error, delta=-1), 'check-book-previous', ('Ctrl+Shift+F7'), _('Show previous error')) self.action_spell_check_next = reg('forward.png', _('&Next spelling mistake'), self.boss.next_spell_error, 'spell-next', ('F8'), _('Go to next spelling mistake')) # Miscellaneous actions group = _('Miscellaneous') self.action_create_checkpoint = treg( 'marked.png', _('&Create checkpoint'), self.boss.create_checkpoint, 'create-checkpoint', (), _( 'Create a checkpoint with the current state of the book')) self.action_close_current_tab = reg( 'window-close.png', _('&Close current tab'), self.central.close_current_editor, 'close-current-tab', 'Ctrl+W', _( 'Close the currently open tab')) self.action_close_all_but_current_tab = reg( 'edit-clear.png', _('&Close other tabs'), self.central.close_all_but_current_editor, 'close-all-but-current-tab', 'Ctrl+Alt+W', _( 'Close all tabs except the current tab')) self.action_help = treg( 'help.png', _('User &Manual'), lambda : open_url(QUrl(localize_user_manual_link( 'https://manual.calibre-ebook.com/edit.html'))), 'user-manual', 'F1', _( 'Show User Manual')) self.action_browse_images = treg( 'view-image.png', _('&Browse images in book'), self.boss.browse_images, 'browse-images', (), _( 'Browse images in the books visually')) self.action_multiple_split = treg( 'document-split.png', _('&Split at multiple locations'), self.boss.multisplit, 'multisplit', (), _( 'Split HTML file at multiple locations')) self.action_compare_book = treg('diff.png', _('Compare to &another book'), self.boss.compare_book, 'compare-book', (), _( 'Compare to another book')) self.action_manage_snippets = treg( 'snippets.png', _('Manage &Snippets'), self.boss.manage_snippets, 'manage-snippets', (), _( 'Manage user created snippets')) self.plugin_menu_actions = [] create_plugin_actions(actions, toolbar_actions, self.plugin_menu_actions) def create_menubar(self): if isosx: p, q = self.create_application_menubar() q.triggered.connect(self.action_quit.trigger) p.triggered.connect(self.action_preferences.trigger) f = factory(app_id='com.calibre-ebook.EditBook-%d' % os.getpid()) b = f.create_window_menubar(self) f = b.addMenu(_('&File')) f.addAction(self.action_new_file) f.addAction(self.action_import_files) f.addSeparator() f.addAction(self.action_open_book) f.addAction(self.action_new_book) f.addAction(self.action_import_book) f.addAction(self.action_open_book_folder) self.recent_books_menu = f.addMenu(_('&Recently opened books')) self.update_recent_books() f.addSeparator() f.addAction(self.action_save) f.addAction(self.action_save_copy) f.addSeparator() f.addAction(self.action_compare_book) f.addAction(self.action_quit) e = b.addMenu(_('&Edit')) e.addAction(self.action_global_undo) e.addAction(self.action_global_redo) e.addAction(self.action_create_checkpoint) e.addSeparator() e.addAction(self.action_editor_undo) e.addAction(self.action_editor_redo) e.addSeparator() e.addAction(self.action_editor_cut) e.addAction(self.action_editor_copy) e.addAction(self.action_editor_paste) e.addAction(self.action_insert_char) e.addSeparator() e.addAction(self.action_quick_edit) e.addAction(self.action_preferences) e = b.addMenu(_('&Tools')) tm = e.addMenu(_('Table of Contents')) tm.addAction(self.action_toc) 
tm.addAction(self.action_inline_toc) e.addAction(self.action_manage_fonts) e.addAction(self.action_embed_fonts) e.addAction(self.action_subset_fonts) e.addAction(self.action_compress_images) e.addAction(self.action_smarten_punctuation) e.addAction(self.action_remove_unused_css) e.addAction(self.action_transform_styles) e.addAction(self.action_fix_html_all) e.addAction(self.action_pretty_all) e.addAction(self.action_rationalize_folders) e.addAction(self.action_add_cover) e.addAction(self.action_set_semantics) e.addAction(self.action_filter_css) e.addAction(self.action_spell_check_book) er = e.addMenu(_('External &links')) er.addAction(self.action_check_external_links) er.addAction(self.action_get_ext_resources) e.addAction(self.action_check_book) e.addAction(self.action_reports) e = b.addMenu(_('&View')) t = e.addMenu(_('Tool&bars')) e.addSeparator() for name in sorted(actions, key=lambda x:sort_key(actions[x].text())): ac = actions[name] if name.endswith('-dock'): e.addAction(ac) elif name.endswith('-bar'): t.addAction(ac) e.addAction(self.action_browse_images) e.addSeparator() e.addAction(self.action_close_current_tab) e.addAction(self.action_close_all_but_current_tab) e = b.addMenu(_('&Search')) a = e.addAction a(self.action_find) e.addSeparator() a(self.action_find_next) a(self.action_find_previous) e.addSeparator() a(self.action_replace) a(self.action_replace_next) a(self.action_replace_previous) a(self.action_replace_all) e.addSeparator() a(self.action_count) e.addSeparator() a(self.action_mark) e.addSeparator() a(self.action_go_to_line) e.addSeparator() a(self.action_saved_searches) e.aboutToShow.connect(self.search_menu_about_to_show) e.addSeparator() a(self.action_text_search) if self.plugin_menu_actions: e = b.addMenu(_('&Plugins')) for ac in sorted(self.plugin_menu_actions, key=lambda x:sort_key(unicode(x.text()))): e.addAction(ac) e = b.addMenu(_('&Help')) a = e.addAction a(self.action_help) a(QIcon(I('donate.png')), _('&Donate to support calibre development'), open_donate) a(self.action_preferences) def search_menu_about_to_show(self): ed = self.central.current_editor update_mark_text_action(ed) def update_recent_books(self): m = self.recent_books_menu m.clear() books = tprefs.get('recent-books', []) for path in books: m.addAction(self.elided_text(path, width=500), partial(self.boss.open_book, path=path)) def create_toolbars(self): def create(text, name): name += '-bar' b = self.addToolBar(text) b.setObjectName(name) # Needed for saveState actions[name] = b.toggleViewAction() b.setIconSize(QSize(tprefs['toolbar_icon_size'], tprefs['toolbar_icon_size'])) return b self.global_bar = create(_('Book tool bar'), 'global') self.tools_bar = create(_('Tools tool bar'), 'tools') self.plugins_bar = create(_('Plugins tool bar'), 'plugins') self.populate_toolbars(animate=True) def populate_toolbars(self, animate=False): self.global_bar.clear(), self.tools_bar.clear(), self.plugins_bar.clear() def add(bar, ac): if ac is None: bar.addSeparator() elif ac == 'donate': self.donate_button = b = ThrobbingButton(self) b.clicked.connect(open_donate) b.setAutoRaise(True) b.setToolTip(_('Donate to support calibre development')) if animate: QTimer.singleShot(10, b.start_animation) bar.addWidget(b) else: try: bar.addAction(actions[ac]) except KeyError: if DEBUG: prints('Unknown action for toolbar %r: %r' % (unicode(bar.objectName()), ac)) for x in tprefs['global_book_toolbar']: add(self.global_bar, x) for x in tprefs['global_tools_toolbar']: add(self.tools_bar, x) for x in 
tprefs['global_plugins_toolbar']: add(self.plugins_bar, x) self.plugins_bar.setVisible(bool(tprefs['global_plugins_toolbar'])) def create_docks(self): def create(name, oname): oname += '-dock' d = QDockWidget(name, self) d.setObjectName(oname) # Needed for saveState ac = d.toggleViewAction() desc = _('Toggle %s') % name.replace('&', '') self.keyboard.register_shortcut( oname, desc, description=desc, action=ac, group=_('Windows')) actions[oname] = ac setattr(self, oname.replace('-', '_'), d) return d d = create(_('File browser'), 'files-browser') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) self.file_list = FileListWidget(d) d.setWidget(self.file_list) self.addDockWidget(Qt.LeftDockWidgetArea, d) d = create(_('File preview'), 'preview') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea) self.preview = Preview(d) d.setWidget(self.preview) self.addDockWidget(Qt.RightDockWidgetArea, d) d = create(_('Live CSS'), 'live-css') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) self.live_css = LiveCSS(self.preview, parent=d) d.setWidget(self.live_css) self.addDockWidget(Qt.RightDockWidgetArea, d) d.close() # Hidden by default d = create(_('Check book'), 'check-book') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) d.setWidget(self.check_book) self.addDockWidget(Qt.TopDockWidgetArea, d) d.close() # By default the check window is closed d = create(_('Inspector'), 'inspector') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) d.setWidget(self.preview.inspector) self.preview.inspector.setParent(d) self.addDockWidget(Qt.BottomDockWidgetArea, d) d.close() # By default the inspector window is closed d.setFeatures(d.DockWidgetClosable | d.DockWidgetMovable) # QWebInspector does not work in a floating dock d = create(_('Table of Contents'), 'toc-viewer') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) d.setWidget(self.toc_view) self.addDockWidget(Qt.LeftDockWidgetArea, d) d.close() # Hidden by default d = create(_('Text search'), 'text-search') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) d.setWidget(self.text_search) self.addDockWidget(Qt.LeftDockWidgetArea, d) d.close() # Hidden by default d = create(_('Checkpoints'), 'checkpoints') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) self.checkpoints = CheckpointView(self.boss.global_undo, parent=d) d.setWidget(self.checkpoints) self.addDockWidget(Qt.LeftDockWidgetArea, d) d.close() # Hidden by default d = create(_('Saved searches'), 'saved-searches') d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea) d.setWidget(self.saved_searches) self.addDockWidget(Qt.LeftDockWidgetArea, d) d.close() # Hidden by default def resizeEvent(self, ev): self.blocking_job.resize(ev.size()) return super(Main, self).resizeEvent(ev) def update_window_title(self): fname = os.path.basename(current_container().path_to_ebook) self.setWindowTitle(self.current_metadata.title + ' [%s] :: %s :: %s' %(current_container().book_type.upper(), fname, self.APP_NAME)) def closeEvent(self, e): if self.boss.quit(): e.accept() else: e.ignore() def save_state(self): tprefs.set('main_window_geometry', 
bytearray(self.saveGeometry())) tprefs.set('main_window_state', bytearray(self.saveState(self.STATE_VERSION))) self.central.save_state() self.saved_searches.save_state() self.check_book.save_state() self.text_search.save_state() def restore_state(self): geom = tprefs.get('main_window_geometry', None) if geom is not None: self.restoreGeometry(geom) state = tprefs.get('main_window_state', None) if state is not None: self.restoreState(state, self.STATE_VERSION) self.central.restore_state() self.saved_searches.restore_state() def contextMenuEvent(self, ev): ev.ignore()
gpl-3.0
1,148,555,098,009,233,500
49.142857
159
0.604777
false
3.720965
false
false
false
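The reg()/treg() helpers in the file above funnel every QAction through one place that sets the object name, registers keyboard shortcuts, and optionally marks the action as toolbar-eligible. A minimal standalone sketch of that pattern in plain PyQt5 (the actions dict and register_action() here are stand-ins for calibre's registry and KeyboardManager, not its real API):

import sys

from PyQt5.Qt import QAction, QApplication, QKeySequence, QMainWindow

actions = {}  # stand-in for calibre's global action registry


def register_action(window, sid, text, slot, keys=()):
    # Normalize a single shortcut string into a tuple, as reg() does.
    if isinstance(keys, str):
        keys = (keys,)
    ac = QAction(text, window)
    ac.setObjectName('action-' + sid)
    ac.setShortcuts([QKeySequence(k) for k in keys])
    if slot is not None:
        ac.triggered.connect(slot)
    window.addAction(ac)  # shortcut fires even with no menu/toolbar entry
    actions[sid] = ac
    return ac


app = QApplication(sys.argv)
w = QMainWindow()
register_action(w, 'quit', '&Quit', app.quit, 'Ctrl+Q')
w.show()
sys.exit(app.exec_())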
andrewlewis/camera-id
test_characteristic.py
1
1197
#!/usr/bin/env python from make_characteristic import get_noise_from_file import cPickle import glob import numpy import sys from PIL import Image, ImageOps TILE_OVERLAP = 8 if len(sys.argv) != 3: print "Usage:\n\t%s noise_file_name path_with_png_files" % (sys.argv[0],) sys.exit(0) noise_file_name = sys.argv[1] image_path_name = sys.argv[2] # Load the camera noise. camera_noise = numpy.loadtxt(noise_file_name, dtype=numpy.float) camera_noise_average = numpy.average(camera_noise) camera_noise -= camera_noise_average camera_noise_norm = numpy.sqrt(numpy.sum(camera_noise * camera_noise)) file_list = glob.glob(image_path_name + '/*.png') print "Processing %d images" % (len(file_list),) for f in file_list: # Get this image's noise. image_noise = get_noise_from_file(f)[1] image_noise_average = numpy.average(image_noise) image_noise -= image_noise_average image_noise_norm = numpy.sqrt(numpy.sum(image_noise * image_noise)) # Calculate the correlation between the two signals. print "Dot product %s is: %s" % (f, numpy.sum(camera_noise * image_noise) / (camera_noise_norm * image_noise_norm))
mit
6,780,480,121,013,508,000
29.692308
76
0.675856
false
3.133508
false
false
false
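The score printed for each image above is a normalized cross-correlation between two zero-mean noise residuals. A self-contained sketch of the same arithmetic on synthetic arrays, which stand in for the camera fingerprint and a test image's noise:

import numpy

rng = numpy.random.RandomState(0)
camera_noise = rng.normal(size=(64, 64))
# An image from the same camera: fingerprint plus independent noise.
image_noise = camera_noise + 0.5 * rng.normal(size=(64, 64))


def normalized_correlation(a, b):
    # Zero-mean both residuals, then divide the dot product by the
    # product of their L2 norms, exactly as the script does.
    a = a - numpy.average(a)
    b = b - numpy.average(b)
    norm = numpy.sqrt(numpy.sum(a * a)) * numpy.sqrt(numpy.sum(b * b))
    return numpy.sum(a * b) / norm


print(normalized_correlation(camera_noise, image_noise))  # close to 0.9 here
print(normalized_correlation(camera_noise, rng.normal(size=(64, 64))))  # near 0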
unt-libraries/django-nomination
nomination/migrations/0004_auto_20190927_1904.py
1
7862
# -*- coding: utf-8 -*- # Generated by Django 1.11.24 on 2019-09-27 19:04 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('nomination', '0003_project_archive_url'), ] operations = [ migrations.AlterField( model_name='metadata', name='name', field=models.SlugField(help_text='Assign a name for the metadata field (letters, numbers, underscores, and hyphens are permissible).'), ), migrations.AlterField( model_name='metadata', name='value_sets', field=models.ManyToManyField(blank=True, help_text='In addition to values manually assigned, values in selected pre-defined sets will also be available to metadata fields.', to='nomination.ValueSet', verbose_name='metadata value sets'), ), migrations.AlterField( model_name='metadata', name='values', field=models.ManyToManyField(blank=True, help_text='Allowed value for metadata field.', through='nomination.Metadata_Values', to='nomination.Value', verbose_name='values'), ), migrations.AlterField( model_name='metadata_values', name='value_order', field=models.PositiveIntegerField(default=1, help_text='Change the ordering of the value fields, ordered lowest to highest'), ), migrations.AlterField( model_name='nominator', name='nominator_email', field=models.CharField(help_text='An email address for identifying your nominations in the system.', max_length=100), ), migrations.AlterField( model_name='nominator', name='nominator_institution', field=models.CharField(help_text='Your institutional affiliation.', max_length=100), ), migrations.AlterField( model_name='nominator', name='nominator_name', field=models.CharField(help_text='Your name.', max_length=100), ), migrations.AlterField( model_name='project', name='admin_email', field=models.CharField(help_text='Email address of project administrator.', max_length=80), ), migrations.AlterField( model_name='project', name='admin_name', field=models.CharField(help_text='Name of project administrator.', max_length=80), ), migrations.AlterField( model_name='project', name='archive_url', field=models.URLField(blank=True, help_text='Base URL for accessing site archives.', null=True), ), migrations.AlterField( model_name='project', name='nomination_end', field=models.DateTimeField(help_text='Date to stop accepting URL nominations.'), ), migrations.AlterField( model_name='project', name='nomination_start', field=models.DateTimeField(help_text='Date to start accepting URL nominations.'), ), migrations.AlterField( model_name='project', name='project_description', field=models.TextField(help_text='Description of project.'), ), migrations.AlterField( model_name='project', name='project_end', field=models.DateTimeField(help_text='Ending date for project.'), ), migrations.AlterField( model_name='project', name='project_name', field=models.CharField(help_text='Name given to nomination project.', max_length=250), ), migrations.AlterField( model_name='project', name='project_slug', field=models.CharField(help_text='Up to 25 character identifier for the project (used in URLS, etc.).', max_length=25, unique=True), ), migrations.AlterField( model_name='project', name='project_start', field=models.DateTimeField(help_text='Starting date for project.'), ), migrations.AlterField( model_name='project', name='project_url', field=models.CharField(help_text='Project affiliated URL.', max_length=255), ), migrations.AlterField( model_name='project_metadata', name='description', field=models.CharField(help_text='Used as a 
descriptive title for the metadata field on Web forms.', max_length=255), ), migrations.AlterField( model_name='project_metadata', name='form_type', field=models.CharField(choices=[('checkbox', 'checkbox'), ('date', 'date'), ('radio', 'radio button'), ('select', 'menu-select multiple values'), ('selectsingle', 'menu-select single value'), ('text', 'text input'), ('textarea', 'text area')], help_text='Type of HTML form element that should represent the field.', max_length=30), ), migrations.AlterField( model_name='project_metadata', name='help', field=models.CharField(blank=True, help_text='String used on Web forms to prompt users for accurate data.', max_length=255), ), migrations.AlterField( model_name='project_metadata', name='metadata_order', field=models.PositiveIntegerField(default=1, help_text='Change the ordering of the metadata fields, ordered lowest to highest'), ), migrations.AlterField( model_name='project_metadata', name='required', field=models.BooleanField(help_text='Are users required to submit data for this field when nominating a URL?'), ), migrations.AlterField( model_name='url', name='attribute', field=models.CharField(help_text='A property of the URL you wish to describe.', max_length=255), ), migrations.AlterField( model_name='url', name='entity', field=models.CharField(help_text='The URL to nominate for capture.', max_length=300), ), migrations.AlterField( model_name='url', name='url_project', field=models.ForeignKey(help_text='The project for which you want to add a URL.', on_delete=django.db.models.deletion.CASCADE, to='nomination.Project'), ), migrations.AlterField( model_name='url', name='value', field=models.CharField(help_text='The value of the associated attribute.', max_length=255), ), migrations.AlterField( model_name='value', name='key', field=models.CharField(help_text='Up to 35 character identifier for the metadata field.', max_length=35, unique=True), ), migrations.AlterField( model_name='value', name='value', field=models.CharField(help_text='Permitted value for associated metadata field.', max_length=255), ), migrations.AlterField( model_name='valueset', name='name', field=models.CharField(help_text='Name given to value set.', max_length=75, unique=True), ), migrations.AlterField( model_name='valueset', name='values', field=models.ManyToManyField(through='nomination.Valueset_Values', to='nomination.Value', verbose_name='values'), ), migrations.AlterField( model_name='valueset_values', name='value_order', field=models.PositiveIntegerField(default=1, help_text='Change the ordering of the value fields, ordered lowest to highest'), ), ]
bsd-3-clause
2,851,599,570,489,149,000
43.670455
343
0.598067
false
4.627428
false
false
false
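A migration file like the one above is generated by Django rather than written by hand: each AlterField records a help_text change made on the model. A minimal sketch of the round trip (the field follows the project's models; the shell commands are standard Django):

# models.py: add or edit help_text on an existing field...
from django.db import models


class Nominator(models.Model):
    nominator_name = models.CharField(
        max_length=100,
        help_text='Your name.',
    )

# ...then regenerate and apply migrations from the shell:
#   python manage.py makemigrations nomination
#   python manage.py migrate nomination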
medunigraz/outpost
src/outpost/django/kages/api.py
1
1284
import logging

import ldap
from rest_framework import (
    exceptions,
    permissions,
    viewsets,
)
from rest_framework.response import Response

from . import models
from .conf import settings

logger = logging.getLogger(__name__)


class TranslateViewSet(viewsets.ViewSet):
    permission_classes = (
        permissions.IsAuthenticated,
    )

    def list(self, request):
        return Response()

    def retrieve(self, request, pk=None):
        if not pk:
            return Response(False)
        try:
            # Bind to the LDAP server and look up the person record
            # matching the given KAGes personnel ID.
            conn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
            conn.simple_bind_s(
                settings.AUTH_LDAP_BIND_DN,
                settings.AUTH_LDAP_BIND_PASSWORD
            )
            result = conn.search_s(
                settings.AUTH_LDAP_USER_SEARCH.base_dn,
                settings.AUTH_LDAP_USER_SEARCH.scope,
                settings.KAGES_PERS_ID_FILTER.format(id=int(pk)),
                settings.KAGES_PERS_FIELDS
            )
            # Exactly one match means the ID exists.
            found = len(result) == 1
        except Exception as e:
            logger.warning(
                f'LDAP query failed when matching KAGes ID: {e}'
            )
            found = False
        logger.debug(f'Matched KAGes ID: {found}')
        return Response({'exists': found})
bsd-2-clause
-7,632,317,316,049,999,000
26.319149
65
0.576324
false
4.196078
false
false
false
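The retrieve() method above assumes several values on settings. A plausible configuration following django-auth-ldap conventions; every concrete value below, and the two KAGES_* entries in particular, is illustrative rather than taken from the project:

import ldap
from django_auth_ldap.config import LDAPSearch

AUTH_LDAP_SERVER_URI = 'ldap://ldap.example.com'
AUTH_LDAP_BIND_DN = 'cn=reader,dc=example,dc=com'
AUTH_LDAP_BIND_PASSWORD = 'secret'
AUTH_LDAP_USER_SEARCH = LDAPSearch(
    'ou=people,dc=example,dc=com',
    ldap.SCOPE_SUBTREE,
    '(uid=%(user)s)',
)

# Filter and attribute list used when matching a KAGes personnel ID:
KAGES_PERS_ID_FILTER = '(employeeNumber={id})'
KAGES_PERS_FIELDS = ['cn']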
Acidburn0zzz/archiso-gui
releng/root-image/usr/share/cnchi/src/misc.py
1
28903
#!/usr/bin/python # -*- coding: UTF-8 -*- # # Copyright (c) 2012 Canonical Ltd. # Copyright (c) 2013 Antergos # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from collections import namedtuple import contextlib import grp import os import pwd import re import shutil import subprocess import syslog import socket import osextras def copytree(src, dst, symlinks=False, ignore=None): for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) def utf8(s, errors="strict"): """Decode a string as UTF-8 if it isn't already Unicode.""" if isinstance(s, str): return s else: return str(s, "utf-8", errors) def is_swap(device): try: with open('/proc/swaps') as fp: for line in fp: if line.startswith(device + ' '): return True except Exception: pass return False _dropped_privileges = 0 def set_groups_for_uid(uid): if uid == os.geteuid() or uid == os.getuid(): return user = pwd.getpwuid(uid).pw_name try: os.setgroups([g.gr_gid for g in grp.getgrall() if user in g.gr_mem]) except OSError: import traceback for line in traceback.format_exc().split('\n'): syslog.syslog(syslog.LOG_ERR, line) def drop_all_privileges(): # gconf needs both the UID and effective UID set. global _dropped_privileges uid = os.environ.get('SUDO_UID') gid = os.environ.get('SUDO_GID') if uid is not None: uid = int(uid) set_groups_for_uid(uid) if gid is not None: gid = int(gid) os.setregid(gid, gid) if uid is not None: uid = int(uid) os.setreuid(uid, uid) os.environ['HOME'] = pwd.getpwuid(uid).pw_dir os.environ['LOGNAME'] = pwd.getpwuid(uid).pw_name _dropped_privileges = None def drop_privileges(): global _dropped_privileges assert _dropped_privileges is not None if _dropped_privileges == 0: uid = os.environ.get('SUDO_UID') gid = os.environ.get('SUDO_GID') if uid is not None: uid = int(uid) set_groups_for_uid(uid) if gid is not None: gid = int(gid) os.setegid(gid) if uid is not None: os.seteuid(uid) _dropped_privileges += 1 def regain_privileges(): global _dropped_privileges assert _dropped_privileges is not None _dropped_privileges -= 1 if _dropped_privileges == 0: os.seteuid(0) os.setegid(0) os.setgroups([]) def drop_privileges_save(): """Drop the real UID/GID as well, and hide them in saved IDs.""" # At the moment, we only know how to handle this when effective # privileges were already dropped. 
assert _dropped_privileges is not None and _dropped_privileges > 0 uid = os.environ.get('SUDO_UID') gid = os.environ.get('SUDO_GID') if uid is not None: uid = int(uid) set_groups_for_uid(uid) if gid is not None: gid = int(gid) os.setresgid(gid, gid, 0) if uid is not None: os.setresuid(uid, uid, 0) def regain_privileges_save(): """Recover our real UID/GID after calling drop_privileges_save.""" assert _dropped_privileges is not None and _dropped_privileges > 0 os.setresuid(0, 0, 0) os.setresgid(0, 0, 0) os.setgroups([]) @contextlib.contextmanager def raised_privileges(): """As regain_privileges/drop_privileges, but in context manager style.""" regain_privileges() try: yield finally: drop_privileges() def raise_privileges(func): """As raised_privileges, but as a function decorator.""" from functools import wraps @wraps(func) def helper(*args, **kwargs): with raised_privileges(): return func(*args, **kwargs) return helper @raise_privileges def grub_options(): """ Generates a list of suitable targets for grub-installer @return empty list or a list of ['/dev/sda1','Ubuntu Hardy 8.04'] """ from ubiquity.parted_server import PartedServer l = [] try: oslist = {} subp = subprocess.Popen( ['os-prober'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = subp.communicate()[0].splitlines() for res in result: res = res.split(':') oslist[res[0]] = res[1] p = PartedServer() for disk in p.disks(): p.select_disk(disk) with open(p.device_entry('model')) as fp: mod = fp.readline() with open(p.device_entry('device')) as fp: dev = fp.readline() with open(p.device_entry('size')) as fp: size = fp.readline() if dev and mod: if size.isdigit(): size = format_size(int(size)) l.append([dev, '%s (%s)' % (mod, size)]) else: l.append([dev, mod]) for part in p.partitions(): ostype = '' if part[4] == 'linux-swap': continue if part[4] == 'free': continue if os.path.exists(p.part_entry(part[1], 'format')): # Don't bother looking for an OS type. 
pass elif part[5] in oslist.keys(): ostype = oslist[part[5]] l.append([part[5], ostype]) except: import traceback for line in traceback.format_exc().split('\n'): syslog.syslog(syslog.LOG_ERR, line) return l @raise_privileges def boot_device(): from ubiquity.parted_server import PartedServer boot = None root = None try: p = PartedServer() for disk in p.disks(): p.select_disk(disk) for part in p.partitions(): part = part[1] if p.has_part_entry(part, 'mountpoint'): mp = p.readline_part_entry(part, 'mountpoint') if mp == '/boot': boot = disk.replace('=', '/') elif mp == '/': root = disk.replace('=', '/') except Exception: import traceback for line in traceback.format_exc().split('\n'): syslog.syslog(syslog.LOG_ERR, line) if boot: return boot return root def is_removable(device): if device is None: return None device = os.path.realpath(device) devpath = None is_partition = False removable_bus = False subp = subprocess.Popen(['udevadm', 'info', '-q', 'property', '-n', device], stdout=subprocess.PIPE, universal_newlines=True) for line in subp.communicate()[0].splitlines(): line = line.strip() if line.startswith('DEVPATH='): devpath = line[8:] elif line == 'DEVTYPE=partition': is_partition = True elif line == 'ID_BUS=usb' or line == 'ID_BUS=ieee1394': removable_bus = True if devpath is not None: if is_partition: devpath = os.path.dirname(devpath) is_removable = removable_bus try: with open('/sys%s/removable' % devpath) as removable: if removable.readline().strip() != '0': is_removable = True except IOError: pass if is_removable: try: subp = subprocess.Popen(['udevadm', 'info', '-q', 'name', '-p', devpath], stdout=subprocess.PIPE, universal_newlines=True) return ('/dev/%s' % subp.communicate()[0].splitlines()[0].strip()) except Exception: pass return None def mount_info(path): """Return filesystem name, type, and ro/rw for a given mountpoint.""" fsname = '' fstype = '' writable = '' with open('/proc/mounts') as fp: for line in fp: line = line.split() if line[1] == path: fsname = line[0] fstype = line[2] writable = line[3].split(',')[0] return fsname, fstype, writable def udevadm_info(args): fullargs = ['udevadm', 'info', '-q', 'property'] fullargs.extend(args) udevadm = {} subp = subprocess.Popen( fullargs, stdout=subprocess.PIPE, universal_newlines=True) for line in subp.communicate()[0].splitlines(): line = line.strip() if '=' not in line: continue name, value = line.split('=', 1) udevadm[name] = value return udevadm def partition_to_disk(partition): """Convert a partition device to its disk device, if any.""" udevadm_part = udevadm_info(['-n', partition]) if ('DEVPATH' not in udevadm_part or udevadm_part.get('DEVTYPE') != 'partition'): return partition disk_syspath = '/sys%s' % udevadm_part['DEVPATH'].rsplit('/', 1)[0] udevadm_disk = udevadm_info(['-p', disk_syspath]) return udevadm_disk.get('DEVNAME', partition) def is_boot_device_removable(boot=None): if boot: return is_removable(boot) else: return is_removable(boot_device()) def cdrom_mount_info(): """Return mount information for /cdrom. This is the same as mount_info, except that the partition is converted to its containing disk, and we don't care whether the mount point is writable. 
""" cdsrc, cdfs, _ = mount_info('/cdrom') cdsrc = partition_to_disk(cdsrc) return cdsrc, cdfs @raise_privileges def grub_device_map(): """Return the contents of the default GRUB device map.""" subp = subprocess.Popen(['grub-mkdevicemap', '--no-floppy', '-m', '-'], stdout=subprocess.PIPE, universal_newlines=True) return subp.communicate()[0].splitlines() def grub_default(boot=None): """Return the default GRUB installation target.""" # Much of this is intentionally duplicated from grub-installer, so that # we can show the user what device GRUB will be installed to before # grub-installer is run. Pursuant to that, we intentionally run this in # the installer root as /target might not yet be available. bootremovable = is_boot_device_removable(boot=boot) if bootremovable is not None: return bootremovable devices = grub_device_map() target = None if devices: try: target = os.path.realpath(devices[0].split('\t')[1]) except (IndexError, OSError): pass # last resort if target is None: target = '(hd0)' cdsrc, cdfs = cdrom_mount_info() try: # The target is usually under /dev/disk/by-id/, so string equality # is insufficient. same = os.path.samefile(cdsrc, target) except OSError: same = False if ((same or target == '(hd0)') and ((cdfs and cdfs != 'iso9660') or is_removable(cdsrc))): # Installing from removable media other than a CD. Make sure that # we don't accidentally install GRUB to it. boot = boot_device() try: if boot: target = boot else: # Try the next disk along (which can't also be the CD source). target = os.path.realpath(devices[1].split('\t')[1]) target = re.sub(r'(/dev/(cciss|ida)/c[0-9]d[0-9]|/dev/[a-z]+).*', r'\1', target) except (IndexError, OSError): pass return target _os_prober_oslist = {} _os_prober_osvers = {} _os_prober_called = False def find_in_os_prober(device, with_version=False): """Look for the device name in the output of os-prober. Return the friendly name of the device, or the empty string on error. """ try: oslist, osvers = os_prober() if device in oslist: ret = oslist[device] elif is_swap(device): ret = 'swap' else: syslog.syslog('Device %s not found in os-prober output' % device) ret = '' ret = utf8(ret, errors='replace') ver = utf8(osvers.get(device, ''), errors='replace') if with_version: return ret, ver else: return ret except (KeyboardInterrupt, SystemExit): pass except: import traceback syslog.syslog(syslog.LOG_ERR, "Error in find_in_os_prober:") for line in traceback.format_exc().split('\n'): syslog.syslog(syslog.LOG_ERR, line) return '' @raise_privileges def os_prober(): global _os_prober_oslist global _os_prober_osvers global _os_prober_called if not _os_prober_called: _os_prober_called = True subp = subprocess.Popen( ['os-prober'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = subp.communicate()[0].splitlines() for res in result: res = res.split(':') if res[2] == 'Ubuntu': version = [v for v in re.findall('[0-9.]*', res[1]) if v][0] # Get rid of the superfluous (development version) (11.04) text = re.sub('\s*\(.*\).*', '', res[1]) _os_prober_oslist[res[0]] = text _os_prober_osvers[res[0]] = version else: # Get rid of the bootloader indication. It's not relevant here. 
_os_prober_oslist[res[0]] = res[1].replace(' (loader)', '') return _os_prober_oslist, _os_prober_osvers @raise_privileges def remove_os_prober_cache(): osextras.unlink_force('/var/lib/ubiquity/os-prober-cache') shutil.rmtree('/var/lib/ubiquity/linux-boot-prober-cache', ignore_errors=True) def windows_startup_folder(mount_path): locations = [ # Windows 8 'ProgramData/Microsoft/Windows/Start Menu/Programs/StartUp', # Windows 7 'ProgramData/Microsoft/Windows/Start Menu/Programs/Startup', # Windows XP 'Documents and Settings/All Users/Start Menu/Programs/Startup', # Windows NT 'Winnt/Profiles/All Users/Start Menu/Programs/Startup', ] for location in locations: path = os.path.join(mount_path, location) if os.path.exists(path): return path return '' ReleaseInfo = namedtuple('ReleaseInfo', 'name, version') def get_release(): if get_release.release_info is None: try: with open('/cdrom/.disk/info') as fp: line = fp.readline() if line: line = line.split() if line[2] == 'LTS': line[1] += ' LTS' get_release.release_info = ReleaseInfo( name=line[0], version=line[1]) except: syslog.syslog(syslog.LOG_ERR, 'Unable to determine the release.') if not get_release.release_info: get_release.release_info = ReleaseInfo(name='Ubuntu', version='') return get_release.release_info get_release.release_info = None def get_release_name(): import warnings warnings.warn('get_release_name() is deprecated, ' 'use get_release().name instead.', category=DeprecationWarning) if not get_release_name.release_name: try: with open('/cdrom/.disk/info') as fp: line = fp.readline() if line: line = line.split() if line[2] == 'LTS': get_release_name.release_name = ' '.join(line[:3]) else: get_release_name.release_name = ' '.join(line[:2]) except: syslog.syslog( syslog.LOG_ERR, "Unable to determine the distribution name from " "/cdrom/.disk/info") if not get_release_name.release_name: get_release_name.release_name = 'Ubuntu' return get_release_name.release_name get_release_name.release_name = '' @raise_privileges def get_install_medium(): if not get_install_medium.medium: try: if os.access('/cdrom', os.W_OK): get_install_medium.medium = 'USB' else: get_install_medium.medium = 'CD' except: syslog.syslog( syslog.LOG_ERR, "Unable to determine install medium.") get_install_medium.medium = 'CD' return get_install_medium.medium get_install_medium.medium = '' def execute(*args): """runs args* in shell mode. 
Output status is taken.""" log_args = ['log-output', '-t', 'ubiquity'] log_args.extend(args) try: status = subprocess.call(log_args) except IOError as e: syslog.syslog(syslog.LOG_ERR, ' '.join(log_args)) syslog.syslog(syslog.LOG_ERR, "OS error(%s): %s" % (e.errno, e.strerror)) return False else: if status != 0: syslog.syslog(syslog.LOG_ERR, ' '.join(log_args)) return False syslog.syslog(' '.join(log_args)) return True @raise_privileges def execute_root(*args): return execute(*args) def format_size(size): """Format a partition size.""" if size < 1000: unit = 'B' factor = 1 elif size < 1000 * 1000: unit = 'kB' factor = 1000 elif size < 1000 * 1000 * 1000: unit = 'MB' factor = 1000 * 1000 elif size < 1000 * 1000 * 1000 * 1000: unit = 'GB' factor = 1000 * 1000 * 1000 else: unit = 'TB' factor = 1000 * 1000 * 1000 * 1000 return '%.1f %s' % (float(size) / factor, unit) def debconf_escape(text): escaped = text.replace('\\', '\\\\').replace('\n', '\\n') return re.sub(r'(\s)', r'\\\1', escaped) def create_bool(text): if text == 'true': return True elif text == 'false': return False else: return text @raise_privileges def dmimodel(): model = '' kwargs = {} if os.geteuid() != 0: # Silence annoying warnings during the test suite. kwargs['stderr'] = open('/dev/null', 'w') try: proc = subprocess.Popen( ['dmidecode', '--string', 'system-manufacturer'], stdout=subprocess.PIPE, universal_newlines=True, **kwargs) manufacturer = proc.communicate()[0] if not manufacturer: return manufacturer = manufacturer.lower() if 'to be filled' in manufacturer: # Don't bother with products in development. return if 'bochs' in manufacturer or 'vmware' in manufacturer: model = 'virtual machine' # VirtualBox sets an appropriate system-product-name. else: if 'lenovo' in manufacturer or 'ibm' in manufacturer: key = 'system-version' else: key = 'system-product-name' proc = subprocess.Popen(['dmidecode', '--string', key], stdout=subprocess.PIPE, universal_newlines=True) model = proc.communicate()[0] if 'apple' in manufacturer: # MacBook4,1 - strip the 4,1 model = re.sub('[^a-zA-Z\s]', '', model) # Replace each gap of non-alphanumeric characters with a dash. # Ensure the resulting string does not begin or end with a dash. model = re.sub('[^a-zA-Z0-9]+', '-', model).rstrip('-').lstrip('-') if model.lower() == 'not-available': return except Exception: syslog.syslog(syslog.LOG_ERR, 'Unable to determine the model from DMI') finally: if 'stderr' in kwargs: kwargs['stderr'].close() return model def set_indicator_keymaps(lang): import xml.etree.cElementTree as ElementTree from gi.repository import Xkl, GdkX11 # GdkX11.x11_get_default_xdisplay() segfaults if Gtk hasn't been # imported; possibly finer-grained than this, but anything using this # will already have imported Gtk anyway ... from gi.repository import Gtk from ubiquity import gsettings # pacify pyflakes Gtk gsettings_key = ['org.gnome.libgnomekbd.keyboard', 'layouts'] lang = lang.split('_')[0] variants = [] # Map inspired from that of gfxboot-theme-ubuntu that's itself # based on console-setup's. 
This one has been restricted to # language => keyboard layout not locale => keyboard layout as # we don't actually know the exact locale default_keymap = { 'ar': 'ara', 'bs': 'ba', 'de': 'de', 'el': 'gr', 'en': 'us', 'eo': 'epo', 'fr': 'fr_oss', 'gu': 'in_guj', 'hi': 'in', 'hr': 'hr', 'hy': 'am', 'ka': 'ge', 'kn': 'in_kan', 'lo': 'la', 'ml': 'in_mal', 'pa': 'in_guru', 'sr': 'rs', 'sv': 'se', 'ta': 'in_tam', 'te': 'in_tel', 'zh': 'cn', } def item_str(s): '''Convert a zero-terminated byte array to a proper str''' i = s.find(b'\x00') return s[:i].decode() def process_variant(*args): if hasattr(args[2], 'name'): variants.append( '%s\t%s' % (item_str(args[1].name), item_str(args[2].name))) else: variants.append(item_str(args[1].name)) def restrict_list(variants): new_variants = [] # Start by looking by an explicit default layout in the keymap if lang in default_keymap: if default_keymap[lang] in variants: variants.remove(default_keymap[lang]) new_variants.append(default_keymap[lang]) else: tab_keymap = default_keymap[lang].replace('_', '\t') if tab_keymap in variants: variants.remove(tab_keymap) new_variants.append(tab_keymap) # Prioritize the layout matching the language (if any) if lang in variants: variants.remove(lang) new_variants.append(lang) # Uniquify our list (just in case) variants = list(set(variants)) if len(variants) > 4: # We have a problem, X only supports 4 # Add as many entry as we can that are layouts without variant country_variants = sorted( entry for entry in variants if '\t' not in entry) for entry in country_variants[:4 - len(new_variants)]: new_variants.append(entry) variants.remove(entry) if len(new_variants) < 4: # We can add some more simple_variants = sorted( entry for entry in variants if '_' not in entry) for entry in simple_variants[:4 - len(new_variants)]: new_variants.append(entry) variants.remove(entry) if len(new_variants) < 4: # Now just add anything left for entry in variants[:4 - len(new_variants)]: new_variants.append(entry) variants.remove(entry) else: new_variants += list(variants) # gsettings doesn't understand utf8 new_variants = [str(variant) for variant in new_variants] return new_variants def call_setxkbmap(variants): kb_layouts = [] kb_variants = [] for entry in variants: fields = entry.split('\t') if len(fields) > 1: kb_layouts.append(fields[0]) kb_variants.append(fields[1]) else: kb_layouts.append(fields[0]) kb_variants.append("") execute( "setxkbmap", "-layout", ",".join(kb_layouts), "-variant", ",".join(kb_variants)) iso_639_3 = ElementTree.parse('/usr/share/xml/iso-codes/iso_639_3.xml') nodes = [element for element in iso_639_3.findall('iso_639_3_entry') if element.get('part1_code') == lang] display = GdkX11.x11_get_default_xdisplay() engine = Xkl.Engine.get_instance(display) if nodes: configreg = Xkl.ConfigRegistry.get_instance(engine) configreg.load(False) # Apparently part2_code doesn't always work (fails with French) for prop in ('part2_code', 'id', 'part1_code'): code = nodes[0].get(prop) if code is not None: configreg.foreach_language_variant(code, process_variant, None) if variants: restricted_variants = restrict_list(variants) call_setxkbmap(restricted_variants) gsettings.set_list( gsettings_key[0], gsettings_key[1], restricted_variants) break else: # Use the system default if no other keymaps can be determined. 
gsettings.set_list(gsettings_key[0], gsettings_key[1], []) engine.lock_group(0) NM = 'org.freedesktop.NetworkManager' NM_STATE_CONNECTED_GLOBAL = 70 def get_prop(obj, iface, prop): import dbus try: return obj.Get(iface, prop, dbus_interface=dbus.PROPERTIES_IFACE) except dbus.DBusException as e: if e.get_dbus_name() == 'org.freedesktop.DBus.Error.UnknownMethod': return None else: raise def is_wireless_enabled(): import dbus bus = dbus.SystemBus() manager = bus.get_object(NM, '/org/freedesktop/NetworkManager') return get_prop(manager, NM, 'WirelessEnabled') def has_connection(): import dbus bus = dbus.SystemBus() manager = bus.get_object(NM, '/org/freedesktop/NetworkManager') state = get_prop(manager, NM, 'state') return state == NM_STATE_CONNECTED_GLOBAL def add_connection_watch(func): import dbus def connection_cb(state): func(state == NM_STATE_CONNECTED_GLOBAL) bus = dbus.SystemBus() bus.add_signal_receiver(connection_cb, 'StateChanged', NM, NM) try: func(has_connection()) except dbus.DBusException: # We can't talk to NM, so no idea. Wild guess: we're connected # using ssh with X forwarding, and are therefore connected. This # allows us to proceed with a minimum of complaint. func(True) def install_size(): if min_install_size: return min_install_size # Fallback size to 5 GB size = 5 * 1024 * 1024 * 1024 # Maximal size to 8 GB max_size = 8 * 1024 * 1024 * 1024 try: with open('/cdrom/casper/filesystem.size') as fp: size = int(fp.readline()) except IOError: pass # TODO substitute into the template for the state box. min_disk_size = size * 2 # fudge factor # Set minimum size to 8GB if current minimum size is larger # than 8GB and we still have an extra 20% of free space if min_disk_size > max_size and size * 1.2 < max_size: min_disk_size = max_size return min_disk_size min_install_size = None # vim:ai:et:sts=4:tw=80:sw=4: def get_network(): intip = False s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect(("antergos.com",1234)) except: return "" myip = s.getsockname()[0] s.close() spip = myip.split(".") if spip[0] == '192': if spip[1] == '168': intip = True elif spip[0] == '10': intip = True elif spip[0] == '172': if int(spip[1]) > 15 and int(spip[1]) < 32: intip = True if intip: ipran = '.'.join(spip[:-1]) + ".0/24" else: ipran = '.'.join(spip) return ipran
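The small helpers above are easy to sanity-check in isolation. A quick sketch of the expected behaviour of format_size and debconf_escape (a sketch only; the assumption that this file is importable as ubiquity.misc is mine, based on where such helpers usually live):

# Sketch: expected behaviour of the helpers above.
# Assumption: the surrounding file is importable as ubiquity.misc.
from ubiquity import misc

assert misc.format_size(999) == '999.0 B'            # below 1000 stays in bytes
assert misc.format_size(2500000) == '2.5 MB'         # SI prefixes: 1 MB == 10**6 B
assert misc.format_size(3 * 1000 ** 4) == '3.0 TB'   # anything >= 1 TB reports TB

# Whitespace is backslash-escaped and a real newline becomes a literal '\n',
# which is what debconf expects in a single-line value.
assert misc.debconf_escape('a b\nc') == 'a\\ b\\nc'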
gpl-3.0
-3,050,432,571,631,182,000
30.484749
79
0.558593
false
3.836342
false
false
false
mosbasik/fluidspaces
src/fluidspaces/i3_commands.py
1
1215
import subprocess class i3Commands(object): @staticmethod def send_to_wp(i3_name): '''Send the currently focused window/container to the named workspace''' subprocess.Popen(['i3-msg', 'move container to workspace', i3_name], stdout=subprocess.PIPE) @staticmethod def go_to_wp(i3_name): '''Go to the named workspace''' subprocess.Popen(['i3-msg', 'workspace', i3_name], stdout=subprocess.PIPE) @staticmethod def get_wps_str(): '''Query i3 for current workspaces and return stdout as a string''' completed_proc = subprocess.run(['i3-msg', '-t', 'get_workspaces'], stdout=subprocess.PIPE) stdout = completed_proc.stdout.decode('utf-8') return stdout @staticmethod def rename_wp(old_i3_name, new_i3_name): subprocess.run([ 'i3-msg', 'rename workspace', '"{}"'.format(old_i3_name), 'to', '"{}"'.format(new_i3_name), ], stdout=subprocess.PIPE) @staticmethod def rename_wps(old_i3_names, new_i3_names): for old_i3_name, new_i3_name in zip(old_i3_names, new_i3_names): i3Commands.rename_wp(old_i3_name, new_i3_name)
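Since every method above just shells out to i3-msg, a minimal usage sketch shows the intent (assumptions: an i3 session is running, i3-msg is on PATH, and the workspace names here are made up):

# Usage sketch for the wrapper above; workspace names are illustrative.
from fluidspaces.i3_commands import i3Commands

i3Commands.send_to_wp('1:web')             # move the focused container there
i3Commands.go_to_wp('1:web')               # then follow it
print(i3Commands.get_wps_str())            # raw JSON from `i3-msg -t get_workspaces`
i3Commands.rename_wps(['1:web'], ['2:web'])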
mit
-4,274,740,972,160,197,000
32.75
100
0.604938
false
3.441926
false
false
false
gigglearrows/anniesbot
pajbot/models/timer.py
1
6625
import json
import logging

from pajbot.models.db import DBManager, Base
from pajbot.models.action import ActionParser
from pajbot.tbutil import find

from sqlalchemy import orm
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.dialects.mysql import TEXT

log = logging.getLogger('pajbot')


class Timer(Base):
    __tablename__ = 'tb_timer'

    id = Column(Integer, primary_key=True)
    name = Column(String(256), nullable=False)
    action_json = Column('action', TEXT, nullable=False)
    interval_online = Column(Integer, nullable=False)
    interval_offline = Column(Integer, nullable=False)
    enabled = Column(Boolean, nullable=False, default=True)

    def __init__(self, **options):
        self.id = None
        self.name = '??'
        self.action_json = '{}'
        self.interval_online = 5
        self.interval_offline = 30
        self.enabled = True

        self.refresh_tts()
        self.set(**options)

    def set(self, **options):
        self.name = options.get('name', self.name)
        log.debug(options)
        if 'action' in options:
            log.info('new action!')
            self.action_json = json.dumps(options['action'])
            self.action = ActionParser.parse(self.action_json)
        self.interval_online = options.get('interval_online', self.interval_online)
        self.interval_offline = options.get('interval_offline', self.interval_offline)
        self.enabled = options.get('enabled', self.enabled)

    @orm.reconstructor
    def init_on_load(self):
        self.action = ActionParser.parse(self.action_json)
        self.refresh_tts()

    def refresh_tts(self):
        self.time_to_send_online = self.interval_online
        self.time_to_send_offline = self.interval_offline

    def refresh_action(self):
        self.action = ActionParser.parse(self.action_json)

    def run(self, bot):
        self.action.run(bot, source=None, message=None)


class TimerManager:
    def __init__(self, bot):
        self.bot = bot
        self.bot.execute_every(60, self.tick)

        if self.bot:
            self.bot.socket_manager.add_handler('timer.update', self.on_timer_update)
            self.bot.socket_manager.add_handler('timer.remove', self.on_timer_remove)

    def on_timer_update(self, data, conn):
        try:
            timer_id = int(data['timer_id'])
        except (KeyError, ValueError):
            log.warn('No timer ID found in on_timer_update')
            return False

        updated_timer = find(lambda timer: timer.id == timer_id, self.timers)
        if updated_timer:
            with DBManager.create_session_scope(expire_on_commit=False) as db_session:
                db_session.add(updated_timer)
                db_session.refresh(updated_timer)
                updated_timer.refresh_action()
                db_session.expunge(updated_timer)
        else:
            with DBManager.create_session_scope(expire_on_commit=False) as db_session:
                updated_timer = db_session.query(Timer).filter_by(id=timer_id).one_or_none()

        # Add the updated timer to the timer lists if required
        if updated_timer:
            if updated_timer not in self.timers:
                self.timers.append(updated_timer)
            if updated_timer not in self.online_timers and updated_timer.interval_online > 0:
                self.online_timers.append(updated_timer)
                updated_timer.refresh_tts()
            if updated_timer not in self.offline_timers and updated_timer.interval_offline > 0:
                self.offline_timers.append(updated_timer)
                updated_timer.refresh_tts()

        # Iterate over copies ([:]); removing from a list while iterating
        # over it directly would skip the element after each removed timer.
        for timer in self.online_timers[:]:
            if timer.enabled is False or timer.interval_online <= 0:
                self.online_timers.remove(timer)

        for timer in self.offline_timers[:]:
            if timer.enabled is False or timer.interval_offline <= 0:
                self.offline_timers.remove(timer)

    def on_timer_remove(self, data, conn):
        try:
            timer_id = int(data['timer_id'])
        except (KeyError, ValueError):
            log.warn('No timer ID found in on_timer_remove')
            return False

        removed_timer = find(lambda timer: timer.id == timer_id, self.timers)
        if removed_timer:
            if removed_timer in self.timers:
self.timers.remove(removed_timer) if removed_timer in self.online_timers: self.online_timers.remove(removed_timer) if removed_timer in self.offline_timers: self.offline_timers.remove(removed_timer) def tick(self): if self.bot.is_online: for timer in self.online_timers: timer.time_to_send_online -= 1 timer = find(lambda timer: timer.time_to_send_online <= 0, self.online_timers) if timer: timer.run(self.bot) timer.time_to_send_online = timer.interval_online self.online_timers.remove(timer) self.online_timers.append(timer) else: for timer in self.offline_timers: timer.time_to_send_offline -= 1 timer = find(lambda timer: timer.time_to_send_offline <= 0, self.offline_timers) if timer: timer.run(self.bot) timer.time_to_send_offline = timer.interval_offline self.offline_timers.remove(timer) self.offline_timers.append(timer) def redistribute_timers(self): for x in range(0, len(self.offline_timers)): timer = self.offline_timers[x] timer.time_to_send_offline = timer.interval_offline * ((x + 1) / len(self.offline_timers)) for x in range(0, len(self.online_timers)): timer = self.online_timers[x] timer.time_to_send_online = timer.interval_online * ((x + 1) / len(self.online_timers)) def load(self): self.timers = [] with DBManager.create_session_scope(expire_on_commit=False) as db_session: self.timers = db_session.query(Timer).order_by(Timer.interval_online, Timer.interval_offline, Timer.name).all() db_session.expunge_all() self.online_timers = [timer for timer in self.timers if timer.interval_online > 0 and timer.enabled] self.offline_timers = [timer for timer in self.timers if timer.interval_offline > 0 and timer.enabled] self.redistribute_timers() log.info('Loaded {} timers ({} online/{} offline)'.format(len(self.timers), len(self.online_timers), len(self.offline_timers))) return self
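The interesting detail above is redistribute_timers: it staggers each timer's initial countdown so that timers sharing an interval fire spread out over one period instead of all in the same tick. A standalone sketch of just that arithmetic:

# Standalone sketch of the stagger in redistribute_timers above:
# timer x of N gets an initial countdown of interval * (x + 1) / N.
interval = 30  # minutes
names = ['a', 'b', 'c']
countdowns = [interval * (x + 1) / len(names) for x in range(len(names))]
print(list(zip(names, countdowns)))  # [('a', 10.0), ('b', 20.0), ('c', 30.0)]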
mit
-3,706,695,500,516,823,600
38.201183
135
0.616453
false
3.892479
false
false
false
white-lab/pyproteome
brainrnaseq/__init__.py
1
2373
from . import cache, mapping, enrichments CELL_TYPE_COLS = { 'Homo sapiens': { 'Astrocyte': [ '8yo', '13yo', '16yo', '21yo.1', '22yo.1', '35yo', '47yo', '51yo', '53yo', '60yo', '63yo - 1', '63yo - 2', ], 'Neuron': [ '25yo', ], 'OPC': [ '22yoGC', '63yoGC - 1', '63yo GC - 2', '47yoO4', '63yoO4', ], 'New Oligodendrocytes': [ '22yoGC', '63yoGC - 1', '63yo GC - 2', '47yoO4', '63yoO4', ], 'Myelinating Oligodendrocytes': [ '22yoGC', '63yoGC - 1', '63yo GC - 2', '47yoO4', '63yoO4', ], 'Microglia': [ '45yo', '51yo.1', '63yo', ], 'Endothelia': [ '13yo.1', '47yo.1', ], }, 'Mus musculus': { 'Astrocyte': [ # 'FACS - p69', # 'FACS p70', '1 month', '4 months', '7 months', '9 months', ], 'Neuron': [ 'Neuron 3', 'Neuron 4', ], 'OPC': [ 'Oligodendrocyte precursor cell 3', 'Oligodendrocyte precursor cell 4', ], 'New Oligodendrocytes': [ 'Newly formed oligodendrocyte 3', 'Newly formed oligodendrocyte 4', ], 'Myelinating Oligodendrocytes': [ 'Myelinating oligodendrocyte 4', 'Myelinating oligodenrocyte 5', ], 'Microglia': [ 'Microglia 1', 'Microglia 2', ], 'Endothelia': [ 'Endo 1', 'Endo 2', ], }, } CELL_TYPES = [ 'Astrocyte', 'Endothelia', 'Microglia', 'Myelinating Oligodendrocytes', 'Neuron', 'New Oligodendrocytes', 'OPC', ] DEFAULT_CELL_TYPES = [ i for i in CELL_TYPES if i not in ['OPC', 'New Oligodendrocytes'] ] CELL_COLORS = colors = { 'Astrocyte': '#bfee90', 'Endothelia': '#ff9b90', 'Microglia': '#5bd3ff', 'Myelinating Oligodendrocytes': '#ff39ff', 'Neuron': '#ffc467', 'New Oligodendrocytes': 'lightpurple', 'OPC': 'darkpurple', } __all__ = [ 'cache', 'mapping', 'enrichments', 'CELL_TYPE_COLS', 'CELL_TYPES', 'DEFAULT_CELL_TYPES', 'CELL_COLORS', ]
bsd-2-clause
7,507,329,908,707,769,000
21.6
79
0.435735
false
2.926017
false
false
false
synthesio/infra-ovh-ansible-module
plugins/modules/dedicated_server_install.py
1
2843
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.module_utils.basic import AnsibleModule DOCUMENTATION = ''' --- module: dedicated_server_install short_description: Install a new dedicated server description: - Install a new dedicated server author: Synthesio SRE Team requirements: - ovh >= 0.5.0 options: service_name: required: true description: Ovh name of the server hostname: required: true description: Name of the new dedicated server template: required: true description: template to use to spawn the server ''' EXAMPLES = ''' synthesio.ovh.dedicated_server_install: service_name: "ns12345.ip-1-2-3.eu" hostname: "server01.example.net" template: "debian10_64" delegate_to: localhost ''' RETURN = ''' # ''' from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec try: from ovh.exceptions import APIError HAS_OVH = True except ImportError: HAS_OVH = False def run_module(): module_args = ovh_argument_spec() module_args.update(dict( service_name=dict(required=True), hostname=dict(required=True), template=dict(required=True) )) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) client = ovh_api_connect(module) service_name = module.params['service_name'] hostname = module.params['hostname'] template = module.params['template'] if module.check_mode: module.exit_json(msg="Installation in progress on {} as {} with template {} - (dry run mode)".format(service_name, hostname, template), changed=True) try: compatible_templates = client.get( '/dedicated/server/%s/install/compatibleTemplates' % service_name ) if template not in compatible_templates["ovh"] and template not in compatible_templates["personal"]: module.fail_json(msg="{} doesn't exist in compatibles templates".format(template)) except APIError as api_error: return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error)) details = {"details": {"language": "en", "customHostname": hostname} } try: client.post( '/dedicated/server/%s/install/start' % service_name, **details, templateName=template) module.exit_json(msg="Installation in progress on {} as {} with template {}!".format(service_name, hostname, template), changed=True) except APIError as api_error: module.fail_json(msg="Failed to call OVH API: {0}".format(api_error)) def main(): run_module() if __name__ == '__main__': main()
mit
5,904,091,304,778,387,000
27.148515
143
0.646148
false
3.841892
false
false
false
certik/sfepy
tests/test_parsing.py
1
3180
from sfepy.base.testing import TestCommon ## # 16.07.2007, c class Test( TestCommon ): ## # 16.07.2007, c def from_conf( conf, options ): return Test( conf = conf, options = options ) from_conf = staticmethod( from_conf ) ## # c: 16.07.2007, r: 08.07.2008 def test_parse_equations( self ): from sfepy.fem.parseEq import create_bnf test_strs = [ """- d_volume.i1.Omega( uc )""", """2 * dw_term.i1.Omega( uc ) = - 3.0 * dw_term2.i1.Omega2( uc )""", """d_term1.Y( fluid, u, w, Nu, dcf, mode ) + d_term2.Omega( u, w, Nu, dcf, mode ) - d_another_term.Elsewhere( w, p, Nu, dcf, mode ) = - dw_rhs.Y3.a( u, q, Nu, dcf, mode )""", """no_args() = 0""", """+ something( a, b, c ) = + something_else( c, a, d[-1] )""", """term_.a.a( u )""", """term.i1.Omega( v, du/dt ) + term2.i2.Gamma( v, dphi/dt)""" ] n_fail = 0 term_descs = [] for test_str in test_strs: term_descs[:] = [] try: bnf = create_bnf( term_descs, {} ) bnf.parseString( test_str ) except: self.report( 'failed: %s' % test_str ) if self.options.debug: raise n_fail += 1 for td in term_descs: print td self.report( '%d failure(s)' % n_fail ) if n_fail: raise AssertionError return True ## # c: 16.07.2007, r: 14.07.2008 def test_parse_regions( self ): from sfepy.fem.parseReg import create_bnf, _test_strs test_strs = ['nodes of surface -n r.Omega', 'r.Y_2 +n copy r.Y_1', 'nodes in (y <= 0.00001) & (x < 0.11)', 'nodes in ((y <= 0.00001) & (x < 0.11))', 'nodes in (((y <= 0.00001) & (x < 0.11)))', 'nodes in (((0.00001 < y) & (x < 0.11)))', 'all -n nodes in (y == 0.00001)', 'all -n nodes of surface', 'all -e r.DOmega_100', 'r.Y_1 -n nodes of surface *e r.Z_8 *n nodes in (y > 0)', 'nodes of surface +n nodes by pokus( x, y, z )', 'elements of group 6 +e nodes by fn2_3c( x )', """r.Y_1 *n (r.Y_2 +e (nodes in (y > 0) *n r.Y_32)) -n nodes of surface -e r.Y_5""", 'nodes by noargs()', 'nodes by extraargs( x, y, z, abc,3 )', 'node in r.Gamma_3', 'node 10', 'elements by afun( domain )'] stack = [] bnf = create_bnf( stack ) n_fail = 0 for test_str in test_strs: stack[:] = [] try: out = bnf.parseString( test_str ) except: self.report( 'failed: %s' % test_str ) n_fail += 1 self.report( '%d failures' % n_fail ) if n_fail: raise AssertionError return True
bsd-3-clause
-26,389,724,490,425,080
33.193548
80
0.416981
false
3.336831
true
false
false
waidyanatha/pingsam
visualize.py
1
8668
import numpy as np
import datetime as dtm
from dateutil import rrule
import pandas as pd
import csv
import matplotlib.pylab as plt
import sys, os
#let's first create the csv file
#
#change this to actual csv file name
pingfile="weeklylogs.csv"
#parameters @plotinterval = 10 minutes
plotinterval = 10
#csv file columns
col_seq=0
col_pingtime=1
col_domain=2
col_state=3
#
########## FUNCTION TO SYNTHESIZE MISSING DATA POINTS ##########
#
def synth_data(synthdf, interval):
    #create a temporary dataframe to hold the synthesized data
    tmpdf = pd.DataFrame(columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
    #first check we have a non-empty dataframe
    if not synthdf.empty:
        #pick the originating TS data point (sort_values returns a copy, so reassign)
        synthdf = synthdf.sort_values(by='pingdatetime')
        #check if first timestamp starts at 00:00:00; if not add a dummy record
        startseqnum = synthdf.index[0]
        startpingdt = synthdf.iloc[0]['pingdatetime']
        startdomain = synthdf.iloc[0]['domain']
        startstate = synthdf.iloc[0]['statenow']
        #loop through each TS data point to synthetically add new TS points
        #to fill the gap between two consecutive data points
        for i, row in synthdf.iterrows():
            #initiate the synthesized data point to the origin
            nextdatapoint = 0
            pingdt_plus_interval = startpingdt
            #stepwise loop to add synthesized points from relative origin to the next TS data point
            while row['pingdatetime'] > pingdt_plus_interval + dtm.timedelta(minutes = interval) :
                nextdatapoint += 1
                pingdt_plus_interval = startpingdt + dtm.timedelta(minutes = nextdatapoint*interval)
                tmpdf.loc[len(tmpdf.index)] = [startseqnum,pingdt_plus_interval,startdomain,startstate]
            startseqnum = i
            startpingdt = row['pingdatetime']
            startstate = row['statenow']
        #after completing through all the TS datapoints check if a non-empty dataframe was created
        if not tmpdf.empty:
            tmpdf = pd.concat([tmpdf,synthdf])
            tmpdf = tmpdf.set_index('seqnum')
    #whether null or not return a dataframe with synthesized TS data
    #(dropna returns a copy, so the result must be reassigned to take effect)
    tmpdf = tmpdf.dropna(thresh=2)
    return tmpdf
#
########## PLOT HISTOGRAM TO FIGURE ##########
#
def plot_hist_to_fig(histdf, dname):
    #get date range of the plot to use in suptitle
    begdt = histdf['pingdatetime'].min().date()
    findt = histdf['pingdatetime'].max().date()
    #derive a calendar date column so failures can be counted per day
    histdf['pingdate'] = histdf['pingdatetime'].apply(lambda x: x.date())
    downdf = pd.DataFrame(columns=['xlabel','pingdate', 'downcount'])
    datelist = list(histdf.pingdate.unique())
    for uniquedate in datelist:
        xlabel = str('{:02d}'.format(uniquedate.month))+'-'+str('{:02d}'.format(uniquedate.day))
        downcount = len(histdf[(histdf.statenow == '0') & (histdf.pingdate == uniquedate)])
        totalcount = len(histdf[(histdf.pingdate == uniquedate)])
        downdf.loc[len(downdf.index)] = [xlabel, uniquedate,100*downcount//totalcount]
    downdf = downdf.as_matrix()
    #x-axis values are in the newly generated xlabel and pingdate columns
    xl = np.array(downdf[:,0])
    x = np.array(downdf[:,1])
    #y-axis values are the per-day failure percentages
    y = np.array(downdf[:,2])
    histfig, ax = plt.subplots()
    ax.bar(x,y,color='red',width=0.5, align="center")
    #to give enough spacing for the suptitle; otherwise overlaps with title
    histfig.subplots_adjust(top=0.87)
#    plt.figure(figsize=(8,6), dpi=150)
    #beautify the plot and name the labels, titles
    ax.set_title('Percentage of time Server Failed each Day', fontsize=14, fontweight='bold', color='gray')
    histfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
    ax.set_xlabel('Month-Day', fontsize=12, color='gray')
    ax.set_ylabel('Failure Rate (%)', fontsize=12, color='gray')
    plt.yticks(fontsize=10, color='gray', rotation='horizontal')
    plt.xticks(x, xl, fontsize=10, color='gray', rotation='vertical')
    ax.grid(True)
    return histfig
#
########## PLOT DOWN TIMES FREQUENCY TO FIGURE ##########
#
def plot_freq_to_fig(plotdf, dname):
    #get date range of the plot to use in suptitle
    begdt = plotdf['pingdatetime'].min().date()
    findt = plotdf['pingdatetime'].max().date()
    failrate = 100-(sum(100*plotdf['statenow'].astype(int))/len(plotdf))
    failrate = failrate.astype(float)
    #create a new x-axis index using dataframe index; starting from 1 instead of 0
    plotdf['xvalues'] = range(1,len(plotdf)+1)
    plotdf = plotdf.as_matrix()
    #x-axis values are in the newly generated xvalues column
    x = np.array(plotdf[:,3].astype(int))
    #y-axis values (1 or 0) are in the dataframe statenow column
    y = np.array(plotdf[:,2].astype(int))
    #setup to capture the plot into a figure
    plotfig = plt.figure(num=None, figsize=(8, 6), dpi=150, facecolor='y', edgecolor='k')
    ax = plotfig.add_subplot(311)
    ax.fill_between(x, 0, y, color='green')
    ax.plot(x,y,color='green',lw=2)
    #to give enough spacing for the suptitle; otherwise overlaps with title
    plotfig.subplots_adjust(top=0.87)
    #beautify the plot and name the labels, titles
    ax.set_title('Frequency of Server Access Failure ('+str(failrate)+'%)', fontsize=14, fontweight='bold', color='gray')
    plotfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
    ax.set_xlabel('Attempted Machine Access Times', fontsize=12, color='gray')
    ax.set_ylabel('Machine State', fontsize=12, color='gray')
    plt.yticks(y, ['UP','DOWN'], fontsize=10, color='gray', rotation='vertical')
    plt.xticks(fontsize=10, color='gray', rotation='horizontal')
    plt.ylim(0,1.1)
    plt.xlim(0,x.max()+10)
    ax.grid(True)
    return plotfig
#
############# MAIN ################################
#
print("Compile data from the log files")
#os.system('./analytics.sh')
print("Reading data from file "+pingfile)
with open(pingfile, 'r') as f:
    data = [i.split(",") for i in f.read().split()]
df = pd.DataFrame(data, columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
#format pingdatetime as proper datetime; truncate ping times to the minute to
#avoid duplicate data (mutating `row` inside iterrows() would not write back
#to the dataframe, so operate on the column directly)
df['pingdatetime'] = pd.to_datetime(df['pingdatetime'], format='%Y-%m-%d:%H:%M:%S')
df['pingdatetime'] = df['pingdatetime'].apply(lambda x: x.replace(second=0))
#order by ping time and set the sequence number as the index
df = df.sort_values(by='pingdatetime')
df = df.set_index('seqnum')
#begin processing for each unique domain
print(str(len(df.index))+" data rows added to the dataframe, ready for processing ...")
print ('-----------------------------------------------------')
for thedomain in df.domain.unique():
    #insert synthesized data points
    dompingdf = df[df['domain']==thedomain]
    print("Begin data synthesis for "+thedomain+" with data rows = "+str(len(dompingdf.index)))
    amenddf = synth_data(dompingdf,plotinterval)
    if not amenddf.empty:
        #output the synthesized dataframe to output file
        print(str(len(amenddf.index))+" synthesized data rows added to "+thedomain)
        amenddf['pingdatetime'] = pd.to_datetime(amenddf.pingdatetime)
        amenddf = amenddf.sort_values(by='pingdatetime')
        amenddf.index = range(0,len(amenddf))
        print('writing data to file: ./data/syndata_'+thedomain+'.csv')
        amenddf.to_csv('./data/syndata_'+thedomain+'.csv')
        #plot timeseries with function (need to add if conditions to check if function returns valid fig)
        fig = plot_freq_to_fig(amenddf, thedomain)
        fig.savefig('./plots/freqplot_'+thedomain+'.png', bbox_inches='tight')
        print ('frequency plot created in file: ./plots/freqplot_'+thedomain+'.png')
        fig = plot_hist_to_fig(amenddf, thedomain)
        fig.savefig('./plots/histplot_'+thedomain+'.png', bbox_inches='tight')
        print ('histogram plot created in file: ./plots/histplot_'+thedomain+'.png')
        print ('process complete for '+thedomain)
        print ('-----------------------------------------------------')
    else:
        print ("Warning: no synthesized data was added to: "+thedomain)
        print ('-----------------------------------------------------')
print ('End processing data for visualization !!! ')
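The core idea of synth_data above is forward-filling: between two real ping observations it repeats the earlier state every plotinterval minutes, so the downtime plots get evenly spaced samples. A toy sketch of just that mechanism (not the function itself):

# Toy sketch of the gap-filling done by synth_data above: carry the last
# known state forward every `interval` minutes until the next observation.
import datetime as dtm

interval = 10  # minutes, matching plotinterval above
obs = [(dtm.datetime(2017, 9, 22, 8, 0), '1'),
       (dtm.datetime(2017, 9, 22, 8, 40), '0')]

filled = [obs[0]]
t, state = obs[0]
while t + dtm.timedelta(minutes=interval) < obs[1][0]:
    t += dtm.timedelta(minutes=interval)
    filled.append((t, state))          # repeat the previous state
filled.append(obs[1])
for t, state in filled:
    print(t.strftime('%H:%M'), state)  # 08:00-08:30 stay '1'; 08:40 flips to '0'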
mit
-8,745,131,300,681,286,000
48.531429
121
0.639132
false
3.552459
false
false
false
pbauman/libmesh
doc/statistics/libmesh_pagehits.py
1
10542
#!/usr/bin/env python import matplotlib.pyplot as plt import numpy as np # Import stuff for working with dates from datetime import datetime from matplotlib.dates import date2num # Hits/month, pages, and gigabytes served. # To get the Google analytics data: # .) Go to analytics.google.com. # .) There should be (as of July 2017) a "Google Analytics Home" box at the top left of the dashboard. # .) Click the "Audience Overview" link at the bottom right corner of this box. # .) Adjust date range to previous month. # .) Record the number of "Pageviews" in the "Hits" column below. # The data below are from the libmesh.github.io site, which uses the # number UA-24978333-1. # # Note: we do not have control over the analytics for the # https://www.github.com/libMesh/libmesh page. If you look at the page # source, analytics code UA-3769691-2 appears, but if I try to add # this property in my analytics account, Google assigns me the number # UA-24978333-{2,3,...} (where the last digit may change depending on # how many times you tried to add/remove this property in the # Analytics Dashboard) and there does not seem to be a straightforward # way of inserting this code into the source. There have been some # README.md based hacks for doing this in the past, but I don't think # they are particularly reliable... # Hits, pages, GB served data = [ # 'Jan 2003', 616, 616, 0 # 'Feb 2003', 2078, 2078, 0, # 'Mar 2003', 3157, 3157, 0, # 'Apr 2003', 7800, 7800, 0, # 'May 2003', 4627, 4627, 0, # 'Jun 2003', 6156, 6156, 0, # 'Jul 2003', 6389, 6389, 0, # 'Aug 2003', 10136, 10136, 0, # 'Sep 2003', 8871, 8871, 0, # 'Oct 2003', 9703, 9703, 0, # 'Nov 2003', 9802, 9802, 0, # 'Dec 2003', 9123, 9123, 0, # 'Jan 2004', 13599, 13599, 0, # 'Feb 2004', 11018, 11018, 0, # 'Mar 2004', 11713, 11713, 0, # 'Apr 2004', 14995, 14995, 0, # 'May 2004', 11285, 11285, 0, # 'Jun 2004', 12974, 12974, 0, # 'Jul 2004', 12939, 12939, 0, # 'Aug 2004', 9708, 9708, 0, # 'Sep 2004', 7994, 7994, 0, # 'Oct 2004', 6920, 6920, 0, # 'Nov 2004', 10261, 10261, 0, # 'Dec 2004', 7483, 7483, 0, # 'Jan 2005', 3184, 3184, 0, # 'Feb 2005', 37733, 14077, .4373, # 'Mar 2005', 43927, 16408, .5637, # 'Apr 2005', 29792, 8518, .2890, # 'May 2005', 51288, 17629, .5689, # 'Jun 2005', 40617, 16599, .5379, # 'Jul 2005', 29944, 10006, .3363, # 'Aug 2005', 39592, 14556, .4577, # 'Sep 2005', 57638, 14666, .4881, # 'Oct 2005', 48336, 17976, .5749, # 'Nov 2005', 49563, 15308, .5810, # 'Dec 2005', 90863, 40736, .9415, # 'Jan 2006', 46723, 13487, .5662, # 'Feb 2006', 62285, 26567, .8229, # 'Mar 2006', 47446, 14711, .6534, # 'Apr 2006', 90314, 29635, .9762, # 'May 2006', 68209, 20998, .7949, # 'Jun 2006', 50495, 17128, .6881, # 'Jul 2006', 42387, 10958, .6016, # 'Aug 2006', 55658, 11793, .6174, # 'Sep 2006', 54919, 20591, .9056, # 'Oct 2006', 52916, 17944, .9015, # 'Nov 2006', 55382, 19833, .9439, # 'Dec 2006', 54265, 22688, .9162, # 'Jan 2007', 53813, 19881, 1.0 , # 'Feb 2007', 52434, 17920, .9472, # 'Mar 2007', 61530, 21172, 1.2, # 'Apr 2007', 125578, 77539, 1.3, # 'May 2007', 182764, 129596, 1.6, # 'Jun 2007', 115730, 38571, 1.7, # 'Jul 2007', 121054, 42757, 1.8, # 'Aug 2007', 81192, 28187, 1.3, # 'Sep 2007', 143553, 39734, 2.3, # 'Oct 2007', 110449, 42111, 2.4, # 'Nov 2007', 128307, 57851, 2.3, # 'Dec 2007', 80584, 42631, 2.0, # 'Jan 2008', 69623, 34155, 2.0, # 'Feb 2008', 144881, 111751, 2.5, # 'Mar 2008', 69801, 29211, 1.9, # 'Apr 2008', 74023, 31149, 2.0, # 'May 2008', 63123, 23277, 1.8, # 'Jun 2008', 66055, 25418, 2.1, # 'Jul 2008', 60046, 22082, 2.0, # 'Aug 2008', 
60206, 24543, 2.0, # 'Sep 2008', 53057, 18635, 1.6, # 'Oct 2008', 64828, 27042, 2.1, # 'Nov 2008', 72406, 29767, 2.3, # 'Dec 2008', 76248, 31690, 2.3, # 'Jan 2009', 73002, 29744, 2.0, # 'Feb 2009', 70801, 29156, 2.1, # 'Mar 2009', 78200, 31139, 2.1, # 'Apr 2009', 70888, 26182, 1.7, # 'May 2009', 67263, 26210, 1.8, # 'Jun 2009', 73146, 31328, 2.6, # 'Jul 2009', 77828, 33711, 2.4, # 'Aug 2009', 64378, 28542, 1.9, # 'Sep 2009', 76167, 33484, 2.2, # 'Oct 2009', 95727, 41062, 2.8, # 'Nov 2009', 88042, 38869, 2.5, # 'Dec 2009', 76148, 37609, 2.3, # 'Jan 2010', 268856, 45983, 3.2, # 'Feb 2010', 208210, 42680, 3.0, # 'Mar 2010', 116263, 42660, 2.6, # 'Apr 2010', 102493, 32942, 2.4, # 'May 2010', 117023, 37107, 2.5, # 'Jun 2010', 128589, 38019, 2.5, # 'Jul 2010', 87183, 34026, 2.2, # 'Aug 2010', 99161, 33199, 2.5, # 'Sep 2010', 81657, 32305, 2.5, # 'Oct 2010', 98236, 42091, 3.4, # 'Nov 2010', 115603, 48695, 3.4, # 'Dec 2010', 105030, 45570, 3.4, # 'Jan 2011', 133476, 43549, 3.1, # 'Feb 2011', 34483, 15002, 1.1, # 'Mar 2011', 0, 0, 0.0, # 'Apr 2011', 0, 0, 0.0, # 'May 2011', 0, 0, 0.0, # 'Jun 2011', 0, 0, 0.0, # 'Jul 2011', 0, 0, 0.0, 'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data 'Sep 2011', 10305, 0, 0.0, 'Oct 2011', 14081, 0, 0.0, 'Nov 2011', 13397, 0, 0.0, 'Dec 2011', 13729, 0, 0.0, 'Jan 2012', 11050, 0, 0.0, 'Feb 2012', 12779, 0, 0.0, 'Mar 2012', 12970, 0, 0.0, 'Apr 2012', 13051, 0, 0.0, 'May 2012', 11857, 0, 0.0, 'Jun 2012', 12584, 0, 0.0, 'Jul 2012', 12995, 0, 0.0, 'Aug 2012', 13204, 0, 0.0, 'Sep 2012', 13170, 0, 0.0, 'Oct 2012', 13335, 0, 0.0, 'Nov 2012', 11337, 0, 0.0, 'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012 'Jan 2013', 13029, 0, 0.0, 'Feb 2013', 10420, 0, 0.0, 'Mar 2013', 13400, 0, 0.0, 'Apr 2013', 14416, 0, 0.0, 'May 2013', 13875, 0, 0.0, 'Jun 2013', 13747, 0, 0.0, 'Jul 2013', 14019, 0, 0.0, 'Aug 2013', 10828, 0, 0.0, 'Sep 2013', 9969, 0, 0.0, 'Oct 2013', 13083, 0, 0.0, 'Nov 2013', 12938, 0, 0.0, 'Dec 2013', 9079, 0, 0.0, 'Jan 2014', 9736, 0, 0.0, 'Feb 2014', 11824, 0, 0.0, 'Mar 2014', 10861, 0, 0.0, 'Apr 2014', 12711, 0, 0.0, 'May 2014', 11177, 0, 0.0, 'Jun 2014', 10738, 0, 0.0, 'Jul 2014', 10349, 0, 0.0, 'Aug 2014', 8877, 0, 0.0, 'Sep 2014', 9226, 0, 0.0, 'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014 'Nov 2014', 9243, 0, 0.0, 'Dec 2014', 10714, 0, 0.0, 'Jan 2015', 11508, 0, 0.0, 'Feb 2015', 11278, 0, 0.0, 'Mar 2015', 13305, 0, 0.0, 'Apr 2015', 12347, 0, 0.0, 'May 2015', 11368, 0, 0.0, 'Jun 2015', 11203, 0, 0.0, 'Jul 2015', 10419, 0, 0.0, 'Aug 2015', 11282, 0, 0.0, 'Sep 2015', 13535, 0, 0.0, 'Oct 2015', 12912, 0, 0.0, 'Nov 2015', 13894, 0, 0.0, 'Dec 2015', 11694, 0, 0.0, 'Jan 2016', 11837, 0, 0.0, 'Feb 2016', 14102, 0, 0.0, 'Mar 2016', 13212, 0, 0.0, 'Apr 2016', 13355, 0, 0.0, 'May 2016', 12486, 0, 0.0, 'Jun 2016', 13973, 0, 0.0, 'Jul 2016', 10688, 0, 0.0, 'Aug 2016', 10048, 0, 0.0, 'Sep 2016', 10847, 0, 0.0, 'Oct 2016', 10984, 0, 0.0, 'Nov 2016', 12233, 0, 0.0, 'Dec 2016', 11430, 0, 0.0, 'Jan 2017', 10327, 0, 0.0, 'Feb 2017', 11039, 0, 0.0, 'Mar 2017', 12986, 0, 0.0, 'Apr 2017', 9773, 0, 0.0, 'May 2017', 10880, 0, 0.0, 'Jun 2017', 9179, 0, 0.0, 'Jul 2017', 8344, 0, 0.0, 'Aug 2017', 8617, 0, 0.0, 'Sep 2017', 8576, 0, 0.0, 'Oct 2017', 11255, 0, 0.0, 'Nov 2017', 10362, 0, 0.0, 'Dec 2017', 7948, 0, 0.0, 'Jan 2018', 9376, 0, 0.0, 'Feb 2018', 8864, 0, 0.0, 'Mar 2018', 10339, 0, 0.0, 'Apr 2018', 10958, 0, 0.0, 'May 2018', 10151, 0, 
0.0,
    'Jun 2018',  8981, 0, 0.0,
    'Jul 2018',  8619, 0, 0.0,
    'Aug 2018',  9226, 0, 0.0,
    'Sep 2018',  8507, 0, 0.0,
    'Oct 2018',  9150, 0, 0.0,
    'Nov 2018',  8135, 0, 0.0,
    'Dec 2018',  7522, 0, 0.0,
    'Jan 2019',  8643, 0, 0.0,
    'Feb 2019',  8729, 0, 0.0,
    'Mar 2019',  7916, 0, 0.0,
]

# Extract number of hits/month
n_hits_month = data[1::4]

# Divide by 1000 for plotting...
n_hits_month = np.divide(n_hits_month, 1000.)

# Extract list of date strings
date_strings = data[0::4]

# Convert date strings into numbers
date_nums = []
for d in date_strings:
  date_nums.append(date2num(datetime.strptime(d, '%b %Y')))

# Get a reference to the figure
fig = plt.figure()

# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)

# Plot hits/month as a line with circular markers (an earlier version drew
# a bar chart with one ~30-day-wide bar per month).
# The color used comes from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
ax.plot(date_nums, n_hits_month, marker='o', linewidth=2, color=u'#4878cf')

# Create title
fig.suptitle('libmesh.github.io Hits/Month (in Thousands)')

# Set up x-tick locations -- January of each year
ticks_names = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']

# Get numerical values for the names
tick_nums = []
for x in ticks_names:
  tick_nums.append(date2num(datetime.strptime('Jan ' + x, '%b %Y')))

# Set tick labels and positions
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)

# Set x limits for the plot
plt.xlim(date_nums[0], date_nums[-1]+30);

# Make x-axis ticks point outward
ax.get_xaxis().set_tick_params(direction='out')

# Save as PDF
plt.savefig('libmesh_pagehits.pdf')

# Local Variables:
# python-indent: 2
# End:
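matplotlib's date axis works on ordinal day numbers, which is why the script converts every 'Mon YYYY' label through strptime and date2num before plotting. A tiny sketch of that round trip:

# Sketch of the date handling above: labels are parsed, converted to
# matplotlib's float day numbers, and can be converted back for checking.
from datetime import datetime
from matplotlib.dates import date2num, num2date

n = date2num(datetime.strptime('Aug 2011', '%b %Y'))
print(n, num2date(n))  # float day count and the round-tripped datetime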
lgpl-2.1
-5,319,884,989,136,103,000
36.119718
131
0.525232
false
2.376465
false
false
false
not-na/peng3d
peng3d/gui/layout.py
1
8877
#!/usr/bin/env python # -*- coding: utf-8 -*- # # layout.py # # Copyright 2020 notna <[email protected]> # # This file is part of peng3d. # # peng3d is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # peng3d is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with peng3d. If not, see <http://www.gnu.org/licenses/>. # __all__ = [ "Layout", "GridLayout", "LayoutCell", ] import peng3d from peng3d import util from peng3d.util import WatchingList try: import pyglet from pyglet.gl import * except ImportError: pass # Headless mode class Layout(util.ActionDispatcher): """ Base Layout class. This class does not serve any purpose directly other than to be a common base class for all layouts. Note that layouts can be nested, e.g. usually the first layouts parent is a SubMenu and sub-layouts get a LayoutCell of their parent layout as their parent. """ def __init__(self, peng, parent): self.peng = peng self.parent = parent @property def pos(self): return self.parent.pos @property def size(self): return self.parent.size class GridLayout(Layout): """ Grid-based layout helper class. This class provides a grid-like layout to its sub-widgets. A border between widgets can be defined. Additionally, all widgets using this layout should automatically scale with screen size. """ def __init__(self, peng, parent, res, border): super().__init__(peng, parent) self.res = res self.bordersize = border @property def cell_size(self): """ Helper property defining the current size of cells in both x and y axis. :return: 2-tuple of float """ return self.size[0]/self.res[0], self.size[1]/self.res[1] def get_cell(self, pos, size, anchor_x="left", anchor_y="bottom", border=1): """ Returns a grid cell suitable for use as the ``pos`` parameter of any widget. The ``size`` parameter of the widget will automatically be overwritten. :param pos: Grid position, in cell :param size: Size, in cells :param anchor_x: either ``left``\\ , ``center`` or ``right`` :param anchor_y: either ``bottom``\\ , ``center`` or ``top`` :return: LayoutCell subclass """ return _GridCell(self.peng, self, pos, size, anchor_x, anchor_y, border) class LayoutCell(object): """ Base Layout Cell. Not to be used directly. Usually subclasses of this class are returned by layouts. Instances can be passed to Widgets as the ``pos`` argument. The ``size`` argument will be automatically overridden. Note that manually setting ``size`` will override the size set by the layout cell, though the position will be kept. """ @property def pos(self): """ Property accessing the position of the cell. This usually refers to the bottom-left corner, but may change depending on arguments passed during creation. Note that results can be floats. :return: 2-tuple of ``(x,y)`` """ raise NotImplementedError("pos property has to be overridden") @property def size(self): """ Property accessing the size of the cell. Note that results can be floats. :return: 2-tuple of ``(width, height)`` """ raise NotImplementedError("size property has to be overridden") class DumbLayoutCell(LayoutCell): """ Dumb layout cell that behaves like a widget. 
    Note that this class is not actually a widget and should only be used
    as the ``pos`` argument to a widget or the ``parent`` to another Layout.

    It can be used to create, for example, a :py:class:`GridLayout()` over only a portion of the screen.

    Even though setting the :py:attr:`pos` and :py:attr:`size` attributes is possible, sometimes
    a redraw cannot be triggered correctly if e.g. the parent is not a submenu.
    """
    def __init__(self, parent, pos, size):
        self.parent = parent

        self._pos = pos
        self._size = size

    @property
    def pos(self):
        """
        Property that will always be a 2-tuple representing the position of the widget.

        Note that this method may call the method given as ``pos`` in the initializer.

        The returned object will actually be an instance of a helper class to allow for setting
        only the x/y coordinate.

        This property also respects any :py:class:`Container` set as its parent, any offset will
        be added automatically.

        Note that setting this property will override any callable set permanently.
        """
        if isinstance(self._pos, list) or isinstance(self._pos, tuple):
            r = self._pos
        elif callable(self._pos):
            w, h = self.parent.size[:]
            r = self._pos(w, h, *self.size)
        elif isinstance(self._pos, LayoutCell):
            r = self._pos.pos
        else:
            raise TypeError("Invalid position type")
        ox, oy = self.parent.pos
        r = r[0] + ox, r[1] + oy
        # if isinstance(self.submenu,ScrollableContainer) and not self._is_scrollbar:# and self.name != "__scrollbar_%s"%self.submenu.name:
        #     # Widget inside scrollable container and not the scrollbar
        #     r = r[0],r[1]+self.submenu.offset_y
        return WatchingList(r, self._wlredraw_pos)

    @pos.setter
    def pos(self, value):
        self._pos = value
        if hasattr(self.parent, "redraw"):
            self.parent.redraw()

    @property
    def size(self):
        """
        Similar to :py:attr:`pos` but for the size instead.
""" if isinstance(getattr(self, "_pos", None), LayoutCell): s = self._pos.size elif isinstance(self._size, list) or isinstance(self._size, tuple): s = self._size elif callable(self._size): w, h = self.parent.size[:] s = self._size(w, h) else: raise TypeError("Invalid size type") s = s[:] if s[0] == -1 or s[1] == -1: raise ValueError("Cannot set size to -1 in DumbLayoutCell") # Prevents crashes with negative size s = [max(s[0], 0), max(s[1], 0)] return WatchingList(s, self._wlredraw_size) @size.setter def size(self, value): self._size = value if hasattr(self.parent, "redraw"): self.parent.redraw() def _wlredraw_pos(self,wl): self._pos = wl[:] if hasattr(self.parent, "redraw"): self.parent.redraw() def _wlredraw_size(self,wl): self._size = wl[:] if hasattr(self.parent, "redraw"): self.parent.redraw() class _GridCell(LayoutCell): def __init__(self, peng, parent, offset, size, anchor_x, anchor_y, border=1): self.peng = peng self.parent = parent self.offset = offset self._size = size self.anchor_x = anchor_x self.anchor_y = anchor_y self.border = border @property def pos(self): dx, dy = self.parent.bordersize dx *= self.border dy *= self.border px, py = self.parent.pos # Parent position in px oxc, oyc = self.offset # Offset in cells csx, csy = self.parent.cell_size # Cell size in px ox, oy = oxc*csx, oyc*csy # Offset in px sxc, sxy = self._size # Size in cells sx, sy = sxc*csx, sxy*csy # Size in px if self.anchor_x == "left": x = px+ox+dx/2 elif self.anchor_x == "center": x = px+ox+sx/2 elif self.anchor_x == "right": x = px+ox+sx-dx/2 else: raise ValueError(f"Invalid anchor_x of {self.anchor_x}") if self.anchor_y == "bottom": y = py+oy+dy/2 elif self.anchor_y == "center": y = py+oy+sy/2 elif self.anchor_y == "top": y = py+oy+sy-dy/2 else: raise ValueError(f"Invalid anchor_y of {self.anchor_y}") return x, y @property def size(self): dx, dy = self.parent.bordersize csx, csy = self.parent.cell_size # Cell size in px sxc, sxy = self._size # Size in cells sx, sy = sxc * csx-dx*self.border, sxy * csy-dy*self.border return sx, sy
gpl-2.0
-6,435,422,355,169,676,000
30.038462
198
0.600315
false
3.806604
false
false
false
andrewgiessel/folium
folium/utilities.py
1
19979
# -*- coding: utf-8 -*-
"""
Utilities
-------

Utility module for Folium helper functions.

"""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import time
import math
import zlib
import struct
import json
import base64

from jinja2 import Environment, PackageLoader

try:
    import pandas as pd
except ImportError:
    pd = None

try:
    import numpy as np
except ImportError:
    np = None

from folium.six import iteritems, text_type, binary_type


def get_templates():
    """Get Jinja templates."""
    return Environment(loader=PackageLoader('folium', 'templates'))


def legend_scaler(legend_values, max_labels=10.0):
    """
    Downsamples the number of legend values so that there isn't a collision
    of text on the legend colorbar (within reason). The colorbar seems to
    support ~10 entries as a maximum.
    """
    if len(legend_values) < max_labels:
        legend_ticks = legend_values
    else:
        spacer = int(math.ceil(len(legend_values)/max_labels))
        legend_ticks = []
        for i in legend_values[::spacer]:
            legend_ticks += [i]
            legend_ticks += ['']*(spacer-1)
    return legend_ticks


def linear_gradient(hexList, nColors):
    """
    Given a list of hexcode values, will return a list of length
    nColors where the colors are linearly interpolated between the
    hex color codes that are given.

    Example:
    linear_gradient(['#000000', '#FF0000', '#FFFF00'], 100)
    """
    def _scale(start, finish, length, i):
        """
        Return the correct value of a number that is in between start
        and finish, for use in a loop of length *length*.
        """
        base = 16
        fraction = float(i) / (length - 1)
        raynge = int(finish, base) - int(start, base)
        thex = hex(int(int(start, base) + fraction * raynge)).split('x')[-1]
        if len(thex) != 2:
            thex = '0' + thex
        return thex

    allColors = []
    # Separate (R, G, B) pairs.
    for start, end in zip(hexList[:-1], hexList[1:]):
        # Linearly interpolate between each pair of hex '#RRGGBB' values
        # and add to list.
        nInterpolate = 765
        for index in range(nInterpolate):
            r = _scale(start[1:3], end[1:3], nInterpolate, index)
            g = _scale(start[3:5], end[3:5], nInterpolate, index)
            b = _scale(start[5:7], end[5:7], nInterpolate, index)
            allColors.append(''.join(['#', r, g, b]))

    # Pick only nColors colors from the total list.
    result = []
    for counter in range(nColors):
        fraction = float(counter) / (nColors - 1)
        index = int(fraction * (len(allColors) - 1))
        result.append(allColors[index])
    return result


def color_brewer(color_code, n=6):
    """
    Generate a colorbrewer color scheme of length 'n' for the scheme named by 'color_code'.
Live examples can be seen at http://colorbrewer2.org/ """ maximum_n = 253 scheme_info = {'BuGn': 'Sequential', 'BuPu': 'Sequential', 'GnBu': 'Sequential', 'OrRd': 'Sequential', 'PuBu': 'Sequential', 'PuBuGn': 'Sequential', 'PuRd': 'Sequential', 'RdPu': 'Sequential', 'YlGn': 'Sequential', 'YlGnBu': 'Sequential', 'YlOrBr': 'Sequential', 'YlOrRd': 'Sequential', 'BrBg': 'Diverging', 'PiYG': 'Diverging', 'PRGn': 'Diverging', 'PuOr': 'Diverging', 'RdBu': 'Diverging', 'RdGy': 'Diverging', 'RdYlBu': 'Diverging', 'RdYlGn': 'Diverging', 'Spectral': 'Diverging', 'Accent': 'Qualitative', 'Dark2': 'Qualitative', 'Paired': 'Qualitative', 'Pastel1': 'Qualitative', 'Pastel2': 'Qualitative', 'Set1': 'Qualitative', 'Set2': 'Qualitative', 'Set3': 'Qualitative', } schemes = {'BuGn': ['#EDF8FB', '#CCECE6', '#CCECE6', '#66C2A4', '#41AE76', '#238B45', '#005824'], 'BuPu': ['#EDF8FB', '#BFD3E6', '#9EBCDA', '#8C96C6', '#8C6BB1', '#88419D', '#6E016B'], 'GnBu': ['#F0F9E8', '#CCEBC5', '#A8DDB5', '#7BCCC4', '#4EB3D3', '#2B8CBE', '#08589E'], 'OrRd': ['#FEF0D9', '#FDD49E', '#FDBB84', '#FC8D59', '#EF6548', '#D7301F', '#990000'], 'PuBu': ['#F1EEF6', '#D0D1E6', '#A6BDDB', '#74A9CF', '#3690C0', '#0570B0', '#034E7B'], 'PuBuGn': ['#F6EFF7', '#D0D1E6', '#A6BDDB', '#67A9CF', '#3690C0', '#02818A', '#016450'], 'PuRd': ['#F1EEF6', '#D4B9DA', '#C994C7', '#DF65B0', '#E7298A', '#CE1256', '#91003F'], 'RdPu': ['#FEEBE2', '#FCC5C0', '#FA9FB5', '#F768A1', '#DD3497', '#AE017E', '#7A0177'], 'YlGn': ['#FFFFCC', '#D9F0A3', '#ADDD8E', '#78C679', '#41AB5D', '#238443', '#005A32'], 'YlGnBu': ['#FFFFCC', '#C7E9B4', '#7FCDBB', '#41B6C4', '#1D91C0', '#225EA8', '#0C2C84'], 'YlOrBr': ['#FFFFD4', '#FEE391', '#FEC44F', '#FE9929', '#EC7014', '#CC4C02', '#8C2D04'], 'YlOrRd': ['#FFFFB2', '#FED976', '#FEB24C', '#FD8D3C', '#FC4E2A', '#E31A1C', '#B10026'], 'BrBg': ['#8c510a', '#d8b365', '#f6e8c3', '#c7eae5', '#5ab4ac', '#01665e'], 'PiYG': ['#c51b7d', '#e9a3c9', '#fde0ef', '#e6f5d0', '#a1d76a', '#4d9221'], 'PRGn': ['#762a83', '#af8dc3', '#e7d4e8', '#d9f0d3', '#7fbf7b', '#1b7837'], 'PuOr': ['#b35806', '#f1a340', '#fee0b6', '#d8daeb', '#998ec3', '#542788'], 'RdBu': ['#b2182b', '#ef8a62', '#fddbc7', '#d1e5f0', '#67a9cf', '#2166ac'], 'RdGy': ['#b2182b', '#ef8a62', '#fddbc7', '#e0e0e0', '#999999', '#4d4d4d'], 'RdYlBu': ['#d73027', '#fc8d59', '#fee090', '#e0f3f8', '#91bfdb', '#4575b4'], 'RdYlGn': ['#d73027', '#fc8d59', '#fee08b', '#d9ef8b', '#91cf60', '#1a9850'], 'Spectral': ['#d53e4f', '#fc8d59', '#fee08b', '#e6f598', '#99d594', '#3288bd'], 'Accent': ['#7fc97f', '#beaed4', '#fdc086', '#ffff99', '#386cb0', '#f0027f'], 'Dark2': ['#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02'], 'Paired': ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c'], 'Pastel1': ['#fbb4ae', '#b3cde3', '#ccebc5', '#decbe4', '#fed9a6', '#ffffcc'], 'Pastel2': ['#b3e2cd', '#fdcdac', '#cbd5e8', '#f4cae4', '#e6f5c9', '#fff2ae'], 'Set1': ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33'], 'Set2': ['#66c2a5', '#fc8d62', '#8da0cb', '#e78ac3', '#a6d854', '#ffd92f'], 'Set3': ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3', '#fdb462'], } # Raise an error if the n requested is greater than the maximum. if n > maximum_n: raise ValueError("The maximum number of colors in a" " ColorBrewer sequential color series is 253") # Only if n is greater than six do we interpolate values. if n > 6: if color_code not in schemes: color_scheme = None else: # Check to make sure that it is not a qualitative scheme. 
if scheme_info[color_code] == 'Qualitative': raise ValueError("Expanded color support is not available" " for Qualitative schemes, restrict" " number of colors to 6") else: color_scheme = linear_gradient(schemes.get(color_code), n) else: color_scheme = schemes.get(color_code, None) return color_scheme def transform_data(data): """ Transform Pandas DataFrame into JSON format. Parameters ---------- data: DataFrame or Series Pandas DataFrame or Series Returns ------- JSON compatible dict Example ------- >>> transform_data(df) """ if pd is None: raise ImportError("The Pandas package is required" " for this functionality") if np is None: raise ImportError("The NumPy package is required" " for this functionality") def type_check(value): """ Type check values for JSON serialization. Native Python JSON serialization will not recognize some Numpy data types properly, so they must be explicitly converted. """ if pd.isnull(value): return None elif (isinstance(value, pd.tslib.Timestamp) or isinstance(value, pd.Period)): return time.mktime(value.timetuple()) elif isinstance(value, (int, np.integer)): return int(value) elif isinstance(value, (float, np.float_)): return float(value) elif isinstance(value, str): return str(value) else: return value if isinstance(data, pd.Series): json_data = [{type_check(x): type_check(y) for x, y in iteritems(data)}] elif isinstance(data, pd.DataFrame): json_data = [{type_check(y): type_check(z) for x, y, z in data.itertuples()}] return json_data def split_six(series=None): """ Given a Pandas Series, get a domain of values from zero to the 90% quantile rounded to the nearest order-of-magnitude integer. For example, 2100 is rounded to 2000, 2790 to 3000. Parameters ---------- series: Pandas series, default None Returns ------- list """ if pd is None: raise ImportError("The Pandas package is required" " for this functionality") if np is None: raise ImportError("The NumPy package is required" " for this functionality") def base(x): if x > 0: base = pow(10, math.floor(math.log10(x))) return round(x/base)*base else: return 0 quants = [0, 50, 75, 85, 90] # Some weirdness in series quantiles a la 0.13. arr = series.values return [base(np.percentile(arr, x)) for x in quants] def mercator_transform(data, lat_bounds, origin='upper', height_out=None): """Transforms an image computed in (longitude,latitude) coordinates into the a Mercator projection image. Parameters ---------- data: numpy array or equivalent list-like object. Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA) lat_bounds : length 2 tuple Minimal and maximal value of the latitude of the image. origin : ['upper' | 'lower'], optional, default 'upper' Place the [0,0] index of the array in the upper left or lower left corner of the axes. height_out : int, default None The expected height of the output. If None, the height of the input is used. 
""" if np is None: raise ImportError("The NumPy package is required" " for this functionality") mercator = lambda x: np.arcsinh(np.tan(x*np.pi/180.))*180./np.pi array = np.atleast_3d(data).copy() height, width, nblayers = array.shape lat_min, lat_max = lat_bounds if height_out is None: height_out = height # Eventually flip the image if origin == 'upper': array = array[::-1, :, :] lats = (lat_min + np.linspace(0.5/height, 1.-0.5/height, height) * (lat_max-lat_min)) latslats = (mercator(lat_min) + np.linspace(0.5/height_out, 1.-0.5/height_out, height_out) * (mercator(lat_max)-mercator(lat_min))) out = np.zeros((height_out, width, nblayers)) for i in range(width): for j in range(4): out[:, i, j] = np.interp(latslats, mercator(lats), array[:, i, j]) # Eventually flip the image. if origin == 'upper': out = out[::-1, :, :] return out def image_to_url(image, mercator_project=False, colormap=None, origin='upper', bounds=((-90, -180), (90, 180))): """Infers the type of an image argument and transforms it into a URL. Parameters ---------- image: string, file or array-like object * If string, it will be written directly in the output file. * If file, it's content will be converted as embedded in the output file. * If array-like, it will be converted to PNG base64 string and embedded in the output. origin : ['upper' | 'lower'], optional, default 'upper' Place the [0, 0] index of the array in the upper left or lower left corner of the axes. colormap : callable, used only for `mono` image. Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)] for transforming a mono image into RGB. It must output iterables of length 3 or 4, with values between 0. and 1. Hint : you can use colormaps from `matplotlib.cm`. mercator_project : bool, default False, used for array-like image. Transforms the data to project (longitude,latitude) coordinates to the Mercator projection. bounds: list-like, default ((-90, -180), (90, 180)) Image bounds on the map in the form [[lat_min, lon_min], [lat_max, lon_max]]. Only used if mercator_project is True. """ if hasattr(image, 'read'): # We got an image file. if hasattr(image, 'name'): # We try to get the image format from the file name. fileformat = image.name.lower().split('.')[-1] else: fileformat = 'png' url = "data:image/{};base64,{}".format( fileformat, base64.b64encode(image.read()).decode('utf-8')) elif (not (isinstance(image, text_type) or isinstance(image, binary_type))) and hasattr(image, '__iter__'): # We got an array-like object. if mercator_project: data = mercator_transform(image, [bounds[0][0], bounds[1][0]], origin=origin) else: data = image png = write_png(data, origin=origin, colormap=colormap) url = "data:image/png;base64," + base64.b64encode(png).decode('utf-8') else: # We got an URL. url = json.loads(json.dumps(image)) return url.replace('\n', ' ') def write_png(data, origin='upper', colormap=None): """ Transform an array of data into a PNG string. This can be written to disk using binary I/O, or encoded using base64 for an inline PNG like this: >>> png_str = write_png(array) >>> "data:image/png;base64,"+png_str.encode('base64') Inspired from http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image Parameters ---------- data: numpy array or equivalent list-like object. Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA) origin : ['upper' | 'lower'], optional, default 'upper' Place the [0,0] index of the array in the upper left or lower left corner of the axes. colormap : callable, used only for `mono` image. 
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)] for transforming a mono image into RGB. It must output iterables of length 3 or 4, with values between 0. and 1. Hint: you can use colormaps from `matplotlib.cm`. Returns ------- PNG formatted byte string """ if np is None: raise ImportError("The NumPy package is required" " for this functionality") if colormap is None: colormap = lambda x: (x, x, x, 1) array = np.atleast_3d(data) height, width, nblayers = array.shape if nblayers not in [1, 3, 4]: raise ValueError("Data must be NxM (mono), " "NxMx3 (RGB), or NxMx4 (RGBA)") assert array.shape == (height, width, nblayers) if nblayers == 1: array = np.array(list(map(colormap, array.ravel()))) nblayers = array.shape[1] if nblayers not in [3, 4]: raise ValueError("colormap must provide colors of" "length 3 (RGB) or 4 (RGBA)") array = array.reshape((height, width, nblayers)) assert array.shape == (height, width, nblayers) if nblayers == 3: array = np.concatenate((array, np.ones((height, width, 1))), axis=2) nblayers = 4 assert array.shape == (height, width, nblayers) assert nblayers == 4 # Normalize to uint8 if it isn't already. if array.dtype != 'uint8': array = array * 255./array.max(axis=(0, 1)).reshape((1, 1, 4)) array = array.astype('uint8') # Eventually flip the image. if origin == 'lower': array = array[::-1, :, :] # Transform the array to bytes. raw_data = b''.join([b'\x00' + array[i, :, :].tobytes() for i in range(height)]) def png_pack(png_tag, data): chunk_head = png_tag + data return (struct.pack("!I", len(data)) + chunk_head + struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([ b'\x89PNG\r\n\x1a\n', png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def _camelify(out): return (''.join(["_" + x.lower() if i < len(out)-1 and x.isupper() and out[i+1].islower() # noqa else x.lower() + "_" if i < len(out)-1 and x.islower() and out[i+1].isupper() # noqa else x.lower() for i, x in enumerate(list(out))])).lstrip('_').replace('__', '_') # noqa def _parse_size(value): try: if isinstance(value, int) or isinstance(value, float): value_type = 'px' value = float(value) assert value > 0 else: value_type = '%' value = float(value.strip('%')) assert 0 <= value <= 100 except: msg = "Cannot parse value {!r} as {!r}".format raise ValueError(msg(value, value_type)) return value, value_type def _locations_mirror(x): """Mirrors the points in a list-of-list-of-...-of-list-of-points. For example: >>> _locations_mirror([[[1, 2], [3, 4]], [5, 6], [7, 8]]) [[[2, 1], [4, 3]], [6, 5], [8, 7]] """ if hasattr(x, '__iter__'): if hasattr(x[0], '__iter__'): return list(map(_locations_mirror, x)) else: return list(x[::-1]) else: return x def _locations_tolist(x): """Transforms recursively a list of iterables into a list of list. """ if hasattr(x, '__iter__'): return list(map(_locations_tolist, x)) else: return x
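write_png above assembles a complete PNG byte string by hand (signature, IHDR, IDAT, IEND chunks), so a usage sketch only needs an RGBA array; passing uint8 data skips the per-channel normalization branch. A sketch (out.png is just an illustrative file name, and numpy is assumed installed):

# Sketch: render a small red ramp with write_png from above.
import numpy as np

data = np.zeros((16, 16, 4), dtype='uint8')
data[..., 0] = np.arange(16, dtype='uint8') * 16   # red ramp, left to right
data[..., 3] = 255                                 # fully opaque
with open('out.png', 'wb') as f:
    f.write(write_png(data))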
mit
-2,512,162,047,460,299,000
34.740608
101
0.516743
false
3.55056
false
false
false
FDelporte/PiGameConsole
Main.py
1
5104
''' Created on 22/09/2017

@author: Frank Delporte
'''

import thread
import Tkinter as tk
import tkFont
import time
from ButtonHandler import *
from KeyReader import *
from PongGui import *
from SlideShow import *
from ConsoleMenu import *
from Legend import *

try:
    import keyboard  # pip install keyboard
    keyAvailable = True
except ImportError:
    keyAvailable = False


class PiGameConsole():

    # general vars
    pongBusy = False
    slideShowBusy = False
    keepRunning = False

    # frame holders
    menu = None
    legend = None
    win = None
    slideShow = None
    pong = None

    def __init__(self):
        print("PiGameConsole initiated")

    def preventScreensaver(self):
        while (self.keepRunning):
            if keyAvailable == True:
                keyboard.write('A', delay=0)
            time.sleep(10)

    def checkInput(self):
        btn = ButtonHandler()
        key = KeyReader()
        while (self.keepRunning):
            if btn.getButton(2) == True or key.getKey("1") == True:
                #print("Controller red")
                if self.slideShowBusy == True and self.slideShow != None:
                    self.slideShow.stop()
                    self.startPong()
                elif self.pongBusy == True and self.pong != None:
                    self.pong.stop()
                    self.startSlideShow()
            if btn.getButton(1) == True or key.getKey("2") == True:
                #print("Controller green")
                print("Controller green")
            if btn.getButton(4) == True or key.getKey("3") == True:
                #print("Player1 red")
                if self.pongBusy == True and self.pong != None:
                    self.pong.move_player(1, "up")
            if btn.getButton(3) == True or key.getKey("4") == True:
                #print("Player1 green")
                if self.pongBusy == True and self.pong != None:
                    self.pong.move_player(1, "down")
            if btn.getButton(6) == True or key.getKey("5") == True:
                #print("Player2 red")
                if self.pongBusy == True and self.pong != None:
                    self.pong.move_player(2, "up")
            if btn.getButton(5) == True or key.getKey("6") == True:
                #print("Player2 green")
                if self.pongBusy == True and self.pong != None:
                    self.pong.move_player(2, "down")
            time.sleep(0.1)

    def startGUI(self):
        # Start the GUI
        self.win = tk.Tk()
        self.win.title("PI Gaming console")
        self.win.attributes("-fullscreen", True)

        self.exitButton = tk.Button(self.win, text = "Quit", command = self.exitProgram)
        self.exitButton.grid(row = 0, column = 0, sticky=tk.NW, padx=(10, 0), pady=(10, 0))

        self.menu = ConsoleMenu(self.win, 300, 250)
        self.menu.grid(row = 1, column = 0, sticky=tk.NW, padx=(10, 10), pady=(0, 0))

        self.legend = Legend(self.win, 300, 400)
        self.legend.grid(row = 2, column = 0, sticky=tk.NW, padx=(10, 10), pady=(0, 0))

        self.startSlideShow()

        self.win.mainloop()

    def exitProgram(self):
        self.keepRunning = False
        print("Finished")
        self.win.quit()

    def clearWindow(self):
        if self.slideShow != None:
            self.slideShow.stop()
            self.slideShow = None
        if self.pong != None:
            self.pong.stop()
            self.pong = None
        self.slideShowBusy = False
        self.pongBusy = False
        time.sleep(0.5)

    def startSlideShow(self):
        self.clearWindow()
        self.menu.setSelected(1)
        self.legend.setLegend(1)
        self.slideShow = SlideShow(self.win, self.win.winfo_screenwidth() - 300, self.win.winfo_screenheight() - 50)
        self.slideShow.grid(row = 0, column = 2, rowspan = 3, sticky=tk.NSEW, pady=(10, 10))
        self.slideShowBusy = True

    def startPong(self):
        self.clearWindow()
        self.menu.setSelected(2)
        self.legend.setLegend(2)
        self.pong = PongGui(self.win, self.win.winfo_screenwidth() - 300, self.win.winfo_screenheight() - 50)
        self.pong.grid(row = 0, column = 2, rowspan = 3, sticky=tk.NSEW, pady=(10, 10))
        self.pongBusy = True


if __name__ == "__main__":
    piGameConsole = PiGameConsole()

    # Start a thread to check if a game is running
    piGameConsole.keepRunning = True
    thread.start_new_thread(piGameConsole.preventScreensaver, ())
    thread.start_new_thread(piGameConsole.checkInput, ())
    piGameConsole.startGUI()
apache-2.0
330,923,120,042,564,540
30.121951
116
0.508817
false
4.054011
false
false
false
xiiicyw/Data-Wrangling-with-MongoDB
Lesson_4_Problem_Set/03-Updating_Schema/update.py
1
3482
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with another type of infobox data, audit it,
clean it, come up with a data model, insert it into a MongoDB and then run
some queries against your database. The set contains data about Arachnid class.

The data is already in the database. But you have been given a task to also
include 'binomialAuthority' information in the data, so you have to go through
the data and update the existing entries.

The following things should be done in the function add_field:
- process the csv file and extract 2 fields - 'rdf-schema#label' and
  'binomialAuthority_label'
- clean up the 'rdf-schema#label' same way as in the first exercise - removing
  redundant "(spider)" suffixes
- return a dictionary, with 'label' being the key, and
  'binomialAuthority_label' the value
- if 'binomialAuthority_label' is "NULL", skip the item

The following should be done in the function update_db:
- query the database by using the field 'label'
- update the data, by adding a new item under 'classification' with a key
  'binomialAuthority'

The resulting data should look like this:
- the output structure should be as follows:

{ 'label': 'Argiope',
  'uri': 'http://dbpedia.org/resource/Argiope_(spider)',
  'description': 'The genus Argiope includes rather large and spectacular spiders that often ...',
  'name': 'Argiope',
  'synonym': ["One", "Two"],
  'classification': {
                    'binomialAuthority': None,
                    'family': 'Orb-weaver spider',
                    'class': 'Arachnid',
                    'phylum': 'Arthropod',
                    'order': 'Spider',
                    'kingdom': 'Animal',
                    'genus': None
                    }
}
"""
import codecs
import csv
import json
import pprint

DATAFILE = 'arachnid.csv'
FIELDS = {'rdf-schema#label': 'label',
          'binomialAuthority_label': 'binomialAuthority'}


def add_field(filename, fields):
    process_fields = fields.keys()
    data = {}
    with open(filename, "r") as f:
        reader = csv.DictReader(f)
        # skip the three metadata rows at the top of the file
        for i in range(3):
            l = reader.next()
        for line in reader:
            # clean the label the same way as in the first exercise:
            # strip redundant "(spider)"-style suffixes, and use the
            # cleaned label as the dictionary key
            key = line['rdf-schema#label']
            if key.find('(') != -1:
                key = key.split('(')[0].strip()
            value = line['binomialAuthority_label']
            if value != "NULL":
                data[key] = value
    return data


def update_db(data, db):
    for element in data:
        db.arachnid.update({'label': element},
                           {"$set": {"classification.binomialAuthority": data[element]}})


def test():
    # Please change only the add_field and update_db functions!
    # Changes done to this function will not be taken into account
    # when doing a Test Run or Submit, they are just for your own reference
    # and as an example for running this code locally!
    data = add_field(DATAFILE, FIELDS)

    from pymongo import MongoClient
    client = MongoClient("mongodb://localhost:27017")
    db = client.examples

    update_db(data, db)

    updated = db.arachnid.find_one({'label': 'Opisthoncana'})
    assert updated['classification']['binomialAuthority'] == 'Embrik Strand'
    pprint.pprint(data)


if __name__ == "__main__":
    test()
agpl-3.0
-41,363,206,728,056,340
32.815534
108
0.63297
false
3.903587
false
false
false
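A quick sanity check of the label-cleaning rule in add_field above; the rows here are invented stand-ins for arachnid.csv, and only the "(spider)"-suffix handling and the NULL skip are taken from the file:

# Hypothetical rows shaped like the two columns add_field reads.
rows = [
    {'rdf-schema#label': 'Argiope (spider)', 'binomialAuthority_label': 'Audouin'},
    {'rdf-schema#label': 'Opisthoncana', 'binomialAuthority_label': 'Embrik Strand'},
    {'rdf-schema#label': 'Heptathela', 'binomialAuthority_label': 'NULL'},  # should be skipped
]
data = {}
for line in rows:
    key = line['rdf-schema#label']
    if key.find('(') != -1:
        key = key.split('(')[0].strip()  # 'Argiope (spider)' -> 'Argiope'
    if line['binomialAuthority_label'] != "NULL":
        data[key] = line['binomialAuthority_label']
assert data == {'Argiope': 'Audouin', 'Opisthoncana': 'Embrik Strand'}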
yunhaowang/IDP-APA
utilities/py_idpapa_assign_sr.py
1
7753
#!/usr/bin/env python
import sys,re,time,argparse
from multiprocessing import cpu_count,Pool

def main(args):
    # print >>sys.stdout, "Start analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
    output_gpd = args.output
    iso_list = get_iso_info(args.isoform)
    p = Pool(processes=args.cpu)
    csize = 100
    results = p.imap(func=assignment,iterable=generate_tx(args.short_reads,iso_list),chunksize=csize)
    for res in results:
        if not res: continue
        output_gpd.write(res+"\n")
    output_gpd.close()
    # print >>sys.stdout, "Finish analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")

def generate_tx(input_sr,iso_list):
    z = 0
    for line in input_sr:
        z += 1
        yield (line,z,iso_list)

# align first mate without splice alignment
def align_first_mate_s(strand,iso_exon_start,iso_exon_end,sr_exon_start,sr_exon_end):
    indic = "mismatch"
    if strand == "+":
        if int(sr_exon_start.split(",")[0]) >= int(iso_exon_start.split(",")[-2]) and int(sr_exon_end.split(",")[0]) <= int(iso_exon_end.split(",")[-2]):
            indic = "match"
        else:
            indic = "mismatch"
    else:
        if int(sr_exon_start.split(",")[0]) >= int(iso_exon_start.split(",")[0]) and int(sr_exon_end.split(",")[0]) <= int(iso_exon_end.split(",")[0]):
            indic = "match"
        else:
            indic = "mismatch"
    return indic

# align first mate with splice alignment
def align_first_mate_m(strand,iso_exon_number,iso_exon_start,iso_exon_end,sr_exon_number,sr_exon_start,sr_exon_end):
    iso_junc_list = []
    sr_junc_list = []
    indic = "mismatch"
    for i in range(0,int(iso_exon_number)-1):
        iso_junc_list.append(iso_exon_end.split(",")[i])
        iso_junc_list.append(iso_exon_start.split(",")[i+1])
    iso_junc_set = "," + ",".join(iso_junc_list) + ","
    iso_whole_set = "," + iso_exon_start.split(",")[0] + iso_junc_set + iso_exon_end.split(",")[-2] + ","
    for i in range(0,int(sr_exon_number)-1):
        sr_junc_list.append(sr_exon_end.split(",")[i])
        sr_junc_list.append(sr_exon_start.split(",")[i+1])
    sr_junc_set = "," + ",".join(sr_junc_list) + ","
    if strand == "+":
        pattern = sr_junc_set + "$"
        if int(sr_exon_end.split(",")[-2]) <= int(iso_exon_end.split(",")[-2]) and re.search(pattern,iso_junc_set) and int(sr_exon_start.split(",")[0]) >= int(iso_whole_set.split(sr_junc_set)[0].split(",")[-1]):
            indic = "match"
        else:
            indic = "mismatch"
    else:
        pattern = "^" + sr_junc_set
        if int(sr_exon_start.split(",")[0]) >= int(iso_exon_start.split(",")[0]) and re.search(pattern,iso_junc_set) and int(sr_exon_end.split(",")[-2]) <= int(iso_whole_set.split(sr_junc_set)[1].split(",")[0]):
            indic = "match"
        else:
            indic = "mismatch"
    return indic

# align second mate without splice alignment
def align_second_mate_s(iso_exon_number,iso_exon_start,iso_exon_end,sr_exon_start,sr_exon_end):
    indic = "mismatch"
    if int(iso_exon_number) == 1:
        if int(sr_exon_start.split(",")[0]) >= int(iso_exon_start.split(",")[0]) and int(sr_exon_end.split(",")[0]) <= int(iso_exon_end.split(",")[0]):
            indic = "match"
        else:
            indic = "mismatch"
    else:
        for i in range(0,int(iso_exon_number)):
            if int(sr_exon_start.split(",")[0]) >= int(iso_exon_start.split(",")[i]) and int(sr_exon_end.split(",")[0]) <= int(iso_exon_end.split(",")[i]):
                indic = "match"
                break
            else:
                indic = "mismatch"
    return indic

# align second mate with splice alignment
def align_second_mate_m(iso_exon_number,iso_exon_start,iso_exon_end,sr_exon_number,sr_exon_start,sr_exon_end):
    iso_junc_list = []
    sr_junc_list = []
    indic = "mismatch"
    for i in range(0,int(iso_exon_number)-1):
        iso_junc_list.append(iso_exon_end.split(",")[i])
        iso_junc_list.append(iso_exon_start.split(",")[i+1])
    iso_junc_set = "," + ",".join(iso_junc_list) + ","
    iso_whole_set = "," + iso_exon_start.split(",")[0] + iso_junc_set + iso_exon_end.split(",")[-2] + ","
    for i in range(0,int(sr_exon_number)-1):
        sr_junc_list.append(sr_exon_end.split(",")[i])
        sr_junc_list.append(sr_exon_start.split(",")[i+1])
    sr_junc_set = "," + ",".join(sr_junc_list) + ","
    if re.search(sr_junc_set,iso_junc_set) and len(iso_whole_set.split(sr_junc_set)[0].split(","))%2 == 0 and int(sr_exon_start.split(",")[0]) >= int(iso_whole_set.split(sr_junc_set)[0].split(",")[-1]) and int(sr_exon_end.split(",")[-2]) <= int(iso_whole_set.split(sr_junc_set)[1].split(",")[0]):
        indic = "match"
    else:
        indic = "mismatch"
    return indic

# extract pseudo isoform information
def get_iso_info(iso_gpd):
    iso_list = []
    for line in iso_gpd:
        iso_list.append(line.strip())
    iso_gpd.close()  # close the input handle before returning the list
    return iso_list

def assignment(inputs):
    (line,z,iso_list) = inputs
    read_id,chr,strand,start,end,mapq_1,sf_1,exon_number_1,exon_start_1,exon_end_1,mapq_2,sf_2,exon_number_2,exon_start_2,exon_end_2 = line.rstrip("\n").split("\t")
    sr_info = line.rstrip("\n")
    sr_polya_iso = []
    for iso in iso_list:
        gene_id,isoform_id,iso_chr,iso_strand,tss,tts,cds_start,cds_end,exon_number,exon_start,exon_end = iso.split("\t")
        if iso_chr == chr and iso_strand == strand and int(tss) <= int(start) and int(tts) >= int(end) and int(exon_number) >= int(exon_number_1) and int(exon_number) >= int(exon_number_2):
            if int(exon_number_1) == 1 and int(exon_number_2) == 1:
                indic_1 = align_first_mate_s(strand,exon_start,exon_end,exon_start_1,exon_end_1)
                indic_2 = align_second_mate_s(exon_number,exon_start,exon_end,exon_start_2,exon_end_2)
                if indic_1 == "match" and indic_2 == "match":
                    sr_polya_iso.append(isoform_id)
            elif int(exon_number_1) == 1 and int(exon_number_2) > 1:
                indic_1 = align_first_mate_s(strand,exon_start,exon_end,exon_start_1,exon_end_1)
                indic_2 = align_second_mate_m(exon_number,exon_start,exon_end,exon_number_2,exon_start_2,exon_end_2)
                if indic_1 == "match" and indic_2 == "match":
                    sr_polya_iso.append(isoform_id)
            elif int(exon_number_1) > 1 and int(exon_number_2) == 1:
                indic_1 = align_first_mate_m(strand,exon_number,exon_start,exon_end,exon_number_1,exon_start_1,exon_end_1)
                indic_2 = align_second_mate_s(exon_number,exon_start,exon_end,exon_start_2,exon_end_2)
                if indic_1 == "match" and indic_2 == "match":
                    sr_polya_iso.append(isoform_id)
            else:
                indic_1 = align_first_mate_m(strand,exon_number,exon_start,exon_end,exon_number_1,exon_start_1,exon_end_1)
                indic_2 = align_second_mate_m(exon_number,exon_start,exon_end,exon_number_2,exon_start_2,exon_end_2)
                if indic_1 == "match" and indic_2 == "match":
                    sr_polya_iso.append(isoform_id)
    if sr_polya_iso != []:
        return line.rstrip("\n") + "\t" + ",".join(sr_polya_iso)
    else:
        return None

def do_inputs():
    output_gpd_format = '''
1. read id
2. chromosome
3. strand
4. start site of alignment of fragment
5. end site of alignment of fragment
6. MAPQ of read1 (mate1)
7. Number of nucleotides that are softly-clipped by aligner (mate1)
8. exon number (mate1)
9. exon start set (mate1)
10. exon end set (mate1)
11. MAPQ of read2 (mate2)
12. Number of nucleotides that are softly-clipped by aligner (mate2)
13. exon number (mate2)
14. exon start set (mate2)
15. exon end set (mate2)
16. isoform set containing this polyA site'''
    parser = argparse.ArgumentParser(description="Function: assign the polyA sites identified by short reads to specific isoforms",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-r','--short_reads',type=argparse.FileType('r'),required=True,help="Short reads gpd file")
    parser.add_argument('-i','--isoform',type=argparse.FileType('r'),required=True,help="Input: isoform gpd file")
    parser.add_argument('-o','--output',type=argparse.FileType('w'),required=True,help="Output: short reads with assigned isoforms")
    parser.add_argument('-p','--cpu',type=int,default=cpu_count(),help="Number of process")
    args = parser.parse_args()
    return args

if __name__=="__main__":
    args = do_inputs()
    main(args)
apache-2.0
-6,389,944,135,169,664,000
43.815029
293
0.659229
false
2.449605
false
false
false
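To make the junction bookkeeping in the aligners above concrete, here is a small worked example of how the comma-delimited junction strings are built and compared (the coordinates are invented; the string layout mirrors align_second_mate_m):

# Isoform with 3 exons: starts "100,300,500," and ends "200,400,600," (gpd style).
iso_exon_start, iso_exon_end = "100,300,500,", "200,400,600,"
iso_junc_list = []
for i in range(0, 3 - 1):
    iso_junc_list.append(iso_exon_end.split(",")[i])        # donor site of exon i
    iso_junc_list.append(iso_exon_start.split(",")[i + 1])  # acceptor site of exon i+1
iso_junc_set = "," + ",".join(iso_junc_list) + ","
assert iso_junc_set == ",200,300,400,500,"

# A spliced read covering only the first junction produces:
sr_junc_set = ",200,300,"
assert sr_junc_set in iso_junc_set  # the substring test that re.search performs above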
CybOXProject/python-cybox
cybox/objects/win_prefetch_object.py
1
2182
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

from mixbox import entities
from mixbox import fields

import cybox.bindings.win_prefetch_object as win_prefetch_binding
from cybox.objects.device_object import Device
from cybox.objects.win_volume_object import WinVolume
from cybox.common import String, DateTime, Long, ObjectProperties


class AccessedFileList(entities.EntityList):
    _binding = win_prefetch_binding
    _binding_class = win_prefetch_binding.AccessedFileListType
    _namespace = "http://cybox.mitre.org/objects#WinPrefetchObject-2"

    accessed_file = fields.TypedField("Accessed_File", String, multiple=True)


class AccessedDirectoryList(entities.EntityList):
    _binding = win_prefetch_binding
    _binding_class = win_prefetch_binding.AccessedDirectoryListType
    _namespace = "http://cybox.mitre.org/objects#WinPrefetchObject-2"

    accessed_directory = fields.TypedField("Accessed_Directory", String, multiple=True)


class Volume(entities.Entity):
    _binding = win_prefetch_binding
    _binding_class = win_prefetch_binding.VolumeType
    _namespace = "http://cybox.mitre.org/objects#WinPrefetchObject-2"

    volumeitem = fields.TypedField("VolumeItem", WinVolume, multiple=True)
    deviceitem = fields.TypedField("DeviceItem", Device, multiple=True)


class WinPrefetch(ObjectProperties):
    _binding = win_prefetch_binding
    _binding_class = win_prefetch_binding.WindowsPrefetchObjectType
    _namespace = "http://cybox.mitre.org/objects#WinPrefetchObject-2"
    _XSI_NS = "WinPrefetchObj"
    _XSI_TYPE = "WindowsPrefetchObjectType"

    application_file_name = fields.TypedField("Application_File_Name", String)
    prefetch_hash = fields.TypedField("Prefetch_Hash", String)
    times_executed = fields.TypedField("Times_Executed", Long)
    first_run = fields.TypedField("First_Run", DateTime)
    last_run = fields.TypedField("Last_Run", DateTime)
    volume = fields.TypedField("Volume", WinVolume)
    accessed_file_list = fields.TypedField("Accessed_File_List", AccessedFileList)
    accessed_directory_list = fields.TypedField("Accessed_Directory_List", AccessedDirectoryList)
bsd-3-clause
-1,243,254,732,700,253,700
40.961538
97
0.762603
false
3.673401
false
false
false
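A minimal usage sketch for the WinPrefetch class above; the field values are invented, and the to_dict() round-trip is assumed from the standard mixbox/cybox Entity behaviour rather than anything specific to this module:

from cybox.objects.win_prefetch_object import WinPrefetch
from cybox.common import String, Long

prefetch = WinPrefetch()
prefetch.application_file_name = String("CMD.EXE")  # sample value
prefetch.prefetch_hash = String("CA8E39B1")         # sample value
prefetch.times_executed = Long(12)                  # sample value

# Serialize to a plain dict (e.g. for JSON output).
print(prefetch.to_dict())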
Hannimal/raspicar
ps3Controller/ps3joy.py
1
1520
#!/usr/bin/env python
# coding: Latin-1

import sys
import smbus
import time

bus = smbus.SMBus(1)
address = 0x2a

try:
    pipe = open('/dev/input/js0', 'r')
    print('/dev/input/js0 Available')
except:
    print('/dev/input/js0 not Available')
    sys.exit(0)

msg = []
position = [0, 0, 0, 0]

def StringToBytes(val):
    retVal = []
    for c in val:
        retVal.append(ord(c))
    return retVal

def sendData(val):
    try:
        #print(val)
        bus.write_i2c_block_data(address, 1, val)
    except:
        pass

def getRange(device):
    status = bus.read_byte(device)
    #time.sleep(0.01)
    return status

while 1:
    try:
        for char in pipe.read(1):
            msg += [char]
            #print(msg)
            if len(msg) == 8:
                # Button event if 6th byte is 1
                if ord(msg[6]) == 1:
                    position[3] = ord(msg[7])
                    position[2] = ord(msg[4])
                    print(getRange(address))
                # Axis event if 6th byte is 2
                if ord(msg[6]) == 2:
                    # define Axis
                    if ord(msg[7]) == 2:
                        # define right joy
                        position[0] = ord(msg[5])
                    if ord(msg[7]) == 1:
                        # define left joy
                        position[1] = ord(msg[5])
                sendData(position)
                msg = []
    except KeyboardInterrupt:
        sendData([0, 0])
        raise
    except:
        print('Lost Connection')
        sendData([0, 0])
        sys.exit(0)
unlicense
6,163,764,360,669,899,000
23.126984
59
0.483553
false
3.478261
false
false
false
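The byte offsets hard-coded above follow the Linux joystick interface: every read from /dev/input/js0 returns an 8-byte struct js_event (u32 time, s16 value, u8 type, u8 number), which is why the script waits for len(msg) == 8 and inspects bytes 6 and 7. An equivalent, less byte-by-byte way to decode the same events:

import struct

EVENT_FORMAT = "<IhBB"                      # time, value, type, number
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)  # == 8

with open('/dev/input/js0', 'rb') as pipe:
    ev = pipe.read(EVENT_SIZE)
    time_ms, value, ev_type, number = struct.unpack(EVENT_FORMAT, ev)
    if ev_type & 0x01:  # JS_EVENT_BUTTON
        print('button', number, 'pressed' if value else 'released')
    if ev_type & 0x02:  # JS_EVENT_AXIS
        print('axis', number, 'position', value)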
shouldmakemusic/yaas
controller/RedFrameController.py
1
3038
# Copyright (C) 2015 Manuel Hirschauer ([email protected])
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Manuel Hirschauer <[email protected]>
"""
    Control the behavior of the red frame
"""
from YaasController import *

class RedFrameController(YaasController):
    """
        Control the behavior of the red frame
    """

    def __init__(self, yaas):
        YaasController.__init__(self, yaas)
        self.log.debug("(RedFrameController) init")

    def play_clip(self, params, value):
        """
            Plays the xth clip in the red frame.
            At the moment this works only for the track style red frame.
            Has to be tested when trying different styles for the red frame.

            @param params[0]: clip_number
        """
        self.log.verbose("(RedFrameController) play_clip called")
        clip_number = params[0]
        self.log.verbose("(RedFrameController) for clip " + str(clip_number))
        self.log.verbose("(RedFrameController) scene_offset: " + str(self.yaas.get_session()._scene_offset))
        #if (clip_number > 4):
        """clip_number = clip_number -1"""
        clip_number = self.yaas.get_session()._scene_offset + clip_number
        self.log.verbose("(RedFrameController) calculated number " + str(clip_number))

        self.song_helper().get_selected_track().fire(clip_number)

    def move_track_view_vertical(self, params, value):
        """
            Moves the current position down or up

            @param params[0]: True ? down : up
        """
        self.log.verbose("(RedFrameController) move_track_view_vertical called")
        down = params[0]
        self.log.verbose("(RedFrameController) down? " + str(down))

        self.view_helper().move_track_view_vertical(down)

    def move_track_view_horizontal(self, params, value):
        """
            Moves the red frame left or right

            @param params[0]: True ? right : left
        """
        self.log.verbose("(RedFrameController) move_track_view_horizontal called")
        right = params[0]
        self.log.verbose("(RedFrameController) right? " + str(right))

        self.view_helper().move_track_view_horizontal(right)
gpl-2.0
-3,350,587,529,291,833,000
38.973684
108
0.634628
false
4.077852
false
false
false
3dfxsoftware/cbss-addons
report_profit/wizard/wiz_trial_cost.py
1
3732
#!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
#    Module Written to OpenERP, Open Source Management Solution
#    Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
#    All Rights Reserved
# Credits######################################################
#    Coded by: [email protected]
#    Planified by: Nhomar Hernandez
#    Audited by: Vauxoo C.A.
#############################################################################
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
##########################################################################

from openerp.osv import osv, fields
import openerp.tools as tools
from openerp.tools.translate import _
import openerp.netsvc as netsvc
import time
import datetime
from mx.DateTime import *


class trial_cost(osv.TransientModel):
    logger = netsvc.Logger()
    _name = "trial.cost"

    _columns = {
        'date_start': fields.date('Start Date', required=True),
        'period_length': fields.integer('Period length (days)', required=True),
        'user_res_id': fields.many2one('res.users', 'Salesman'),
        'partner_res_id': fields.many2one('res.partner', 'Partner'),
        'cat_res_id': fields.many2one('product.category', 'Category'),
        'u_check': fields.boolean('Check salesman?'),
        'p_check': fields.boolean('Check partner?'),
        'c_check': fields.boolean('Check category?'),
    }

    _defaults = {
        'period_length': lambda *a: 30,
    }

    def action_print(self, cr, uid, ids, data, context=None):
        if context is None:
            context = {}
        data = {}
        data['ids'] = context.get('active_ids', [])
        data['model'] = context.get('active_model', 'ir.ui.menu')
        data['form'] = self.read(cr, uid, ids[0])
        form = data['form']
        if not form['u_check'] and not form['p_check'] and not form['c_check']:
            raise osv.except_osv(_('User Error'), _('You must check one box !'))
        res = {}
        period_length = data['form']['period_length']
        if period_length <= 0:
            raise osv.except_osv(_('UserError'), _('You must enter a period length that cannot be 0 or below !'))
        start = datetime.date.fromtimestamp(time.mktime(
            time.strptime(data['form']['date_start'], "%Y-%m-%d")))
        start = DateTime(int(start.year), int(start.month), int(start.day))
        for i in range(4)[::-1]:
            stop = start - RelativeDateTime(days=period_length)
            res[str(i)] = {
                'name': str((4 - (i + 1)) * period_length) + '-' + str((4 - i) * period_length),
                'stop': start.strftime('%Y-%m-%d'),
                'start': stop.strftime('%Y-%m-%d'),
            }
            start = stop - RelativeDateTime(days=1)
        data['form'].update(res)
        return {'type': 'ir.actions.report.xml',
                'report_name': 'profit.trial.cost',
                'datas': data}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
gpl-2.0
1,987,104,676,452,728,800
40.010989
79
0.55761
false
3.949206
false
false
false
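The window arithmetic in action_print above is easy to check outside OpenERP; this mirror swaps mx.DateTime for the standard library's datetime.timedelta (an assumption made for the sketch, the original uses RelativeDateTime):

import datetime

def periods(date_start, period_length=30):
    # Four consecutive windows walking back in time from date_start,
    # keyed "3" (most recent, '0-30') down to "0" ('90-120').
    start = datetime.datetime.strptime(date_start, "%Y-%m-%d").date()
    res = {}
    for i in reversed(range(4)):
        stop = start - datetime.timedelta(days=period_length)
        res[str(i)] = {
            'name': str((4 - (i + 1)) * period_length) + '-' + str((4 - i) * period_length),
            'stop': start.strftime('%Y-%m-%d'),
            'start': stop.strftime('%Y-%m-%d'),
        }
        start = stop - datetime.timedelta(days=1)
    return res

print(periods("2015-06-30")["3"])  # {'name': '0-30', 'stop': '2015-06-30', 'start': '2015-05-31'}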
gannetson/sportschooldeopenlucht
apps/fund/migrations/0002_add_recurring_direct_debit_payment.py
1
17485
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'RecurringDirectDebitPayment' db.create_table(u'fund_recurringdirectdebitpayment', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['accounts.BlueBottleUser'], unique=True)), ('active', self.gf('django.db.models.fields.BooleanField')(default=False)), ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=35)), ('city', self.gf('django.db.models.fields.CharField')(max_length=35)), ('account', self.gf('apps.fund.fields.DutchBankAccountField')(max_length=10)), )) db.send_create_signal(u'fund', ['RecurringDirectDebitPayment']) def backwards(self, orm): # Deleting model 'RecurringDirectDebitPayment' db.delete_table(u'fund_recurringdirectdebitpayment') models = { u'accounts.bluebottleuser': { 'Meta': {'object_name': 'BlueBottleUser'}, 'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}), 'availability': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}), 'available_time': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'contribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}), 'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'cowry.payment': { 'Meta': {'object_name': 'Payment'}, 'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3'}), 'fee': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'payment_method_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}), 'payment_submethod_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '15', 'db_index': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}) }, u'fund.customvoucherrequest': { 'Meta': {'object_name': 'CustomVoucherRequest'}, 'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.BlueBottleUser']", 'null': 'True'}), 'contact_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}), 'contact_name': 
('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'contact_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}), 'number': ('django.db.models.fields.PositiveIntegerField', [], {}), 'organization': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}), 'value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}) }, u'fund.donation': { 'Meta': {'object_name': 'Donation'}, 'amount': ('django.db.models.fields.PositiveIntegerField', [], {}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'donation_type': ('django.db.models.fields.CharField', [], {'default': "'one_off'", 'max_length': '20', 'db_index': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.BlueBottleUser']", 'null': 'True', 'blank': 'True'}) }, u'fund.order': { 'Meta': {'object_name': 'Order'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'payments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'orders'", 'symmetrical': 'False', 'to': u"orm['cowry.Payment']"}), 'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'current'", 'max_length': '20', 'db_index': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.BlueBottleUser']", 'null': 'True', 'blank': 'True'}) }, u'fund.orderitem': { 'Meta': {'object_name': 'OrderItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fund.Order']"}) }, u'fund.recurringdirectdebitpayment': { 'Meta': {'object_name': 'RecurringDirectDebitPayment'}, 'account': ('apps.fund.fields.DutchBankAccountField', [], {'max_length': '10'}), 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'city': 
('django.db.models.fields.CharField', [], {'max_length': '35'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '35'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounts.BlueBottleUser']", 'unique': 'True'}) }, u'fund.voucher': { 'Meta': {'object_name': 'Voucher'}, 'amount': ('django.db.models.fields.PositiveIntegerField', [], {}), 'code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'currency': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}), 'donations': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['fund.Donation']", 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}), 'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}), 'receiver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver'", 'null': 'True', 'to': u"orm['accounts.BlueBottleUser']"}), 'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'receiver_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'sender': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sender'", 'null': 'True', 'to': u"orm['accounts.BlueBottleUser']"}), 'sender_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'sender_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}) }, u'projects.partnerorganization': { 'Meta': {'object_name': 'PartnerOrganization'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, u'projects.project': { 'Meta': {'ordering': "['title']", 'object_name': 'Project'}, 'coach': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_member'", 'null': 'True', 'to': u"orm['accounts.BlueBottleUser']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['accounts.BlueBottleUser']"}), 'partner_organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.PartnerOrganization']", 'null': 'True', 'blank': 'True'}), 'phase': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'popularity': 
('django.db.models.fields.FloatField', [], {'default': '0'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}) }, u'taggit.tag': { 'Meta': {'object_name': 'Tag'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, u'taggit.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"}) } } complete_apps = ['fund']
bsd-3-clause
2,081,662,664,729,660,000
83.883495
196
0.561796
false
3.66178
false
false
false
khosrow/metpx
sundew/lib/MasterConfigurator.py
1
10676
""" MetPX Copyright (C) 2004-2007 Environment Canada MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file named COPYING in the root of the source directory tree. """ """ ############################################################################################# # Name: MasterConfigurator.py # # Author: Daniel Lemay # # Date: 2007-11-15 # # Description: # ############################################################################################# """ import sys, os, os.path, commands, re, time, fnmatch import PXPaths from SystemManager import SystemManager from PXManager import PXManager class MasterConfigurator(object): def __init__(self, rootPath=""): if os.path.isdir('/users/dor/aspy/dan/data/master/'): self.rootPath = '/users/dor/aspy/dan/data/master/' # developpment machine elif rootPath: self.rootPath = os.path.normpath(rootPath) + '/' else: self.rootPath = '/apps/master/' # path under wich are the clusters and all the configs. files self.types = ['source', 'client', 'sourlient'] # Possible type of flows self.initAll() def initAll(self): self.clusters = [] # cluster names (same as dsh) self.dupSources = [] # Duplicate sources (when you combine sources from all clusters) self.dupClients = [] # Duplicate clients (when you combine clients from all clusters) self.dupSourlients = [] # Duplicate sourlients (when you combine sourlients from all clusters) self.dupFlows = [] # Duplicate flows (when you combine flows (sources, clients, sourlients) from all clusters) self.allSources = [] # All sources from all clusters (with duplicates removed) self.allClients = [] # All clients from all clusters (with duplicates removed) self.allSourlients = [] # All sourlients from all clusters (with duplicated removed) self.allFlows = [] # All flows (sources, clients, sourlients) from all clusters (with duplicated removed) self.sourceCluster = {} # A mapping from a source to it's cluster self.clientCluster = {} # A mapping from a client to it's cluster self.sourlientCluster = {} # A mapping from a sourlient to it's cluster self.flowCluster = {} # A mapping from a flow to it's cluster def printClusterInfos(self, flowCluster): keys = flowCluster.keys() keys.sort() for key in keys: print "%s: %s" % (key, flowCluster[key]) def setMachine(self, machine): self.machine = machine def setUser(self, user): self.user = user def setClusters(self, list): self.clusters = list def findClient(self, clusters=None, ip="", name=""): """ clusters: a list of clusters (ex: ['pds', 'px', 'pxatx']) ip: IP address (ex: '192.168.1.1') name: hostname (ex: 'metmgr') Only one argument in (ip, name) must be non null """ import socket clusters = clusters or self.clusters cliClust = [] if ip: try: # get the first part of the fully qualified domain name name = socket.gethostbyaddr(ip)[0].split('.')[0] except: pass elif name: try: ip = socket.gethostbyname(name) except: pass for cluster in clusters: clusterRoot = self.rootPath + cluster PXPaths.normalPaths(clusterRoot) if ip and name: command = "grep -l -E '%s|%s' %s" % (ip, name, PXPaths.TX_CONF + "*.conf") elif ip: command = "grep -l -E '%s' %s" % (ip, PXPaths.TX_CONF + "*.conf") elif name: command = "grep -l -E '%s' %s" % (name, PXPaths.TX_CONF + "*.conf") #print "%s" % cluster.upper() output = commands.getoutput(command) clients = [ (os.path.basename(cli)[:-5], cluster) for cli in output.split()] cliClust.extend(clients) PXPaths.normalPaths() # Reset PXPaths variables return cliClust def getTypeCluster(self, flow, init=False): """ When init is not False, it is a 
cluster list flow is the name of a client, source, sourlient return a list of tuple getTypeCluster('aftn') => [('sourlient', 'pxatx')] getTypeCluster('pds5') => [('source', 'pxatx')] getTypeCluster('metmgr3') => [('client', 'pds'), ('client', 'pxatx')] """ if init: self.initAll() self.clusters = init self.getAllFlows() return self.flowCluster.get(flow, []) def getType(self, flow, init=False): """ When init is not False, it is a cluster list flow is the name of a client, source, sourlient return type of the flow getType('aftn') => 'sourlient' getType('pds5') => 'source' getType('metmgr3') => 'client' """ if init: self.initAll() self.clusters = init self.getAllFlows() type_cluster = self.flowCluster.get(flow, []) if len(type_cluster) == 1: return type_cluster[0][0] else: return len(type_cluster) def getCluster(self, flow, init=False): """ When init is not False, it is a cluster list flow is the name of a client, source, sourlient return the cluster's name on which the flow is present or the number of clusters, if more than one. getCluster('aftn') => 'pxatx' getCluster('pds5') => 'pxatx' gettCluster('metmgr3') => 2 """ if init: self.initAll() self.clusters = init self.getAllFlows() type_cluster = self.flowCluster.get(flow, []) if len(type_cluster) == 1: return type_cluster[0][1] else: return len(type_cluster) def createFlowDict(self): mergedDict = SystemManager.mergeTwoDict(self.sourceCluster, self.clientCluster) return SystemManager.mergeTwoDict(mergedDict, self.sourlientCluster) def getAllFlows(self, noPrint=True): if noPrint: iprint = lambda *x: None else: iprint = lambda *x:sys.stdout.write(" ".join(map(str, x)) + '\n') allSources = [] allClients = [] allSourlients = [] allFlows = [] if not os.path.isdir(self.rootPath): return 1 for cluster in self.clusters: pxm = PXManager(self.rootPath + cluster + '/') if pxm.initNames(): #print (self.rootPath + cluster + " inexistant!") continue clients, sourlients, sources, aliases = pxm.getFlowNames(tuple=True) # Populate flowCluster for current cluster pxm.getFlowDict(self.sourceCluster, sources, 'source', cluster) pxm.getFlowDict(self.clientCluster, clients, 'client', cluster) pxm.getFlowDict(self.sourlientCluster, sourlients, 'sourlient', cluster) allSources.extend(sources) allClients.extend(clients) allSourlients.extend(sourlients) iprint("%s" % (80*'#')) iprint("CLUSTER %s" % cluster.upper()) iprint("%s" % (80*'#')) iprint("sources (%s): %s" % (len(sources), sources)) iprint("clients (%s): %s" % (len(clients), clients)) iprint("sourlients (%s): %s" % (len(sourlients), sourlients)) #print "aliases: %s" % aliases iprint() pxm = PXManager() pxm.initNames() self.flowCluster = self.createFlowDict() self.dupSources = pxm.identifyDuplicate(allSources) self.dupClients = pxm.identifyDuplicate(allClients) self.dupSourlients = pxm.identifyDuplicate(allSourlients) self.allSources = pxm.removeDuplicate(allSources) self.allClients = pxm.removeDuplicate(allClients) self.allSourlients = pxm.removeDuplicate(allSourlients) self.allFlows.extend(allSources) self.allFlows.extend(allClients) self.allFlows.extend(allSourlients) self.dupFlows = pxm.identifyDuplicate(allFlows) self.allFlows = pxm.removeDuplicate(allFlows) iprint("Duplicate between sources from all clusters: %s" % self.dupSources) iprint("Duplicate between clients from all clusters: %s" % self.dupClients) iprint("Duplicate between sourlients from all clusters: %s" % self.dupSourlients) iprint("Duplicate beetween flows (sources, clients, sourlients) from all clusters: %s" % self.dupFlows) iprint() keys = 
self.flowCluster.keys() keys.sort() for key in keys: if len(self.flowCluster[key]) > 1: iprint("%s: %s" % (key, self.flowCluster[key])) iprint("source cluster(%s)" % len(self.sourceCluster)) iprint(self.sourceCluster) iprint("client cluster(%s)" % len(self.clientCluster)) iprint(self.clientCluster) iprint("sourlient cluster(%s)" % len(self.sourlientCluster)) iprint(self.sourlientCluster) iprint("flow cluster(%s)" % len(self.flowCluster)) iprint() if __name__ == '__main__': mc = MasterConfigurator() mc.setClusters(['px', 'pds', 'pxatx']) mc.getAllFlows(noPrint=True) print("%s: %s" % ('metmgr1', mc.getTypeCluster('metmgr1'))) print mc.getType('metmgr1') print mc.getCluster('metmgr1') print("%s: %s" % ('aftn', mc.getTypeCluster('aftn'))) print("%s: %s" % ('pds5', mc.getTypeCluster('pds5'))) print("%s: %s" % ('metmgr3', mc.getTypeCluster('metmgr3'))) print mc.getType('metmgr3') print mc.getCluster('metmgr3') print("%s: %s" % ('px-stage', mc.getTypeCluster('px-stage'))) print mc.getType('px-stage') print mc.getCluster('px-stage') print("%s: %s" % ('pds_metser', mc.getTypeCluster('pds_metser'))) print mc.getType('pds_metser') print mc.getCluster('pds_metser') #print mc.sourceCluster #print mc.clientCluster #print mc.sourlientCluster #print mc.flowCluster mc1 = MasterConfigurator() print mc1.getType('metmgr1', ['px', 'pds', 'pxatx']) print mc1.getCluster('metmgr1') mc1.findClient(ip='199.212.17.60', clusters=['px', 'pxatx', 'pds'])
gpl-2.0
-6,967,763,091,637,412,000
35.941176
133
0.569314
false
3.757832
false
false
false
saicoco/mxnet_image_caption
old/main.py
1
6142
# -*- coding: utf-8 -*-
"""
train module
"""
import mxnet as mx
import numpy as np
import json
import config
import logging
import time
import collections
from sym import vgg16_fc7, caption_module
from data_provider import caption_dataIter, init_cnn
from mxnet.model import save_checkpoint
import argparse

logging.basicConfig(level=logging.INFO)

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoches', default=20, type=int, help="epoches in training-stage", dest='epoches')
    parser.add_argument('--batch_size', default=50, type=int, help="batch_size in training-stage", dest='batch_size')
    parser.add_argument('--num_hidden', default=256, type=int, help="the number of hidden unit", dest='num_hidden')
    parser.add_argument('--lr', default=0.01, type=float, help="learning rate in training-stage", dest='lr')
    parser.add_argument('--freq_val', default=5, type=int, help="frequence of validation", dest='freq_val')
    parser.add_argument('--num_embed', default=256, type=int, help="the number of embedding dimension", dest='num_embed')
    parser.add_argument('--num_lstm_layer', default=256, type=int, help="the number of hidden_unit", dest='num_lstm_layer')
    parser.add_argument('--gpu', default=None, type=str, help="whether to run on a gpu device", dest='gpu')
    parser.add_argument('--prefix', default='./checkpoint/train', type=str, help="prefix of saved checkpoint", dest='prefix')
    parser.add_argument('--period', default=5, type=int, help="times to save checkpoint in training-stage", dest='period')
    return parser.parse_args()

class callbacks:
    def __init__(self, nbatch, eval_metric, epoch):
        self.nbatch = nbatch
        self.eval_metric = eval_metric
        self.epoch = epoch

def main(args):
    learning_rate = args.lr
    epoches = args.epoches
    batch_size = args.batch_size
    num_hidden = args.num_hidden
    num_embed = args.num_embed
    num_lstm_layer = args.num_lstm_layer
    freq_val = args.freq_val
    val_flag = True if args.freq_val > 0 else False
    ctx = mx.cpu(0) if args.gpu is None else mx.gpu(int(args.gpu))
    prefix = args.prefix
    period = args.period

    with open(config.text_root, 'r') as f:
        captions = json.load(f)
    buckets = [10, 20, 30]
    # buckets = None
    train_data = caption_dataIter(captions=captions, batch_size=batch_size, mode='train')
    val_data = caption_dataIter(captions=captions, batch_size=batch_size, mode='val')

    ##########################################################################
    ########################### custom train process #########################
    ##########################################################################
    cnn_shapes = {
        'image_data': (batch_size, 3, 224, 224)
    }
    cnn_sym = vgg16_fc7('image_data')
    cnn_exec = cnn_sym.simple_bind(ctx=ctx, is_train=False, **cnn_shapes)

    lstm = caption_module(num_lstm_layer=num_lstm_layer,
                          seq_len=train_data.sent_length + 2,
                          vocab_size=train_data.vocab_size,
                          num_hidden=num_hidden,
                          num_embed=num_embed,
                          batch_size=batch_size)
    lstm_shapes = {
        'image_feature': (batch_size, 4096),
        'word_data': (batch_size, train_data.sent_length + 2),
        'softmax_label': (batch_size, train_data.sent_length + 2)
    }
    lstm_exec = lstm.simple_bind(ctx=ctx, is_train=True, **lstm_shapes)

    # init params
    pretrain = mx.nd.load(config.vgg_pretrain)
    init_cnn(cnn_exec, pretrain)

    # init optimazer
    optimazer = mx.optimizer.create('adam')
    optimazer.lr = learning_rate
    updater = mx.optimizer.get_updater(optimazer)

    # init metric
    perplexity = mx.metric.Perplexity(ignore_label=-1)
    perplexity.reset()

    # callback
    params = callbacks(nbatch=0, eval_metric=perplexity, epoch=0)
    speedometer = mx.callback.Speedometer(batch_size=batch_size, frequent=20)

    for epoch in range(epoches):
        for i, batch in enumerate(train_data):
            # cnn forward, get image_feature
            cnn_exec.arg_dict['image_data'] = batch.data[0]
            cnn_exec.forward()
            image_feature = cnn_exec.outputs[0]

            # lstm forward
            lstm_exec.arg_dict['image_feature'] = image_feature
            lstm_exec.arg_dict['word_data'] = batch.data[1]
            lstm_exec.arg_dict['softmax_label'] = batch.label
            lstm_exec.forward(is_train=True)
            print batch.label
            params.eval_metric.update(labels=batch.label, preds=lstm_exec.outputs)
            lstm_exec.backward()
            params.epoch = epoch
            params.nbatch += 1
            speedometer(params)

            for j, name in enumerate(lstm.list_arguments()):
                if name not in lstm_shapes.keys():
                    updater(j, lstm_exec.grad_dict[name], lstm_exec.arg_dict[name])
        train_data.reset()
        params.nbatch = 0

        if val_flag and epoch % freq_val == 0:
            for i, batch in enumerate(val_data):
                # cnn forward, get image_feature
                cnn_exec.arg_dict['image_data'] = batch.data[0]
                cnn_exec.forward()
                image_feature = cnn_exec.outputs[0]

                # lstm forward
                lstm_exec.arg_dict['image_feature'] = image_feature
                lstm_exec.arg_dict['word_data'] = batch.data[1]
                lstm_exec.arg_dict['softmax_label'] = batch.label
                lstm_exec.forward(is_train=False)
                params.eval_metric.update(labels=batch.label, preds=lstm_exec.outputs)
                params.epoch = epoch
                params.nbatch += 1
                speedometer(params)
            params.nbatch = 0
            val_data.reset()

        if period:
            save_checkpoint(prefix=prefix, epoch=epoch, symbol=lstm,
                            arg_params=lstm_exec.arg_dict,
                            aux_params=lstm_exec.aux_dict)

if __name__ == '__main__':
    args = parse_args()
    main(args)
mit
-3,475,309,289,568,188,000
38.371795
126
0.588733
false
3.625738
false
false
false
gghezzo/prettypython
PythonEveryDay2015/bigballs.py
1
1044
# Teaching Python Classes by Peter Farrell
# From http://hackingmathclass.blogspot.com/2015/08/finally-some-class.html
# Typer: Ginny C Ghezzo
# What I learned:
# why doesn't the first import bring in locals ??

import pygame
from pygame.locals import *

black = (0, 0, 0)
white = (255, 255, 255)
green = (0, 255, 0)

# ball position
xcor = 100
ycor = 100
# velocity
xvel = 2
yvel = 1
diameter = 20

pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption('Classy Balls')

done = False  # loop until close is clicked
clock = pygame.time.Clock()  # used to manage the screen updates

while not done:
    for event in pygame.event.get():
        if event.type == QUIT:
            done = True
    screen.fill(black)
    if xcor < 0 or xcor > 600 - diameter:
        xvel = -xvel  # make it go the opposite direction
    if ycor < 0 or ycor > 500 - diameter:
        yvel = -yvel
    xcor += xvel
    ycor += yvel
    pygame.draw.ellipse(screen, white, [xcor, ycor, diameter, diameter])
    pygame.display.update()
    clock.tick(120)

pygame.quit()
mit
98,853,344,471,153,950
24.487805
75
0.681992
false
2.704663
false
false
false
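The blog post this file types up ("Finally, some class") refactors the ball into a class; a minimal sketch of where that refactor lands (the names are illustrative, not taken from the post), reusing the pygame setup above:

class Ball:
    def __init__(self, x, y, xvel, yvel, diameter, color):
        self.x, self.y = x, y
        self.xvel, self.yvel = xvel, yvel
        self.diameter = diameter
        self.color = color

    def update(self, width, height):
        # Bounce off the window edges, as in the procedural version.
        if self.x < 0 or self.x > width - self.diameter:
            self.xvel = -self.xvel
        if self.y < 0 or self.y > height - self.diameter:
            self.yvel = -self.yvel
        self.x += self.xvel
        self.y += self.yvel

    def draw(self, screen):
        pygame.draw.ellipse(screen, self.color,
                            [self.x, self.y, self.diameter, self.diameter])

# Inside the main loop: ball.update(600, 500); ball.draw(screen)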
panholt/sparkpy
sparkpy/models/webhook.py
1
2523
# -*- coding: utf-8 -*-

from .base import SparkBase, SparkProperty
from .time import SparkTime


class SparkWebhook(SparkBase):
    ''' Cisco Spark Webhook Model

        :param session: SparkSession object
        :type session: `SparkSession`
        :param \**kwargs: All standard Spark API properties for a Webhook
    '''

    # | Start of class attributes |-------------------------------------------|
    API_BASE = 'https://api.ciscospark.com/v1/webhooks/'
    WEBHOOK_RESOURCES = ['memberships', 'messages', 'rooms', 'all']
    WEBHOOK_EVENTS = ['created', 'updated', 'deleted', 'all']
    WEBHOOK_FILTERS = {'memberships': ['roomId',
                                       'personId',
                                       'personEmail',
                                       'isModerator'],
                       'messages': ['roomId',
                                    'roomType',
                                    'personId',
                                    'personEmail',
                                    'mentionedPeople',
                                    'hasFiles'],
                       'rooms': ['type',
                                 'isLocked']}

    PROPERTIES = {'id': SparkProperty('id'),
                  'name': SparkProperty('name', mutable=True),
                  'targetUrl': SparkProperty('targetUrl', mutable=True),
                  'event': SparkProperty('event'),
                  'resource': SparkProperty('resource'),
                  'filter': SparkProperty('filter', optional=True),
                  'secret': SparkProperty('secret', optional=True),
                  'orgId': SparkProperty('orgId', optional=True),
                  'createdBy': SparkProperty('createdBy', optional=True),
                  'appId': SparkProperty('appId', optional=True),
                  'ownedBy': SparkProperty('ownedBy', optional=True),
                  'status': SparkProperty('status', optional=True),
                  'created': SparkProperty('created', optional=True)}

    # | Start of instance attributes |----------------------------------------|
    def __init__(self, *args, **kwargs):
        super().__init__(*args, path='webhooks', **kwargs)

    def update(self, name, targetUrl):
        data = {'name': name, 'targetUrl': targetUrl}
        self.parent.session.put(self.API_BASE, json=data)
        return

    def __repr__(self):
        return f'SparkWebhook("{self.id}")'

    def __str__(self):
        return f'SparkWebhook({self.name})'
mit
2,799,580,334,978,640,400
41.05
79
0.479191
false
4.769376
false
false
false
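The class-level whitelists above are enough to validate a webhook definition before any API call; a small helper built only on those constants (the helper itself is not part of sparkpy) might look like:

from sparkpy.models.webhook import SparkWebhook

def validate_webhook(resource, event, filter_key=None):
    # resource and event must come from the whitelists defined on the class
    if resource not in SparkWebhook.WEBHOOK_RESOURCES:
        raise ValueError('unknown resource: %r' % resource)
    if event not in SparkWebhook.WEBHOOK_EVENTS:
        raise ValueError('unknown event: %r' % event)
    if filter_key is not None:
        allowed = SparkWebhook.WEBHOOK_FILTERS.get(resource, [])
        if filter_key not in allowed:
            raise ValueError('%r is not a valid filter for %r' % (filter_key, resource))

validate_webhook('messages', 'created', 'roomId')      # passes
# validate_webhook('messages', 'created', 'isLocked')  # raises: 'isLocked' filters rooms only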
chris48s/UK-Polling-Stations
polling_stations/apps/feedback/migrations/0001_initial.py
1
1210
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import django_extensions.db.fields


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Feedback',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(verbose_name='modified', auto_now=True)),
                ('found_useful', models.CharField(choices=[('YES', 'Yes'), ('NO', 'No')], max_length=100, blank=True)),
                ('comments', models.TextField(blank=True)),
                ('source_url', models.CharField(max_length=800, blank=True)),
                ('token', models.CharField(max_length=100, blank=True)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
        ),
    ]
bsd-3-clause
8,918,969,895,613,432,000
38.032258
124
0.567769
false
4.368231
false
false
false
Abjad/abjad
abjad/obgc.py
1
30314
import typing from . import _iterate, mutate, typings from .attach import attach, detach from .bundle import LilyPondFormatBundle from .duration import Duration from .overrides import LilyPondLiteral, tweak from .parentage import Parentage from .pitch.sets import PitchSet from .score import Chord, Container, Note, Voice from .select import Selection from .spanners import beam, slur from .tag import Tag class OnBeatGraceContainer(Container): r""" On-beat grace container. .. note:: On-beat grace containers must be included in a named voice. .. container:: example On-beat grace containers implement custom formatting not available in LilyPond: >>> music_voice = abjad.Voice("c'4 d'4 e'4 f'4", name="Music_Voice") >>> string = "<d' g'>8 a' b' c'' d'' c'' b' a' b' c'' d''" >>> container = abjad.on_beat_grace_container( ... string, music_voice[1:3], leaf_duration=(1, 24) ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t d' g' >8 * 1/3 - \accent [ ( a'8 * 1/3 b'8 * 1/3 c''8 * 1/3 d''8 * 1/3 c''8 * 1/3 b'8 * 1/3 a'8 * 1/3 b'8 * 1/3 c''8 * 1/3 d''8 * 1/3 ) ] } \context Voice = "Music_Voice" { \voiceTwo d'4 e'4 } >> \oneVoice f'4 } } """ ### CLASS VARIABLES ### __slots__ = ("_leaf_duration",) ### INITIALIZER ### def __init__( self, components=None, identifier: str = None, leaf_duration: typings.DurationTyping = None, name: str = None, tag: Tag = None, ) -> None: super().__init__(components, identifier=identifier, name=name, tag=tag) if leaf_duration is not None: leaf_duration = Duration(leaf_duration) self._leaf_duration = leaf_duration ### SPECIAL METHODS ### def __getnewargs__(self): """ Gets new after grace container arguments. Returns tuple of single empty list. """ return ([],) ### PRIVATE METHODS ### # NOTE: format="absolute_before" for \oneVoice so that this works: # # \oneVoice # \override Stem.direction = #down # # ... because this ... # # \override Stem.direction = #down # \oneVoice # # ... doesn't work. # # This is hackish, and some sort of longer term solution should # happen later. 
def _attach_lilypond_one_voice(self): anchor_leaf = self._get_on_beat_anchor_leaf() anchor_voice = Parentage(anchor_leaf).get(Voice) final_anchor_leaf = _iterate._get_leaf(anchor_voice, -1) next_leaf = _iterate._get_leaf(final_anchor_leaf, 1) literal = LilyPondLiteral(r"\oneVoice", format_slot="absolute_before") if next_leaf._has_indicator(literal): return if isinstance(next_leaf._parent, OnBeatGraceContainer): return if self._is_on_beat_anchor_voice(next_leaf._parent): return site = "abjad.OnBeatGraceContainer._attach_lilypond_one_voice()" tag = Tag(site) tag = tag.append(Tag("ONE_VOICE_COMMAND")) attach(literal, next_leaf, tag=tag) def _format_invocation(self): return r'\context Voice = "On_Beat_Grace_Container"' def _format_open_brackets_slot(self, bundle): indent = LilyPondFormatBundle.indent result = [] if self.identifier: open_bracket = f"{{ {self.identifier}" else: open_bracket = "{" brackets_open = [open_bracket] overrides = bundle.grob_overrides settings = bundle.context_settings if overrides or settings: contributions = [self._format_invocation(), r"\with", "{"] contributions = self._tag_strings(contributions) contributions = tuple(contributions) identifier_pair = ("context_brackets", "open") result.append((identifier_pair, contributions)) contributions = [indent + _ for _ in overrides] contributions = self._tag_strings(contributions) contributions = tuple(contributions) identifier_pair = ("overrides", "overrides") result.append((identifier_pair, contributions)) contributions = [indent + _ for _ in settings] contributions = self._tag_strings(contributions) contributions = tuple(contributions) identifier_pair = ("settings", "settings") result.append((identifier_pair, contributions)) contributions = [f"}} {brackets_open[0]}"] contributions = ["}", open_bracket] contributions = self._tag_strings(contributions) contributions = tuple(contributions) identifier_pair = ("context_brackets", "open") result.append((identifier_pair, contributions)) else: contribution = self._format_invocation() contribution += f" {brackets_open[0]}" contributions = [contribution] contributions = [self._format_invocation(), open_bracket] contributions = self._tag_strings(contributions) contributions = tuple(contributions) identifier_pair = ("context_brackets", "open") result.append((identifier_pair, contributions)) return tuple(result) def _get_on_beat_anchor_leaf(self): container = self._parent if container is None: return None if len(container) != 2: raise Exception("Combine on-beat grace container with one other voice.") if container.index(self) == 0: anchor_voice = container[-1] else: assert container.index(self) == 1 anchor_voice = container[0] anchor_leaf = Selection(anchor_voice).leaf(0, grace=False) return anchor_leaf @staticmethod def _is_on_beat_anchor_voice(CONTAINER): wrapper = CONTAINER._parent if wrapper is None: return False if not isinstance(CONTAINER, Voice): return False return OnBeatGraceContainer._is_on_beat_wrapper(wrapper) @staticmethod def _is_on_beat_wrapper(CONTAINER): if not CONTAINER.simultaneous: return False if len(CONTAINER) != 2: return False if isinstance(CONTAINER[0], OnBeatGraceContainer) and isinstance( CONTAINER[1], Voice ): return True if isinstance(CONTAINER[0], Voice) and isinstance( CONTAINER[1], OnBeatGraceContainer ): return True return False def _match_anchor_leaf(self): first_grace = _iterate._get_leaf(self, 0) if not isinstance(first_grace, (Note, Chord)): message = "must start with note or chord:\n" message += f" {repr(self)}" raise Exception(message) anchor_leaf 
= self._get_on_beat_anchor_leaf() if isinstance(anchor_leaf, (Note, Chord)) and isinstance( first_grace, (Note, Chord) ): if isinstance(first_grace, Note): chord = Chord(first_grace) mutate.replace(first_grace, chord) first_grace = chord selection = Selection(anchor_leaf) anchor_pitches = PitchSet.from_selection(selection) highest_pitch = list(sorted(anchor_pitches))[-1] if highest_pitch not in first_grace.note_heads: first_grace.note_heads.append(highest_pitch) grace_mate_head = first_grace.note_heads.get(highest_pitch) tweak(grace_mate_head).font_size = 0 tweak(grace_mate_head).transparent = True def _set_leaf_durations(self): if self.leaf_duration is None: return for leaf in Selection(self).leaves(): duration = leaf._get_duration() if duration != self.leaf_duration: multiplier = self.leaf_duration / duration leaf.multiplier = multiplier ### PUBLIC PROPERTIES ### @property def leaf_duration(self) -> typing.Optional[Duration]: """ Gets leaf duration. """ return self._leaf_duration ### FACTORY FUNCTIONS ### def on_beat_grace_container( contents, anchor_voice_selection, *, anchor_voice_number=2, do_not_beam=None, do_not_slash=None, do_not_slur=None, do_not_stop_polyphony=None, font_size=-3, grace_voice_number=1, leaf_duration=None, ): r""" Makes on-beat grace container and wraps around ``selection``. .. container:: example GRACE NOTES ABOVE. Note-to-note anchor: >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> string = "g'8 a' b' c'' d'' c'' b' a' b' c'' d''" >>> result = abjad.on_beat_grace_container( ... string, music_voice[1:3], leaf_duration=(1, 30) ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t d' g' >8 * 4/15 [ ( a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 c''8 * 4/15 b'8 * 4/15 a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceTwo d'4 e'4 } >> \oneVoice f'4 } } Note-to-chord anchor: >>> music_voice = abjad.Voice( ... "<a c'>4 <b d'> <c' e'> <d' f'>", name="Music_Voice" ... ) >>> string = "g'8 a' b' c'' d'' c'' b' a' b' c'' d''" >>> result = abjad.on_beat_grace_container( ... string, music_voice[1:3], leaf_duration=(1, 30) ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { <a c'>4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t d' g' >8 * 4/15 [ ( a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 c''8 * 4/15 b'8 * 4/15 a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceTwo <b d'>4 <c' e'>4 } >> \oneVoice <d' f'>4 } } Chord-to-note anchor: >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> string = "<g' b'>8 a' b' c'' d'' c'' b' a' b' c'' d''" >>> result = abjad.on_beat_grace_container( ... string, music_voice[1:3], leaf_duration=(1, 30) ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t d' g' b' >8 * 4/15 [ ( a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 c''8 * 4/15 b'8 * 4/15 a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceTwo d'4 e'4 } >> \oneVoice f'4 } } Chord-to-chord anchor: >>> music_voice = abjad.Voice( ... "<a c'>4 <b d'> <c' e'> <d' f'>", name="Music_Voice" ... ) >>> string = "<g' b'>8 a' b' c'' d'' c'' b' a' b' c'' d''" >>> result = abjad.on_beat_grace_container( ... string, music_voice[1:3], leaf_duration=(1, 30) ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { <a c'>4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t d' g' b' >8 * 4/15 [ ( a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 c''8 * 4/15 b'8 * 4/15 a'8 * 4/15 b'8 * 4/15 c''8 * 4/15 d''8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceTwo <b d'>4 <c' e'>4 } >> \oneVoice <d' f'>4 } } .. container:: example GRACE NOTES BELOW. Note-to-note anchor: >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> string = "g8 a b c' d' c' b a b c' d'" >>> result = abjad.on_beat_grace_container( ... string, ... music_voice[1:3], ... anchor_voice_number=1, ... grace_voice_number=2, ... leaf_duration=(1, 30), ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceTwo < g \tweak font-size 0 \tweak transparent ##t d' >8 * 4/15 [ ( a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 c'8 * 4/15 b8 * 4/15 a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceOne d'4 e'4 } >> \oneVoice f'4 } } Note-to-chord anchor: >>> music_voice = abjad.Voice( ... "<c' e'>4 <d' f'> <e' g'> <f' a'>", name="Music_Voice" ... ) >>> string = "g8 a b c' d' c' b a b c' d'" >>> result = abjad.on_beat_grace_container( ... string, ... music_voice[1:3], ... anchor_voice_number=1, ... grace_voice_number=2, ... leaf_duration=(1, 30), ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { <c' e'>4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceTwo < g \tweak font-size 0 \tweak transparent ##t f' >8 * 4/15 [ ( a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 c'8 * 4/15 b8 * 4/15 a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceOne <d' f'>4 <e' g'>4 } >> \oneVoice <f' a'>4 } } Chord-to-note anchor: >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> string = "<e g>8 a b c' d' c' b a b c' d'" >>> result = abjad.on_beat_grace_container( ... string, ... music_voice[1:3], ... anchor_voice_number=1, ... grace_voice_number=2, ... leaf_duration=(1, 30), ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceTwo < e g \tweak font-size 0 \tweak transparent ##t d' >8 * 4/15 [ ( a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 c'8 * 4/15 b8 * 4/15 a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceOne d'4 e'4 } >> \oneVoice f'4 } } Chord-to-chord anchor: >>> music_voice = abjad.Voice( ... "<c' e'>4 <d' f'> <e' g'> <f' a'>", name="Music_Voice" ... ) >>> string = "<e g>8 a b c' d' c' b a b c' d'" >>> result = abjad.on_beat_grace_container( ... string, ... music_voice[1:3], ... anchor_voice_number=1, ... grace_voice_number=2, ... leaf_duration=(1, 30), ... ) >>> staff = abjad.Staff([music_voice]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { <c' e'>4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceTwo < e g \tweak font-size 0 \tweak transparent ##t f' >8 * 4/15 [ ( a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 c'8 * 4/15 b8 * 4/15 a8 * 4/15 b8 * 4/15 c'8 * 4/15 d'8 * 4/15 ) ] } \context Voice = "Music_Voice" { \voiceOne <d' f'>4 <e' g'>4 } >> \oneVoice <f' a'>4 } } .. container:: example Raises exception when duration of on-beat grace container exceeds duration of anchor container: >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> string = "g'8 a' b' c'' d'' c'' b' a' b' c'' d''" >>> result = abjad.on_beat_grace_container( ... string, music_voice[1:2], leaf_duration=(1, 8) ... ) Traceback (most recent call last): ... Exception: grace Duration(11, 8) exceeds anchor Duration(1, 4). """ def _site(n): return Tag(f"abjad.on_beat_grace_container({n})") assert isinstance(anchor_voice_selection, Selection) if not anchor_voice_selection.are_contiguous_same_parent( ignore_before_after_grace=True ): message = "selection must be contiguous in same parent:\n" message += f" {repr(anchor_voice_selection)}" raise Exception(message) on_beat_grace_container = OnBeatGraceContainer( contents, leaf_duration=leaf_duration ) if not isinstance(anchor_voice_selection, Selection): raise Exception(f"must be selection:\n {repr(anchor_voice_selection)}") anchor_leaf = _iterate._get_leaf(anchor_voice_selection, 0) anchor_voice = Parentage(anchor_leaf).get(Voice) if anchor_voice.name is None: raise Exception(f"anchor voice must be named:\n {repr(anchor_voice)}") anchor_voice_insert = Voice(name=anchor_voice.name) mutate.wrap(anchor_voice_selection, anchor_voice_insert) container = Container(simultaneous=True) mutate.wrap(anchor_voice_insert, container) container.insert(0, on_beat_grace_container) on_beat_grace_container._match_anchor_leaf() on_beat_grace_container._set_leaf_durations() insert_duration = anchor_voice_insert._get_duration() grace_container_duration = on_beat_grace_container._get_duration() if insert_duration < grace_container_duration: message = f"grace {repr(grace_container_duration)}" message += f" exceeds anchor {repr(insert_duration)}." 
raise Exception(message)
    if font_size is not None:
        string = rf"\set fontSize = #{font_size}"
        literal = LilyPondLiteral(string)
        attach(literal, on_beat_grace_container, tag=_site(1))
    if not do_not_beam:
        beam(on_beat_grace_container[:])
    if not do_not_slash:
        literal = LilyPondLiteral(r"\slash")
        attach(literal, on_beat_grace_container[0], tag=_site(2))
    if not do_not_slur:
        slur(on_beat_grace_container[:])
    voice_number_to_string = {
        1: r"\voiceOne",
        2: r"\voiceTwo",
        3: r"\voiceThree",
        4: r"\voiceFour",
    }
    first_grace = _iterate._get_leaf(on_beat_grace_container, 0)
    one_voice_literal = LilyPondLiteral(r"\oneVoice", format_slot="absolute_before")
    string = voice_number_to_string.get(grace_voice_number, None)
    if string is not None:
        detach(one_voice_literal, anchor_leaf)
        attach(LilyPondLiteral(string), first_grace, tag=_site(3))
    string = voice_number_to_string.get(anchor_voice_number, None)
    if string is not None:
        detach(one_voice_literal, anchor_leaf)
        attach(LilyPondLiteral(string), anchor_leaf, tag=_site(4))
    if not do_not_stop_polyphony:
        last_anchor_leaf = _iterate._get_leaf(anchor_voice_selection, -1)
        next_leaf = _iterate._get_leaf(last_anchor_leaf, 1)
        if next_leaf is not None:
            literal = LilyPondLiteral(r"\oneVoice", format_slot="absolute_before")
            attach(literal, next_leaf, tag=_site(5))
    return on_beat_grace_container
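
# Hedged usage sketch (not part of the original file): the factory above is
# re-exported as `abjad.on_beat_grace_container`, and `abjad.lilypond` renders
# the result, exactly as the doctests in the docstring show. Wrapping leaves
# 1:3 of the anchor voice reproduces the "GRACE NOTES ABOVE" example;
# `leaf_duration=(1, 30)` drives the multiplier logic in `_set_leaf_durations`.
#
#     import abjad
#
#     music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
#     abjad.on_beat_grace_container(
#         "g'8 a' b' c''", music_voice[1:3], leaf_duration=(1, 30)
#     )
#     staff = abjad.Staff([music_voice])
#     print(abjad.lilypond(staff))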
gpl-3.0
6,486,861,756,668,666,000
33.565564
84
0.377911
false
4.506318
false
false
false
UB-info/estructura-datos
RafaelArqueroGimeno_S6/ABB_Rafael_Arquero_Gimeno.py
1
8132
import copy __author__ = "Rafael Arquero Gimeno" class Node(object): def __init__(self): self.data = [] self.left = None self.right = None def clear(self): """Empty Node""" self.data = [] self.left = None self.right = None def clearData(self): """Empty stored values""" self.data = [] def append(self, data): """Appends given value""" self.data.append(data) return self # allow method chaining def delete(self, data): """Deletes the given value from Node""" self.data.remove(data) @property def key(self): return self.data[0] if self else None @property def leftmost(self): return self.left.leftmost if self.left else self.key @property def rightmost(self): return self.right.rightmost if self.right else self.key @property def depth(self): if self: left_depth = self.left.depth if self.left else 0 right_depth = self.right.depth if self.right else 0 return 1 + max(left_depth, right_depth) else: return 0 def __copy__(self): """Returns a copy of self :rtype : Node """ result = Node() result.data = copy.copy(self.data) if self.left: result.left = copy.copy(self.left) if self.right: result.right = copy.copy(self.right) return result def __nonzero__(self): return bool(self.data) def __cmp__(self, other): return cmp(self.key, other.key) if isinstance(other, Node) else cmp(self.key, other) def __str__(self): return reduce(lambda x, y: x + str(y) + "\n", self.data, "") class ABB(object): def __init__(self): self.root = Node() def clear(self): """Empty the tree""" self.root.clear() def insert(self, data): """Insert a value in tree :param data: value to be inserted :return: self to allow method chaining """ if not self: self.root.append(data) return self parent, current = self._lookup(data) if current: # data equivalent node found! current.append(data) else: # equivalent node not found! setattr(parent, "right" if parent < data else "left", Node().append(data)) return self def delete(self, data, wholeNode=False): """Deletes the given Node or Value if it is contained, Therefore do nothing :type data: Node or ValueType (e.g. User) :type wholeNode: bool :param data: The node or value to delete :param wholeNode: if whole matched node should be deleted or only the matched value """ parent, current = self._lookup(data) if current: # data was found current.clearData() if wholeNode else current.delete(data) if not current: # we have deleted the last element from current node! 
if current.left and current.right:  # 2 children
                    newData = current.right.leftmost  # leftmost is a property, not a method
                    current.clearData()
                    current.append(newData)
                    self.delete(newData)
                elif current.left:  # only left child
                    current.data = current.left.data
                    current.right = current.left.right
                    current.left = current.left.left  # TODO
                elif current.right:  # only right child
                    current.data = current.right.data
                    current.left = current.right.left
                    current.right = current.right.right  # TODO
                else:  # no children
                    if not parent:
                        parent = self.root
                    setattr(parent, "right" if parent < data else "left", None)

    def deleteLower(self, threshold, current=None, parent=None):
        """Deletes all values below threshold

        :param threshold: All values below that will be deleted
        :param current: The current inspected node (default root)
        :param parent: The parent of current node
        :return: self, allows method chaining
        """
        if current is None:
            if self:
                current = self.root
            else:
                return self  # break

        if current > threshold:
            if current.left:
                self.deleteLower(threshold, current.left, current)
        elif current < threshold:
            if current.right:
                current.data = current.right.data
                current.left = current.right.left
                current.right = current.right.right
                self.deleteLower(threshold, current, parent)
            else:
                if parent:
                    parent.left = None  # restart current
                else:
                    self.clear()  # restart root
        else:  # equals
            current.left = None
        return self

    def deleteHigher(self, threshold, current=None, parent=None):
        """Deletes all values above threshold

        :param threshold: All values above that will be deleted
        :param current: The current inspected node (default root)
        :param parent: The parent of current node
        :return: self, allows method chaining
        """
        if current is None:
            if self:
                current = self.root
            else:
                return self  # break

        if current < threshold:
            if current.right:
                self.deleteHigher(threshold, current.right, current)
        elif current > threshold:
            if current.left:
                current.data = current.left.data
                current.right = current.left.right
                current.left = current.left.left
                self.deleteHigher(threshold, current, parent)
            else:
                if parent:
                    parent.right = None  # restart current
                else:
                    self.clear()  # restart root
        else:  # equals
            current.right = None
        return self

    def _lookup(self, data):
        """Internal method. Finds the given value and returns the node where it IS or
        where it SHOULD BE (i.e. None), and also its parent node.

        :rtype: Node, Node
        """
        parent, current = None, self.root
        while current:
            if current < data:  # data should be in right
                parent, current = current, current.right
            elif current > data:  # data should be in left
                parent, current = current, current.left
            else:  # equals
                return parent, current
        return parent, current

    @property
    def min(self):
        """Returns the minimum value of the tree"""
        return self.root.leftmost

    @property
    def max(self):
        """Returns the maximum value of the tree"""
        return self.root.rightmost

    @property
    def depth(self):
        return self.root.depth

    def __copy__(self):
        """Returns a copy of self

        :rtype : ABB
        """
        result = ABB()
        result.root = copy.copy(self.root)
        return result

    def __nonzero__(self):
        """Returns false if the tree is empty, true otherwise"""
        return self.root.__nonzero__()

    def __iter__(self, current=None):
        """Creates a generator that walks through the tree in descending order

        :param current: The current node
        :type current: Node
        """
        if current is None:  # first call
            current = self.root

        if current.right:
            for x in self.__iter__(current.right):
                yield x

        for x in current.data:
            yield x

        if current.left:
            for x in self.__iter__(current.left):
                yield x

    def __str__(self):
        return reduce(lambda x, y: x + str(y) + "\n", self, "")
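
# Hedged usage sketch (not part of the original file): a quick exercise of the
# public API above, in the module's own Python 2 style.
if __name__ == "__main__":
    tree = ABB()
    for value in [5, 3, 8, 1, 4, 9]:
        tree.insert(value)
    print tree.min, tree.max, tree.depth  # -> 1 9 3
    tree.deleteLower(4)  # drops 1 and 3
    print list(tree)  # descending order: [9, 8, 5, 4]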
mit
-3,202,886,532,054,247,400
30.280769
119
0.539843
false
4.568539
false
false
false
msaadat/paper
password_dlg.py
1
2143
from PyQt5.QtWidgets import (QApplication, QMessageBox, QDialog, QGridLayout, QHBoxLayout, QLabel, QPushButton, QLineEdit) class PasswordDialog(QDialog): def __init__(self, parent=None): super(PasswordDialog, self).__init__(parent) self.password = None okButton = QPushButton("&Ok") okButton.clicked.connect(self.ok_pressed) self.pass1_edit = QLineEdit() self.pass1_edit.setEchoMode(QLineEdit.Password) self.pass2_edit = QLineEdit() self.pass2_edit.setEchoMode(QLineEdit.Password) lable1 = QLabel("Password:") lable2 = QLabel("Repeat password:") buttonsLayout = QHBoxLayout() buttonsLayout.addStretch() buttonsLayout.addWidget(okButton) mainLayout = QGridLayout() mainLayout.addWidget(lable1, 0, 0) mainLayout.addWidget(self.pass1_edit, 0, 1) mainLayout.addWidget(lable2, 1, 0) mainLayout.addWidget(self.pass2_edit, 1, 1) mainLayout.addLayout(buttonsLayout, 2, 1) self.setLayout(mainLayout) self.setWindowTitle("Set Password") def ok_pressed(self): pass1 = self.pass1_edit.text() pass2 = self.pass2_edit.text() if pass1 != pass2: QMessageBox.warning(self, "Password", "Passwords do not match.") self.pass1_edit.setFocus() self.pass1_edit.selectAll() elif pass1 == '': QMessageBox.information(self, "Password", "Passwords cannot be empty.") self.pass1_edit.setFocus() self.pass1_edit.selectAll() else: self.password = pass1 self.accept() @staticmethod def getPassword(parent): dialog = PasswordDialog(parent) result = dialog.exec_() return dialog.password, result if __name__ == '__main__': import sys app = QApplication(sys.argv) window = PasswordDialog() window.show() sys.exit(app.exec_())
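
# Hedged usage note (not part of the original file): callers normally go
# through the static helper rather than instantiating the dialog directly,
# since it runs the dialog modally and returns both the password and the
# dialog result code.
#
#     password, accepted = PasswordDialog.getPassword(parent=None)
#     if accepted:
#         print("password set:", password)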
gpl-3.0
-6,078,596,863,458,804,000
29.057971
77
0.569295
false
4.145068
false
false
false
ircah/cah-js
util/convert-csv.py
1
1612
#!/usr/bin/env python3 import re import json # turns out the dump function of the json5 module just calls the normal json module (╯°□°)╯︵ ┻━┻ INPUT = "cards-DevOpsAgainstHumanity.csv" META_NAME = "DevOps Against Humanity" DELIM = "," QUOTE = "\"" SKIPLINES = 2 def parse_csv(line): a = [] tmp = "" at_elem_start = True in_quotes = False in_escape = False for c in line: if at_elem_start: if c == DELIM: # empty element a.append("") continue in_quotes = (c == QUOTE) if not in_quotes: tmp += c at_elem_start = False continue if c == QUOTE and in_quotes and not in_escape: in_escape = True elif c == QUOTE and in_quotes and in_escape: tmp += QUOTE in_escape = False elif (c == DELIM and in_quotes and in_escape) or (c == DELIM and not in_quotes): a.append(tmp) tmp = "" in_escape = False at_elem_start = True else: tmp += c a.append(tmp) return a r_blank = re.compile(r"_+") odict = {} odict["questions"] = [] odict["answers"] = [] odict["meta"] = {} odict["meta"]["name"] = META_NAME ifd = open(INPUT, "r") for i in range(SKIPLINES): ifd.readline() n = 0 while True: l = ifd.readline() if not l: break l = l.rstrip("\r\n") l = parse_csv(l) if l[0] != "": odict["answers"].append(l[0]) n += 1 if l[1] != "": tmp = {} tmp["text"] = re.sub(r_blank, "%s", l[1]) # pick is inferred from number of %s odict["questions"].append(tmp) n += 1 ifd.close() ofd = open(INPUT.replace(".csv", ".json5"), "w") json.dump(odict, ofd, indent=2, sort_keys=True) ofd.close() print("Processed %d cards." % (n, ))
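
# Hedged equivalence sketch (not part of the original script): parse_csv above
# hand-rolls the usual doubled-quote escaping; for lines without embedded
# newlines, the standard library's csv module parses the same dialect. The
# helper name below is hypothetical.
import csv

def parse_csv_stdlib(line):
	# csv.reader accepts any iterable of lines; wrap the single line in a list
	return next(csv.reader([line], delimiter=DELIM, quotechar=QUOTE))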
mit
5,042,605,997,194,050,000
19.410256
108
0.600503
false
2.479751
false
false
false
miguelgrinberg/python-socketio
tests/common/test_pubsub_manager.py
1
13274
import functools import logging import unittest from unittest import mock import pytest from socketio import base_manager from socketio import pubsub_manager class TestPubSubManager(unittest.TestCase): def setUp(self): id = 0 def generate_id(): nonlocal id id += 1 return str(id) mock_server = mock.MagicMock() mock_server.eio.generate_id = generate_id self.pm = pubsub_manager.PubSubManager() self.pm._publish = mock.MagicMock() self.pm.set_server(mock_server) self.pm.host_id = '123456' self.pm.initialize() def test_default_init(self): assert self.pm.channel == 'socketio' self.pm.server.start_background_task.assert_called_once_with( self.pm._thread ) def test_custom_init(self): pubsub = pubsub_manager.PubSubManager(channel='foo') assert pubsub.channel == 'foo' assert len(pubsub.host_id) == 32 def test_write_only_init(self): mock_server = mock.MagicMock() pm = pubsub_manager.PubSubManager(write_only=True) pm.set_server(mock_server) pm.initialize() assert pm.channel == 'socketio' assert len(pm.host_id) == 32 assert pm.server.start_background_task.call_count == 0 def test_write_only_default_logger(self): pm = pubsub_manager.PubSubManager(write_only=True) pm.initialize() assert pm.channel == 'socketio' assert len(pm.host_id) == 32 assert pm._get_logger() == logging.getLogger('socketio') def test_write_only_with_provided_logger(self): test_logger = logging.getLogger('new_logger') pm = pubsub_manager.PubSubManager(write_only=True, logger=test_logger) pm.initialize() assert pm.channel == 'socketio' assert len(pm.host_id) == 32 assert pm._get_logger() == test_logger def test_emit(self): self.pm.emit('foo', 'bar') self.pm._publish.assert_called_once_with( { 'method': 'emit', 'event': 'foo', 'data': 'bar', 'namespace': '/', 'room': None, 'skip_sid': None, 'callback': None, 'host_id': '123456', } ) def test_emit_with_namespace(self): self.pm.emit('foo', 'bar', namespace='/baz') self.pm._publish.assert_called_once_with( { 'method': 'emit', 'event': 'foo', 'data': 'bar', 'namespace': '/baz', 'room': None, 'skip_sid': None, 'callback': None, 'host_id': '123456', } ) def test_emit_with_room(self): self.pm.emit('foo', 'bar', room='baz') self.pm._publish.assert_called_once_with( { 'method': 'emit', 'event': 'foo', 'data': 'bar', 'namespace': '/', 'room': 'baz', 'skip_sid': None, 'callback': None, 'host_id': '123456', } ) def test_emit_with_skip_sid(self): self.pm.emit('foo', 'bar', skip_sid='baz') self.pm._publish.assert_called_once_with( { 'method': 'emit', 'event': 'foo', 'data': 'bar', 'namespace': '/', 'room': None, 'skip_sid': 'baz', 'callback': None, 'host_id': '123456', } ) def test_emit_with_callback(self): with mock.patch.object( self.pm, '_generate_ack_id', return_value='123' ): self.pm.emit('foo', 'bar', room='baz', callback='cb') self.pm._publish.assert_called_once_with( { 'method': 'emit', 'event': 'foo', 'data': 'bar', 'namespace': '/', 'room': 'baz', 'skip_sid': None, 'callback': ('baz', '/', '123'), 'host_id': '123456', } ) def test_emit_with_callback_without_server(self): standalone_pm = pubsub_manager.PubSubManager() with pytest.raises(RuntimeError): standalone_pm.emit('foo', 'bar', callback='cb') def test_emit_with_callback_missing_room(self): with mock.patch.object( self.pm, '_generate_ack_id', return_value='123' ): with pytest.raises(ValueError): self.pm.emit('foo', 'bar', callback='cb') def test_emit_with_ignore_queue(self): sid = self.pm.connect('123', '/') self.pm.emit( 'foo', 'bar', room=sid, namespace='/', ignore_queue=True ) self.pm._publish.assert_not_called() 
self.pm.server._emit_internal.assert_called_once_with( '123', 'foo', 'bar', '/', None ) def test_can_disconnect(self): sid = self.pm.connect('123', '/') assert self.pm.can_disconnect(sid, '/') self.pm.can_disconnect(sid, '/foo') self.pm._publish.assert_called_once_with( {'method': 'disconnect', 'sid': sid, 'namespace': '/foo'} ) def test_close_room(self): self.pm.close_room('foo') self.pm._publish.assert_called_once_with( {'method': 'close_room', 'room': 'foo', 'namespace': '/'} ) def test_close_room_with_namespace(self): self.pm.close_room('foo', '/bar') self.pm._publish.assert_called_once_with( {'method': 'close_room', 'room': 'foo', 'namespace': '/bar'} ) def test_handle_emit(self): with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit: self.pm._handle_emit({'event': 'foo', 'data': 'bar'}) super_emit.assert_called_once_with( 'foo', 'bar', namespace=None, room=None, skip_sid=None, callback=None, ) def test_handle_emit_with_namespace(self): with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit: self.pm._handle_emit( {'event': 'foo', 'data': 'bar', 'namespace': '/baz'} ) super_emit.assert_called_once_with( 'foo', 'bar', namespace='/baz', room=None, skip_sid=None, callback=None, ) def test_handle_emit_with_room(self): with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit: self.pm._handle_emit( {'event': 'foo', 'data': 'bar', 'room': 'baz'} ) super_emit.assert_called_once_with( 'foo', 'bar', namespace=None, room='baz', skip_sid=None, callback=None, ) def test_handle_emit_with_skip_sid(self): with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit: self.pm._handle_emit( {'event': 'foo', 'data': 'bar', 'skip_sid': '123'} ) super_emit.assert_called_once_with( 'foo', 'bar', namespace=None, room=None, skip_sid='123', callback=None, ) def test_handle_emit_with_callback(self): host_id = self.pm.host_id with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit: self.pm._handle_emit( { 'event': 'foo', 'data': 'bar', 'namespace': '/baz', 'callback': ('sid', '/baz', 123), 'host_id': host_id, } ) assert super_emit.call_count == 1 assert super_emit.call_args[0] == ('foo', 'bar') assert super_emit.call_args[1]['namespace'] == '/baz' assert super_emit.call_args[1]['room'] is None assert super_emit.call_args[1]['skip_sid'] is None assert isinstance( super_emit.call_args[1]['callback'], functools.partial ) super_emit.call_args[1]['callback']('one', 2, 'three') self.pm._publish.assert_called_once_with( { 'method': 'callback', 'host_id': host_id, 'sid': 'sid', 'namespace': '/baz', 'id': 123, 'args': ('one', 2, 'three'), } ) def test_handle_callback(self): host_id = self.pm.host_id with mock.patch.object(self.pm, 'trigger_callback') as trigger: self.pm._handle_callback( { 'method': 'callback', 'host_id': host_id, 'sid': 'sid', 'namespace': '/', 'id': 123, 'args': ('one', 2), } ) trigger.assert_called_once_with('sid', 123, ('one', 2)) def test_handle_callback_bad_host_id(self): with mock.patch.object(self.pm, 'trigger_callback') as trigger: self.pm._handle_callback( { 'method': 'callback', 'host_id': 'bad', 'sid': 'sid', 'namespace': '/', 'id': 123, 'args': ('one', 2), } ) assert trigger.call_count == 0 def test_handle_callback_missing_args(self): host_id = self.pm.host_id with mock.patch.object(self.pm, 'trigger_callback') as trigger: self.pm._handle_callback( { 'method': 'callback', 'host_id': host_id, 'sid': 'sid', 'namespace': '/', 'id': 123, } ) self.pm._handle_callback( { 'method': 'callback', 'host_id': host_id, 'sid': 'sid', 
'namespace': '/', } ) self.pm._handle_callback( {'method': 'callback', 'host_id': host_id, 'sid': 'sid'} ) self.pm._handle_callback( {'method': 'callback', 'host_id': host_id} ) assert trigger.call_count == 0 def test_handle_disconnect(self): self.pm._handle_disconnect( {'method': 'disconnect', 'sid': '123', 'namespace': '/foo'} ) self.pm.server.disconnect.assert_called_once_with( sid='123', namespace='/foo', ignore_queue=True ) def test_handle_close_room(self): with mock.patch.object( base_manager.BaseManager, 'close_room' ) as super_close_room: self.pm._handle_close_room({'method': 'close_room', 'room': 'foo'}) super_close_room.assert_called_once_with( room='foo', namespace=None ) def test_handle_close_room_with_namespace(self): with mock.patch.object( base_manager.BaseManager, 'close_room' ) as super_close_room: self.pm._handle_close_room( {'method': 'close_room', 'room': 'foo', 'namespace': '/bar'} ) super_close_room.assert_called_once_with( room='foo', namespace='/bar' ) def test_background_thread(self): self.pm._handle_emit = mock.MagicMock() self.pm._handle_callback = mock.MagicMock() self.pm._handle_disconnect = mock.MagicMock() self.pm._handle_close_room = mock.MagicMock() def messages(): import pickle yield {'method': 'emit', 'value': 'foo'} yield {'missing': 'method'} yield '{"method": "callback", "value": "bar"}' yield {'method': 'disconnect', 'sid': '123', 'namespace': '/foo'} yield {'method': 'bogus'} yield pickle.dumps({'method': 'close_room', 'value': 'baz'}) yield 'bad json' yield b'bad pickled' self.pm._listen = mock.MagicMock(side_effect=messages) try: self.pm._thread() except StopIteration: pass self.pm._handle_emit.assert_called_once_with( {'method': 'emit', 'value': 'foo'} ) self.pm._handle_callback.assert_called_once_with( {'method': 'callback', 'value': 'bar'} ) self.pm._handle_disconnect.assert_called_once_with( {'method': 'disconnect', 'sid': '123', 'namespace': '/foo'} ) self.pm._handle_close_room.assert_called_once_with( {'method': 'close_room', 'value': 'baz'} )
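
# Hedged usage note (not part of the original test file): the write_only mode
# exercised above mirrors how an external process emits through the message
# queue without running a listener thread; in practice a concrete subclass
# such as KombuManager or RedisManager is used rather than PubSubManager:
#
#     import socketio
#     external_sio = socketio.KombuManager('amqp://', write_only=True)
#     external_sio.emit('event', data={'foo': 'bar'}, room='my room')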
mit
408,921,371,623,106,050
33.035897
79
0.469715
false
4.027306
true
false
false
rschnapka/account-closing
account_cutoff_prepaid/account.py
1
6258
# -*- encoding: utf-8 -*- ############################################################################## # # Account Cut-off Prepaid module for OpenERP # Copyright (C) 2013 Akretion (http://www.akretion.com) # @author Alexis de Lattre <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import orm, fields from openerp.tools.translate import _ class account_invoice_line(orm.Model): _inherit = 'account.invoice.line' _columns = { 'start_date': fields.date('Start Date'), 'end_date': fields.date('End Date'), } def _check_start_end_dates(self, cr, uid, ids): for invline in self.browse(cr, uid, ids): if invline.start_date and not invline.end_date: raise orm.except_orm( _('Error:'), _("Missing End Date for invoice line with " "Description '%s'.") % (invline.name)) if invline.end_date and not invline.start_date: raise orm.except_orm( _('Error:'), _("Missing Start Date for invoice line with " "Description '%s'.") % (invline.name)) if invline.end_date and invline.start_date and \ invline.start_date > invline.end_date: raise orm.except_orm( _('Error:'), _("Start Date should be before or be the same as " "End Date for invoice line with Description '%s'.") % (invline.name)) # Note : we can't check invline.product_id.must_have_dates # have start_date and end_date here, because it would # block automatic invoice generation. So we do the check # upon validation of the invoice (see below the function # action_move_create) return True _constraints = [ (_check_start_end_dates, "Error msg in raise", ['start_date', 'end_date', 'product_id']), ] def move_line_get_item(self, cr, uid, line, context=None): res = super(account_invoice_line, self).move_line_get_item( cr, uid, line, context=context) res['start_date'] = line.start_date res['end_date'] = line.end_date return res class account_move_line(orm.Model): _inherit = "account.move.line" _columns = { 'start_date': fields.date('Start Date'), 'end_date': fields.date('End Date'), } def _check_start_end_dates(self, cr, uid, ids): for moveline in self.browse(cr, uid, ids): if moveline.start_date and not moveline.end_date: raise orm.except_orm( _('Error:'), _("Missing End Date for move line with Name '%s'.") % (moveline.name)) if moveline.end_date and not moveline.start_date: raise orm.except_orm( _('Error:'), _("Missing Start Date for move line with Name '%s'.") % (moveline.name)) if moveline.end_date and moveline.start_date and \ moveline.start_date > moveline.end_date: raise orm.except_orm( _('Error:'), _("Start Date should be before End Date for move line " "with Name '%s'.") % (moveline.name)) # should we check that it's related to an expense / revenue ? 
# -> I don't think so return True _constraints = [( _check_start_end_dates, "Error msg in raise", ['start_date', 'end_date'] )] class account_invoice(orm.Model): _inherit = 'account.invoice' def inv_line_characteristic_hashcode(self, invoice, invoice_line): '''Add start and end dates to hashcode used when the option "Group Invoice Lines" is active on the Account Journal''' code = super(account_invoice, self).inv_line_characteristic_hashcode( invoice, invoice_line) hashcode = '%s-%s-%s' % ( code, invoice_line.get('start_date', 'False'), invoice_line.get('end_date', 'False'), ) return hashcode def line_get_convert(self, cr, uid, x, part, date, context=None): res = super(account_invoice, self).line_get_convert( cr, uid, x, part, date, context=context) res['start_date'] = x.get('start_date', False) res['end_date'] = x.get('end_date', False) return res def action_move_create(self, cr, uid, ids, context=None): '''Check that products with must_have_dates=True have Start and End Dates''' for invoice in self.browse(cr, uid, ids, context=context): for invline in invoice.invoice_line: if invline.product_id and invline.product_id.must_have_dates: if not invline.start_date or not invline.end_date: raise orm.except_orm( _('Error:'), _("Missing Start Date and End Date for invoice " "line with Product '%s' which has the " "property 'Must Have Start and End Dates'.") % (invline.product_id.name)) return super(account_invoice, self).action_move_create( cr, uid, ids, context=context)
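
# Hedged illustration (not part of the original module): a plain-Python
# restatement of the rule that both _check_start_end_dates constraints above
# enforce per record. The helper name is hypothetical and unused by the module.
def _dates_are_valid(start_date, end_date):
    if bool(start_date) != bool(end_date):
        return False  # one of the two dates is missing
    if start_date and start_date > end_date:
        return False  # start must not come after end
    return True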
agpl-3.0
7,632,116,702,459,464,000
40.72
78
0.541067
false
4.147117
false
false
false
PaddlePaddle/Paddle
python/paddle/fluid/tests/unittests/hybrid_parallel_mp_clip_grad.py
1
1391
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division from __future__ import print_function import paddle import numpy as np from hybrid_parallel_mp_model import TestDistMPTraning import unittest import logging #log = logging.getLogger("HybridParallel") #log.setLevel(logging.WARNING) class TestMPClipGrad(TestDistMPTraning): def build_optimizer(self, model): grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0) scheduler = paddle.optimizer.lr.ExponentialDecay( learning_rate=0.001, gamma=0.999, verbose=True) optimizer = paddle.optimizer.SGD(scheduler, grad_clip=grad_clip, parameters=model.parameters()) return optimizer if __name__ == "__main__": unittest.main()
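
# Hedged standalone sketch (not part of the original test): the same optimizer
# wiring outside the hybrid-parallel harness, on a toy layer. Kept as a
# comment so nothing executes at import time.
#
#     import paddle
#     layer = paddle.nn.Linear(10, 10)
#     clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=2.0)
#     lr = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.001, gamma=0.999)
#     opt = paddle.optimizer.SGD(lr, grad_clip=clip, parameters=layer.parameters())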
apache-2.0
-7,350,561,299,220,626,000
33.775
74
0.698059
false
4.079179
false
false
false
zsiciarz/django-pgallery
pgallery/admin.py
1
2105
""" Administration for photos and galleries. """ from django.contrib import admin from django.db.models import Count from django.utils.translation import ugettext_lazy as _ from .forms import PhotoForm from .models import Gallery, Photo class PhotoInline(admin.TabularInline): """ Administration for photos. """ model = Photo form = PhotoForm ordering = ["created"] def get_extra(self, request, obj=None, **kwargs): return 0 if obj else 3 class GalleryAdmin(admin.ModelAdmin): """ Administration for galleries. """ list_display = ( "author", "title", "status", # Having "description" here raises SystemCheckError (admin.E108). # We need to remove description from list_display for Django 2.1-2.2 # See https://code.djangoproject.com/ticket/30543 # "description", "shot_date", "modified", "photo_count", ) list_display_links = ("title",) list_editable = ("status",) list_filter = ("status",) date_hierarchy = "shot_date" prepopulated_fields = {"slug": ("title",)} inlines = [PhotoInline] def photo_count(self, obj): return obj.photo_count photo_count.short_description = _("Photo count") def get_queryset(self, request): """ Add number of photos to each gallery. """ qs = super(GalleryAdmin, self).get_queryset(request) return qs.annotate(photo_count=Count("photos")) def save_model(self, request, obj, form, change): """ Set currently authenticated user as the author of the gallery. """ obj.author = request.user obj.save() def save_formset(self, request, form, formset, change): """ For each photo set it's author to currently authenticated user. """ instances = formset.save(commit=False) for instance in instances: if isinstance(instance, Photo): instance.author = request.user instance.save() admin.site.register(Gallery, GalleryAdmin)
mit
6,763,024,374,916,542,000
25.3125
76
0.614252
false
4.095331
false
false
false
dgketchum/satellite_image
sat_image/image.py
1
27285
# ============================================================================================= # Copyright 2017 dgketchum # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================================= import os import shutil from rasterio import open as rasopen from numpy import where, pi, cos, nan, inf, true_divide, errstate, log from numpy import float32, sin, deg2rad, array, isnan from shapely.geometry import Polygon, mapping from fiona import open as fiopen from fiona.crs import from_epsg from tempfile import mkdtemp from datetime import datetime from bounds import RasterBounds from sat_image import mtl class UnmatchedStackGeoError(ValueError): pass class InvalidObjectError(TypeError): pass class LandsatImage(object): ''' Object to process landsat images. The parent class: LandsatImage takes a directory containing untarred files, for now this ingests images that have been downloaded from USGS earth explorer, using our Landsat578 package. ''' def __init__(self, obj): ''' :param obj: Directory containing an unzipped Landsat 5, 7, or 8 image. This should include at least a tif for each band, and a .mtl file. ''' self.obj = obj if os.path.isdir(obj): self.isdir = True self.date_acquired = None self.file_list = os.listdir(obj) self.tif_list = [x for x in os.listdir(obj) if x.endswith('.TIF')] self.tif_list.sort() # parse metadata file into attributes # structure: {HEADER: {SUBHEADER: {key(attribute), val(attribute value)}}} self.mtl = mtl.parsemeta(obj) self.meta_header = list(self.mtl)[0] self.super_dict = self.mtl[self.meta_header] for key, val in self.super_dict.items(): for sub_key, sub_val in val.items(): # print(sub_key.lower(), sub_val) setattr(self, sub_key.lower(), sub_val) self.satellite = self.landsat_scene_id[:3] # create numpy nd_array objects for each band self.band_list = [] self.tif_dict = {} for i, tif in enumerate(self.tif_list): raster = os.path.join(self.obj, tif) # set all lower case attributes tif = tif.lower() front_ind = tif.index('b') end_ind = tif.index('.tif') att_string = tif[front_ind: end_ind] self.band_list.append(att_string) self.tif_dict[att_string] = raster self.band_count = i + 1 if i == 0: with rasopen(raster) as src: transform = src.transform profile = src.profile meta = src.meta.copy() self.rasterio_geometry = meta self.profile = profile self.transform = transform self.shape = (1, profile['height'], profile['width']) bounds = RasterBounds(affine_transform=transform, profile=profile, latlon=False) self.bounds = bounds self.north, self.west, self.south, self.east = bounds.get_nwse_tuple() self.coords = bounds.as_tuple('nsew') self.solar_zenith = 90. 
- self.sun_elevation self.solar_zenith_rad = self.solar_zenith * pi / 180 self.sun_elevation_rad = self.sun_elevation * pi / 180 self.earth_sun_dist = self.earth_sun_d(self.date_acquired) dtime = datetime.strptime(str(self.date_acquired), '%Y-%m-%d') julian_day = dtime.strftime('%j') self.doy = int(julian_day) self.scene_coords_deg = self._scene_centroid() self.scene_coords_rad = deg2rad(self.scene_coords_deg[0]), deg2rad(self.scene_coords_deg[1]) def _get_band(self, band_str): path = self.tif_dict[band_str] with rasopen(path) as src: arr = src.read(1) arr = array(arr, dtype=float32) arr[arr < 1.] = nan return arr def _scene_centroid(self): """ Compute image center coordinates :return: Tuple of image center in lat, lon """ ul_lat = self.corner_ul_lat_product ll_lat = self.corner_ll_lat_product ul_lon = self.corner_ul_lon_product ur_lon = self.corner_ur_lon_product lat = (ul_lat + ll_lat) / 2. lon = (ul_lon + ur_lon) / 2. return lat, lon @staticmethod def earth_sun_d(dtime): """ Earth-sun distance in AU :param dtime time, e.g. datetime.datetime(2007, 5, 1) :type datetime object :return float(distance from sun to earth in astronomical units) """ doy = int(dtime.strftime('%j')) rad_term = 0.9856 * (doy - 4) * pi / 180 distance_au = 1 - 0.01672 * cos(rad_term) return distance_au @staticmethod def _divide_zero(a, b, replace=0): with errstate(divide='ignore', invalid='ignore'): c = true_divide(a, b) c[c == inf] = replace return c def get_tile_geometry(self, output_filename=None, geographic_coords=False): if not output_filename: temp_dir = mkdtemp() temp = os.path.join(temp_dir, 'shape.shp') else: temp = output_filename # corners = {'ul': (self.corner_ul_projection_x_product, # self.corner_ul_projection_y_product), # 'll': (self.corner_ll_projection_x_product, # self.corner_ll_projection_y_product), # 'lr': (self.corner_lr_projection_x_product, # self.corner_lr_projection_y_product), # 'ur': (self.corner_ur_projection_x_product, # self.corner_ur_projection_y_product)} if geographic_coords: points = [(self.north, self.west), (self.south, self.west), (self.south, self.east), (self.north, self.east), (self.north, self.west)] else: points = [(self.west, self.north), (self.west, self.south), (self.east, self.south), (self.east, self.north), (self.west, self.north)] polygon = Polygon(points) schema = {'geometry': 'Polygon', 'properties': {'id': 'int'}} crs = from_epsg(int(self.rasterio_geometry['crs']['init'].split(':')[1])) with fiopen(temp, 'w', 'ESRI Shapefile', schema=schema, crs=crs) as shp: shp.write({ 'geometry': mapping(polygon), 'properties': {'id': 1}}) if output_filename: return None with fiopen(temp, 'r') as src: features = [f['geometry'] for f in src] if not output_filename: try: shutil.rmtree(temp_dir) except UnboundLocalError: pass return features def save_array(self, arr, output_filename): geometry = self.rasterio_geometry arr = arr.reshape(1, arr.shape[0], arr.shape[1]) geometry['dtype'] = arr.dtype with rasopen(output_filename, 'w', **geometry) as dst: dst.write(arr) return None def mask_by_image(self, arr): image = self._get_band('b1') image = array(image, dtype=float32) image[image < 1.] = nan arr = where(isnan(image), nan, arr) return arr def mask(self): image = self._get_band('b1') image = array(image, dtype=float32) image[image < 1.] 
= nan arr = where(isnan(image), 0, 1) return arr class Landsat5(LandsatImage): def __init__(self, obj): LandsatImage.__init__(self, obj) if self.satellite != 'LT5': raise ValueError('Must init Landsat5 object with Landsat5 data, not {}'.format(self.satellite)) # https://landsat.usgs.gov/esun self.ex_atm_irrad = (1958.0, 1827.0, 1551.0, 1036.0, 214.9, nan, 80.65) # old values from fmask.exe # self.ex_atm_irrad = (1983.0, 1796.0, 1536.0, 1031.0, 220.0, nan, 83.44) self.k1, self.k2 = 607.76, 1260.56 def radiance(self, band): qcal_min = getattr(self, 'quantize_cal_min_band_{}'.format(band)) qcal_max = getattr(self, 'quantize_cal_max_band_{}'.format(band)) l_min = getattr(self, 'radiance_minimum_band_{}'.format(band)) l_max = getattr(self, 'radiance_maximum_band_{}'.format(band)) qcal = self._get_band('b{}'.format(band)) rad = ((l_max - l_min) / (qcal_max - qcal_min)) * (qcal - qcal_min) + l_min return rad.astype(float32) def brightness_temp(self, band, temp_scale='K'): if band in [1, 2, 3, 4, 5, 7]: raise ValueError('LT5 brightness must be band 6') rad = self.radiance(band) brightness = self.k2 / (log((self.k1 / rad) + 1)) if temp_scale == 'K': return brightness elif temp_scale == 'F': return brightness * (9 / 5.0) - 459.67 elif temp_scale == 'C': return brightness - 273.15 else: raise ValueError('{} is not a valid temperature scale'.format(temp_scale)) def reflectance(self, band): """ :param band: An optical band, i.e. 1-5, 7 :return: At satellite reflectance, [-] """ if band == 6: raise ValueError('LT5 reflectance must be other than band 6') rad = self.radiance(band) esun = self.ex_atm_irrad[band - 1] toa_reflect = (pi * rad * self.earth_sun_dist ** 2) / (esun * cos(self.solar_zenith_rad)) return toa_reflect def albedo(self, model='smith'): """Finds broad-band surface reflectance (albedo) Smith (2010), “The heat budget of the earth’s surface deduced from space” LT5 toa reflectance bands 1, 3, 4, 5, 7 # normalized i.e. 0.356 + 0.130 + 0.373 + 0.085 + 0.07 = 1.014 Should have option for Liang, 2000; Tasumi (2008), "At-Surface Reflectance and Albedo from Satellite for Operational Calculation of Land Surface Energy Balance" :return albedo array of floats """ if model == 'smith': blue, red, nir, swir1, swir2 = (self.reflectance(1), self.reflectance(3), self.reflectance(4), self.reflectance(5), self.reflectance(7)) alb = (0.356 * blue + 0.130 * red + 0.373 * nir + 0.085 * swir1 + 0.072 * swir2 - 0.0018) / 1.014 elif model == 'tasumi': pass # add tasumi algorithm TODO return alb def saturation_mask(self, band, value=255): """ Mask saturated pixels, 1 (True) is saturated. :param band: Image band with dn values, type: array :param value: Maximum (saturated) value, i.e. 255 for 8-bit data, type: int :return: boolean array """ dn = self._get_band('b{}'.format(band)) mask = self.mask() mask = where((dn == value) & (mask > 0), True, False) return mask def ndvi(self): """ Normalized difference vegetation index. :return: NDVI """ red, nir = self.reflectance(3), self.reflectance(4) ndvi = self._divide_zero((nir - red), (nir + red), nan) return ndvi def lai(self): """ Leaf area index (LAI), or the surface area of leaves to surface area ground. 
Trezza and Allen, 2014 :param ndvi: normalized difference vegetation index [-] :return: LAI [-] """ ndvi = self.ndvi() lai = 7.0 * (ndvi ** 3) lai = where(lai > 6., 6., lai) return lai def emissivity(self, approach='tasumi'): ndvi = self.ndvi() if approach == 'tasumi': lai = self.lai() # Tasumi et al., 2003 # narrow-band emissivity nb_epsilon = where((ndvi > 0) & (lai <= 3), 0.97 + 0.0033 * lai, nan) nb_epsilon = where((ndvi > 0) & (lai > 3), 0.98, nb_epsilon) nb_epsilon = where(ndvi <= 0, 0.99, nb_epsilon) return nb_epsilon if approach == 'sobrino': # Sobrino et el., 2004 red = self.reflectance(3) bound_ndvi = where(ndvi > 0.5, ndvi, 0.99) bound_ndvi = where(ndvi < 0.2, red, bound_ndvi) pv = ((ndvi - 0.2) / (0.5 - 0.2)) ** 2 pv_emiss = 0.004 * pv + 0.986 emissivity = where((ndvi >= 0.2) & (ndvi <= 0.5), pv_emiss, bound_ndvi) return emissivity def land_surface_temp(self): """ Mean values from Allen (2007) :return: """ rp = 0.91 tau = 0.866 rsky = 1.32 epsilon = self.emissivity(approach='tasumi') radiance = self.radiance(6) rc = ((radiance - rp) / tau) - ((1 - epsilon) * rsky) lst = self.k2 / (log((epsilon * self.k1 / rc) + 1)) return lst def ndsi(self): """ Normalized difference snow index. :return: NDSI """ green, swir1 = self.reflectance(2), self.reflectance(5) ndsi = self._divide_zero((green - swir1), (green + swir1), nan) return ndsi class Landsat7(LandsatImage): def __init__(self, obj): LandsatImage.__init__(self, obj) if self.satellite != 'LE7': raise ValueError('Must init Landsat7 object with Landsat5 data, not {}'.format(self.satellite)) # https://landsat.usgs.gov/esun; Landsat 7 Handbook self.ex_atm_irrad = (1970.0, 1842.0, 1547.0, 1044.0, 255.700, nan, 82.06, 1369.00) self.k1, self.k2 = 666.09, 1282.71 def radiance(self, band): if band == 6: band = '6_vcid_1' qcal_min = getattr(self, 'quantize_cal_min_band_{}'.format(band)) qcal_max = getattr(self, 'quantize_cal_max_band_{}'.format(band)) l_min = getattr(self, 'radiance_minimum_band_{}'.format(band)) l_max = getattr(self, 'radiance_maximum_band_{}'.format(band)) qcal = self._get_band('b{}'.format(band)) rad = ((l_max - l_min) / (qcal_max - qcal_min)) * (qcal - qcal_min) + l_min return rad def brightness_temp(self, band=6, gain='low', temp_scale='K'): if band in [1, 2, 3, 4, 5, 7, 8]: raise ValueError('LE7 brightness must be either vcid_1 or vcid_2') if gain == 'low': # low gain : b6_vcid_1 band_gain = '6_vcid_1' else: band_gain = '6_vcid_2' rad = self.radiance(band_gain) brightness = self.k2 / (log((self.k1 / rad) + 1)) if temp_scale == 'K': return brightness elif temp_scale == 'F': return brightness * (9 / 5.0) - 459.67 elif temp_scale == 'C': return brightness - 273.15 else: raise ValueError('{} is not a valid temperature scale'.format(temp_scale)) def reflectance(self, band): """ :param band: An optical band, i.e. 1-5, 7 :return: At satellite reflectance, [-] """ if band in ['b6_vcid_1', 'b6_vcid_2']: raise ValueError('LE7 reflectance must not be b6_vcid_1 or b6_vcid_2') rad = self.radiance(band) esun = self.ex_atm_irrad[band - 1] toa_reflect = (pi * rad * self.earth_sun_dist ** 2) / (esun * cos(self.solar_zenith_rad)) return toa_reflect def albedo(self): """Finds broad-band surface reflectance (albedo) Smith (2010), “The heat budget of the earth’s surface deduced from space” Should have option for Liang, 2000; LE7 toa reflectance bands 1, 3, 4, 5, 7 # normalized i.e. 
        0.356 + 0.130 + 0.373 + 0.085 + 0.07 = 1.014
        :return albedo array of floats
        """
        blue, red, nir, swir1, swir2 = (self.reflectance(1), self.reflectance(3),
                                        self.reflectance(4), self.reflectance(5),
                                        self.reflectance(7))
        # note: the 0.072 swir2 coefficient differs slightly from the 0.07
        # used in the normalisation sum above; the divisor 1.014 follows that sum
        alb = (0.356 * blue + 0.130 * red + 0.373 * nir + 0.085 * swir1
               + 0.072 * swir2 - 0.0018) / 1.014
        return alb

    def saturation_mask(self, band, value=255):
        """ Mask saturated pixels, 1 (True) is saturated.
        :param band: Image band with dn values, type: array
        :param value: Maximum (saturated) value, i.e. 255 for 8-bit data, type: int
        :return: boolean array
        """
        dn = self._get_band('b{}'.format(band))
        mask = where((dn == value) & (self.mask() > 0), True, False)
        return mask

    def ndvi(self):
        """ Normalized difference vegetation index.
        :return: NDVI
        """
        red, nir = self.reflectance(3), self.reflectance(4)
        ndvi = self._divide_zero((nir - red), (nir + red), nan)
        return ndvi

    def lai(self):
        """ Leaf area index (LAI), or the surface area of leaves to surface area ground.
        Trezza and Allen, 2014
        :param ndvi: normalized difference vegetation index [-]
        :return: LAI [-]
        """
        ndvi = self.ndvi()
        lai = 7.0 * (ndvi ** 3)
        lai = where(lai > 6., 6., lai)
        return lai

    def emissivity(self, approach='tasumi'):
        ndvi = self.ndvi()
        if approach == 'tasumi':
            lai = self.lai()
            # Tasumi et al., 2003
            # narrow-band emissivity
            nb_epsilon = where((ndvi > 0) & (lai <= 3), 0.97 + 0.0033 * lai, nan)
            nb_epsilon = where((ndvi > 0) & (lai > 3), 0.98, nb_epsilon)
            nb_epsilon = where(ndvi <= 0, 0.99, nb_epsilon)
            return nb_epsilon

        if approach == 'sobrino':
            # Sobrino et al., 2004
            red = self.reflectance(3)
            bound_ndvi = where(ndvi > 0.5, ndvi, 0.99)
            bound_ndvi = where(ndvi < 0.2, red, bound_ndvi)
            pv = ((ndvi - 0.2) / (0.5 - 0.2)) ** 2
            pv_emiss = 0.004 * pv + 0.986
            emissivity = where((ndvi >= 0.2) & (ndvi <= 0.5), pv_emiss,
                               bound_ndvi)
            return emissivity

    def land_surface_temp(self):
        rp = 0.91
        tau = 0.866
        rsky = 1.32
        epsilon = self.emissivity()
        rc = ((self.radiance(6) - rp) / tau) - ((1 - epsilon) * rsky)
        lst = self.k2 / (log((epsilon * self.k1 / rc) + 1))
        return lst

    def ndsi(self):
        """ Normalized difference snow index.
        :return: NDSI
        """
        green, swir1 = self.reflectance(2), self.reflectance(5)
        ndsi = self._divide_zero((green - swir1), (green + swir1), nan)
        return ndsi


class Landsat8(LandsatImage):
    def __init__(self, obj):
        LandsatImage.__init__(self, obj)
        self.oli_bands = [1, 2, 3, 4, 5, 6, 7, 8, 9]

    def brightness_temp(self, band, temp_scale='K'):
        """Calculate brightness temperature of Landsat 8
        as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

        T = K2 / log((K1 / L) + 1)

        and

        L = ML * Q + AL

        where:
        T  = At-satellite brightness temperature (degrees kelvin)
        L  = TOA spectral radiance (Watts / (m2 * srad * mm))
        ML = Band-specific multiplicative rescaling factor from the metadata
             (RADIANCE_MULT_BAND_x, where x is the band number)
        AL = Band-specific additive rescaling factor from the metadata
             (RADIANCE_ADD_BAND_x, where x is the band number)
        Q  = Quantized and calibrated standard product pixel values (DN)
             (ndarray img)
        K1 = Band-specific thermal conversion constant from the metadata
             (K1_CONSTANT_BAND_x, where x is the thermal band number)
        K2 = Band-specific thermal conversion constant from the metadata
             (K2_CONSTANT_BAND_x, where x is the thermal band number)

        Returns
        --------
        ndarray:
            float32 ndarray with shape == input shape
        """
        if band in self.oli_bands:
            raise ValueError('Landsat 8 brightness should be a TIRS band '
                             '(i.e. 10 or 11)')

        k1 = getattr(self, 'k1_constant_band_{}'.format(band))
        k2 = getattr(self, 'k2_constant_band_{}'.format(band))
        rad = self.radiance(band)
        brightness = k2 / log((k1 / rad) + 1)

        if temp_scale == 'K':
            return brightness
        elif temp_scale == 'F':
            return brightness * (9 / 5.0) - 459.67
        elif temp_scale == 'C':
            return brightness - 273.15
        else:
            raise ValueError('{} is not a valid temperature scale'.format(temp_scale))

    def reflectance(self, band):
        """Calculate top of atmosphere reflectance of Landsat 8
        as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

        R_raw = MR * Q + AR

        R = R_raw / cos(Z) = R_raw / sin(E)

        Z = 90 - E (in degrees)

        where:
        R_raw = TOA planetary reflectance, without correction for solar angle.
        R  = TOA reflectance with a correction for the sun angle.
        MR = Band-specific multiplicative rescaling factor from the metadata
             (REFLECTANCE_MULT_BAND_x, where x is the band number)
        AR = Band-specific additive rescaling factor from the metadata
             (REFLECTANCE_ADD_BAND_x, where x is the band number)
        Q  = Quantized and calibrated standard product pixel values (DN)
        E  = Local sun elevation angle. The scene center sun elevation angle
             in degrees is provided in the metadata (SUN_ELEVATION).
        Z  = Local solar zenith angle (same angle as E, but measured from the
             zenith instead of from the horizon).

        Returns
        --------
        ndarray:
            float32 ndarray with shape == input shape
        """
        if band not in self.oli_bands:
            raise ValueError('Landsat 8 reflectance should be an OLI band '
                             '(i.e. bands 1-8)')

        elev = getattr(self, 'sun_elevation')
        dn = self._get_band('b{}'.format(band))
        mr = getattr(self, 'reflectance_mult_band_{}'.format(band))
        ar = getattr(self, 'reflectance_add_band_{}'.format(band))

        if elev < 0.0:
            raise ValueError("Sun elevation must be non-negative "
                             "(sun must be above horizon for entire scene)")

        rf = ((mr * dn.astype(float32)) + ar) / sin(deg2rad(elev))
        return rf

    def radiance(self, band):
        """Calculate top of atmosphere radiance of Landsat 8
        as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

        L = ML * Q + AL

        where:
        L  = TOA spectral radiance (Watts / (m2 * srad * mm))
        ML = Band-specific multiplicative rescaling factor from the metadata
             (RADIANCE_MULT_BAND_x, where x is the band number)
        AL = Band-specific additive rescaling factor from the metadata
             (RADIANCE_ADD_BAND_x, where x is the band number)
        Q  = Quantized and calibrated standard product pixel values (DN)
             (ndarray img)

        Returns
        --------
        ndarray:
            float32 ndarray with shape == input shape
        """
        ml = getattr(self, 'radiance_mult_band_{}'.format(band))
        al = getattr(self, 'radiance_add_band_{}'.format(band))
        dn = self._get_band('b{}'.format(band))
        rad = ml * dn.astype(float32) + al
        return rad

    def albedo(self):
        """Smith (2010), finds broad-band surface reflectance (albedo)
        Should have option for Liang, 2000; Tasumi, 2008;

        LC8 toa reflectance bands 2, 4, 5, 6, 7

        # normalized i.e. 0.356 + 0.130 + 0.373 + 0.085 + 0.07 = 1.014
        :return albedo array of floats
        """
        blue, red, nir, swir1, swir2 = (self.reflectance(2), self.reflectance(4),
                                        self.reflectance(5), self.reflectance(6),
                                        self.reflectance(7))
        alb = (0.356 * blue + 0.130 * red + 0.373 * nir + 0.085 * swir1
               + 0.072 * swir2 - 0.0018) / 1.014
        return alb

    def ndvi(self):
        """ Normalized difference vegetation index.
        :return: NDVI
        """
        red, nir = self.reflectance(4), self.reflectance(5)
        ndvi = self._divide_zero((nir - red), (nir + red), nan)
        return ndvi

    def lai(self):
        """ Leaf area index (LAI), or the surface area of leaves to surface area ground.
        Trezza and Allen, 2014
        :param ndvi: normalized difference vegetation index [-]
        :return: LAI [-]
        """
        ndvi = self.ndvi()
        lai = 7.0 * (ndvi ** 3)
        lai = where(lai > 6., 6., lai)
        return lai

    def emissivity(self, approach='tasumi'):
        ndvi = self.ndvi()
        if approach == 'tasumi':
            lai = self.lai()
            # Tasumi et al., 2003
            # narrow-band emissivity
            nb_epsilon = where((ndvi > 0) & (lai <= 3), 0.97 + 0.0033 * lai, nan)
            nb_epsilon = where((ndvi > 0) & (lai > 3), 0.98, nb_epsilon)
            nb_epsilon = where(ndvi <= 0, 0.99, nb_epsilon)
            return nb_epsilon

        if approach == 'sobrino':
            # Sobrino et al., 2004
            # band 4 is red on OLI
            red = self.reflectance(4)
            bound_ndvi = where(ndvi > 0.5, ndvi, 0.99)
            bound_ndvi = where(ndvi < 0.2, red, bound_ndvi)
            pv = ((ndvi - 0.2) / (0.5 - 0.2)) ** 2
            pv_emiss = 0.004 * pv + 0.986
            emissivity = where((ndvi >= 0.2) & (ndvi <= 0.5), pv_emiss,
                               bound_ndvi)
            return emissivity

    def land_surface_temp(self):
        band = 10
        k1 = getattr(self, 'k1_constant_band_{}'.format(band))
        k2 = getattr(self, 'k2_constant_band_{}'.format(band))
        rp = 0.91
        tau = 0.866
        rsky = 1.32
        epsilon = self.emissivity()
        rc = ((self.radiance(band) - rp) / tau) - ((1 - epsilon) * rsky)
        lst = k2 / (log((epsilon * k1 / rc) + 1))
        return lst

    def ndsi(self):
        """ Normalized difference snow index.
        :return: NDSI
        """
        green, swir1 = self.reflectance(3), self.reflectance(6)
        ndsi = self._divide_zero((green - swir1), (green + swir1), nan)
        return ndsi

# =============================================================================================
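The thermal conversion above is just two array operations once the MTL constants are in hand. A minimal standalone sketch of the same T = K2 / log((K1 / L) + 1) pipeline, assuming NumPy; the band-10 constants here are illustrative stand-ins for values normally read from the scene's MTL metadata:

import numpy as np

# Illustrative TIRS band-10 constants; in practice K1/K2 and the radiance
# rescaling factors come from the MTL file (e.g. K1_CONSTANT_BAND_10).
K1, K2 = 774.8853, 1321.0789
ML, AL = 3.342e-4, 0.1

def brightness_temp_k(dn):
    """DN -> TOA radiance -> at-satellite brightness temperature (kelvin)."""
    rad = ML * dn.astype(np.float32) + AL  # L = ML * Q + AL
    return K2 / np.log((K1 / rad) + 1.0)   # T = K2 / log((K1 / L) + 1)

print(brightness_temp_k(np.array([20000, 30000])))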
apache-2.0
-1,773,811,191,002,346,000
34.419481
109
0.541011
false
3.510942
false
false
false
gchq/gaffer-tools
python-shell/src/example.py
1
26676
# # Copyright 2016-2019 Crown Copyright # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from gafferpy import gaffer as g from gafferpy import gaffer_connector def run(host, verbose=False): return run_with_connector(create_connector(host, verbose)) def run_with_connector(gc): print() print('Running operations') print('--------------------------') print() get_schema(gc) get_filter_functions(gc) get_class_filter_functions(gc) get_element_generators(gc) get_object_generators(gc) get_operations(gc) get_serialised_fields(gc) get_store_traits(gc) is_operation_supported(gc) add_elements(gc) get_elements(gc) get_adj_seeds(gc) get_all_elements(gc) get_walks(gc) generate_elements(gc) generate_domain_objs(gc) generate_domain_objects_chain(gc) get_element_group_counts(gc) get_sub_graph(gc) export_to_gaffer_result_cache(gc) get_job_details(gc) get_all_job_details(gc) add_named_operation(gc) get_all_named_operations(gc) named_operation(gc) delete_named_operation(gc) add_named_view_summarise(gc) add_named_view_date_range(gc) get_all_named_views(gc) named_view_summarise(gc) named_view_date_range(gc) named_views(gc) delete_named_views(gc) sort_elements(gc) max_element(gc) min_element(gc) to_vertices_to_entity_seeds(gc) complex_op_chain(gc) op_chain_in_json(gc) def create_connector(host, verbose=False): return gaffer_connector.GafferConnector(host, verbose) def get_schema(gc): # Get Schema result = gc.execute_get( g.GetSchema() ) print('Schema:') print(result) print() def get_filter_functions(gc): # Get filter functions result = gc.execute_get( g.GetFilterFunctions() ) print('Filter Functions:') print(result) print() def get_class_filter_functions(gc): # Get class filter functions class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan' result = gc.execute_get( g.GetClassFilterFunctions(class_name=class_name) ) print('Class Filter Functions (IsMoreThan):') print(result) print() def get_element_generators(gc): # Get Element generators result = gc.execute_get( g.GetElementGenerators() ) print('Element generators:') print(result) print() def get_object_generators(gc): # Get Object generators result = gc.execute_get( g.GetObjectGenerators() ) print('Object generators:') print(result) print() def get_operations(gc): # Get operations result = gc.execute_get( g.GetOperations() ) print('Operations:') print(result) print() def get_serialised_fields(gc): # Get serialised fields class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan' result = gc.execute_get( g.GetSerialisedFields(class_name=class_name) ) print('Serialised Fields (IsMoreThan):') print(result) print() def get_store_traits(gc): # Get Store Traits result = gc.execute_get( g.GetStoreTraits() ) print('Store Traits:') print(result) print() def is_operation_supported(gc): # Is operation supported operation = 'uk.gov.gchq.gaffer.operation.impl.add.AddElements' result = gc.is_operation_supported( g.IsOperationSupported(operation=operation) ) print( '\nOperation supported ("uk.gov.gchq.gaffer.operation.impl.add.AddElements"):') print(result) print() def 
add_elements(gc): # Add Elements gc.execute_operation( g.AddElements( input=[ g.Entity( group='JunctionUse', vertex='M1:1', properties={ 'countByVehicleType': g.freq_map({ 'BUS': 10, 'CAR': 50 }), 'endDate': g.date(1034319600000), 'count': g.long(60), 'startDate': g.date(1034316000000) } ), g.Edge( group='RoadHasJunction', source='M1', destination='M1:1', directed=True, properties={} ) ] ) ) print('Elements have been added') print() def get_elements(gc): # Get Elements input = gc.execute_operation( g.GetElements( input=[ g.EntitySeed('M5:10'), # Edge input can be provided as follows g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.EITHER), g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.DIRECTED), # Or you can use True or False for the direction g.EdgeSeed('M5:10', 'M5:11', True) ], view=g.View( edges=[ g.ElementDefinition( group='RoadUse', group_by=[], transient_properties=[ g.Property('description', 'java.lang.String') ], pre_aggregation_filter_functions=[ g.PredicateContext( selection=['count'], predicate=g.IsMoreThan( value=g.long(1) ) ) ], transform_functions=[ g.FunctionContext( selection=['SOURCE', 'DESTINATION', 'count'], function=g.Function( class_name='uk.gov.gchq.gaffer.traffic.transform.DescriptionTransform' ), projection=['description'] ) ] ) ] ), directed_type=g.DirectedType.EITHER ) ) print('Related input') print(input) print() def get_adj_seeds(gc): # Adjacent Elements - chain 2 adjacent entities together adj_seeds = gc.execute_operations( [ g.GetAdjacentIds( input=[ g.EntitySeed( vertex='M5' ) ], view=g.View( edges=[ g.ElementDefinition( 'RoadHasJunction', group_by=[] ) ] ), include_incoming_out_going=g.InOutType.OUT ), g.GetAdjacentIds( view=g.View( edges=[ g.ElementDefinition( 'RoadUse', group_by=[] ) ] ), include_incoming_out_going=g.InOutType.OUT ) ] ) print('Adjacent entities - 2 hop') print(adj_seeds) print() def get_all_elements(gc): # Get all input, but limit the total results to 3 all_elements = gc.execute_operations( operations=[ g.GetAllElements(), g.Limit(result_limit=3) ] ) print('All input (Limited to first 3)') print(all_elements) print() def get_walks(gc): # Get walks from M32 traversing down RoadHasJunction then JunctionLocatedAt walks = gc.execute_operation( g.GetWalks( input=[ g.EntitySeed('M32'), ], operations=[ g.GetElements( view=g.View( edges=[ g.ElementDefinition( group='RoadHasJunction' ) ] ) ), g.GetElements( view=g.View( edges=[ g.ElementDefinition( group='JunctionLocatedAt' ) ] ) ) ] ) ) print( 'Walks from M32 traversing down RoadHasJunction then JunctionLocatedAt') print(walks) print() def generate_elements(gc): # Generate Elements input = gc.execute_operation( g.GenerateElements( element_generator=g.ElementGenerator( class_name='uk.gov.gchq.gaffer.traffic.generator.RoadTrafficStringElementGenerator' ), input=[ '"South West","E06000054","Wiltshire","6016","389200","179080","M4","LA Boundary","381800","180030","17","391646","179560","TM","E","2000","2000-05-03 00:00:00","7","0","9","2243","15","426","127","21","20","37","106","56","367","3060"' ] ) ) print('Generated input from provided domain input') print(input) print() def generate_domain_objs(gc): # Generate Domain Objects - single provided element input = gc.execute_operation( g.GenerateObjects( element_generator=g.ElementGenerator( class_name='uk.gov.gchq.gaffer.rest.example.ExampleDomainObjectGenerator' ), input=[ g.Entity('entity', '1'), g.Edge('edge', '1', '2', True) ] ) ) print('Generated input from provided input') print(input) print() def generate_domain_objects_chain(gc): # Generate Domain 
Objects - chain of get input then generate input input = gc.execute_operations( [ g.GetElements( input=[g.EntitySeed(vertex='M5')], seed_matching_type=g.SeedMatchingType.RELATED, view=g.View( edges=[ g.ElementDefinition( group='RoadHasJunction', group_by=[] ) ] ) ), g.GenerateObjects( element_generator=g.ElementGenerator( class_name='uk.gov.gchq.gaffer.rest.example.ExampleDomainObjectGenerator' ) ) ] ) print('Generated input from get input by seed') print(input) print() def get_element_group_counts(gc): # Get Elements group_counts = gc.execute_operations([ g.GetElements( input=[g.EntitySeed('M5')] ), g.CountGroups(limit=1000) ]) print('Groups counts (limited to 1000 input)') print(group_counts) print() def get_sub_graph(gc): # Export and Get to/from an in memory set entity_seeds = gc.execute_operations( [ g.GetAdjacentIds( input=[g.EntitySeed('South West')], include_incoming_out_going=g.InOutType.OUT ), g.ExportToSet(), g.GetAdjacentIds(include_incoming_out_going=g.InOutType.OUT), g.ExportToSet(), g.DiscardOutput(), g.GetSetExport() ] ) print('Export and Get to/from an in memory set') print(entity_seeds) print() def export_to_gaffer_result_cache(gc): # Export to Gaffer Result Cache and Get from Gaffer Result Cache job_details = gc.execute_operations( [ g.GetAdjacentIds( input=[g.EntitySeed('South West')], include_incoming_out_going=g.InOutType.OUT ), g.ExportToGafferResultCache(), g.DiscardOutput(), g.GetJobDetails() ] ) print('Export to Gaffer Result Cache. Job Details:') print(job_details) print() job_id = job_details['jobId'] entity_seeds = gc.execute_operation( g.GetGafferResultCacheExport(job_id=job_id), ) print('Get Gaffer Result Cache Export.') print(entity_seeds) print() def get_job_details(gc): # Get all job details job_details_initial = gc.execute_operations( [ g.GetAdjacentIds( input=[g.EntitySeed('1')], ), g.ExportToGafferResultCache(), g.DiscardOutput(), g.GetJobDetails() ] ) job_id = job_details_initial['jobId'] job_details = gc.execute_operation( g.GetJobDetails(job_id=job_id), ) print('Get job details') print(job_details) print() def get_all_job_details(gc): # Get all job details all_job_details = gc.execute_operation( g.GetAllJobDetails(), ) print('Get all job details (just prints the first 3 results)') print(all_job_details[:3]) print() def delete_named_operation(gc): gc.execute_operation( g.DeleteNamedOperation('2-hop-with-limit') ) print('Deleted named operation: 2-hop-with-limit') print() def add_named_operation(gc): gc.execute_operation( g.AddNamedOperation( operation_chain={ "operations": [{ "class": "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds", "includeIncomingOutGoing": "OUTGOING" }, { "class": "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds", "includeIncomingOutGoing": "OUTGOING" }, { "class": "uk.gov.gchq.gaffer.operation.impl.Limit", "resultLimit": "${param1}" }] }, operation_name='2-hop-with-limit', description='2 hop query with limit', overwrite_flag=True, read_access_roles=["read-user"], write_access_roles=["write-user"], parameters=[ g.NamedOperationParameter( name="param1", description="Limit param", default_value=1, value_class="java.lang.Long", required=False ) ] ) ) print('Added named operation: 2-hop-with-limit') print() def get_all_named_operations(gc): namedOperations = gc.execute_operation( g.GetAllNamedOperations() ) print('Named operations') print(namedOperations) print() def named_operation(gc): result = gc.execute_operation( g.NamedOperation( operation_name='2-hop-with-limit', parameters={ 'param1': 2 }, input=[ g.EntitySeed('M5') 
] ) ) print('Execute named operation') print(result) print() def delete_named_views(gc): gc.execute_operation( g.DeleteNamedView(name='summarise') ) print('Deleted named view: summarise') gc.execute_operation( g.DeleteNamedView(name='dateRange') ) print('Deleted named view: dateRange') print() def add_named_view_summarise(gc): gc.execute_operation( g.AddNamedView( view=g.View( global_elements=[ g.GlobalElementDefinition(group_by=[]) ] ), name='summarise', description='Summarises all results (overrides the groupBy to an empty array).', overwrite_flag=True ) ) print('Added named view: summarise') print() def add_named_view_date_range(gc): gc.execute_operation( g.AddNamedView( view=g.View( global_elements=g.GlobalElementDefinition( pre_aggregation_filter_functions=[ g.PredicateContext( selection=['startDate'], predicate=g.InDateRange( start='${start}', end='${end}' ) ) ] ) ), name='dateRange', description='Filters results to a provided date range.', overwrite_flag=True, parameters=[ g.NamedViewParameter( name="start", description="A date string for the start of date range.", value_class="java.lang.String", required=False ), g.NamedViewParameter( name="end", description="A date string for the end of the date range.", value_class="java.lang.String", required=False ) ] ) ) print('Added named view: dateRange') print() def get_all_named_views(gc): namedViews = gc.execute_operation( g.GetAllNamedViews() ) print('Named views') print(namedViews) print() def named_view_summarise(gc): result = gc.execute_operation( g.GetElements( input=[ g.EntitySeed( vertex='M32:1' ) ], view=g.NamedView( name="summarise" ) ) ) print('Execute get elements with summarised named view') print(result) print() def named_view_date_range(gc): result = gc.execute_operation( g.GetElements( input=[ g.EntitySeed( vertex='M32:1' ) ], view=g.NamedView( name="dateRange", parameters={ 'start': '2005/05/03 06:00', 'end': '2005/05/03 09:00' } ) ) ) print('Execute get elements with date range named view') print(result) print() def named_views(gc): result = gc.execute_operation( g.GetElements( input=[ g.EntitySeed( vertex='M32:1' ) ], view=[ g.NamedView( name="summarise" ), g.NamedView( name="dateRange", parameters={ 'start': '2005/05/03 06:00', 'end': '2005/05/03 09:00' } ) ] ) ) print('Execute get elements with summarised and date range named views') print(result) print() def sort_elements(gc): # Get sorted Elements input = gc.execute_operations([ g.GetAllElements( view=g.View( edges=[ g.ElementDefinition( group='RoadUse', group_by=[] ) ] ) ), g.Sort( comparators=[ g.ElementPropertyComparator( groups=['RoadUse'], property='count' ) ], result_limit=5 ) ]) print('Sorted input') print(input) print() def max_element(gc): # Get sorted Elements input = gc.execute_operations([ g.GetAllElements( view=g.View( edges=[ g.ElementDefinition( group='RoadUse', group_by=[] ) ] ) ), g.Max( comparators=[ g.ElementPropertyComparator( groups=['RoadUse'], property='count' ) ] ) ]) print('Max element') print(input) print() def min_element(gc): # Get sorted Elements input = gc.execute_operations([ g.GetAllElements( view=g.View( edges=[ g.ElementDefinition( group='RoadUse', group_by=[] ) ] ) ), g.Min( comparators=[ g.ElementPropertyComparator( groups=['RoadUse'], property='count' ) ] ) ]) print('Min element') print(input) print() def to_vertices_to_entity_seeds(gc): # Get sorted Elements input = gc.execute_operations([ g.GetElements( input=[ g.EntitySeed( vertex='South West' ) ], view=g.View( edges=[ g.ElementDefinition( 'RegionContainsLocation', group_by=[] 
) ] ), include_incoming_out_going=g.InOutType.OUT ), g.ToVertices( edge_vertices=g.EdgeVertices.DESTINATION, use_matched_vertex=g.UseMatchedVertex.OPPOSITE ), g.ToEntitySeeds(), g.GetElements( view=g.View( edges=[ g.ElementDefinition( 'LocationContainsRoad', group_by=[] ) ] ), include_incoming_out_going=g.InOutType.OUT ), g.Limit(5) ]) print('ToVertices then ToEntitySeeds') print(input) print() def complex_op_chain(gc): # All road junctions in the South West that were heavily used by buses in year 2000. junctions = gc.execute_operations( operations=[ g.GetAdjacentIds( input=[g.EntitySeed(vertex='South West')], view=g.View( edges=[ g.ElementDefinition( group='RegionContainsLocation', group_by=[] ) ] ) ), g.GetAdjacentIds( view=g.View( edges=[ g.ElementDefinition( group='LocationContainsRoad', group_by=[] ) ] ) ), g.ToSet(), g.GetAdjacentIds( view=g.View( edges=[ g.ElementDefinition( group='RoadHasJunction', group_by=[] ) ] ) ), g.GetElements( view=g.View( entities=[ g.ElementDefinition( group='JunctionUse', group_by=[], transient_properties=[ g.Property('busCount', 'java.lang.Long') ], pre_aggregation_filter_functions=[ g.PredicateContext( selection=['startDate'], predicate=g.InDateRange( start='2000/01/01', end='2001/01/01' ) ) ], post_aggregation_filter_functions=[ g.PredicateContext( selection=['countByVehicleType'], predicate=g.PredicateMap( predicate=g.IsMoreThan( value={'java.lang.Long': 1000}, or_equal_to=False ), key='BUS' ) ) ], transform_functions=[ g.FunctionContext( selection=['countByVehicleType'], function=g.FreqMapExtractor(key='BUS'), projection=['busCount'] ) ] ) ] ), include_incoming_out_going=g.InOutType.OUT ), g.ToCsv( element_generator=g.CsvGenerator( fields={ 'VERTEX': 'Junction', 'busCount': 'Bus Count' }, quoted=False ), include_header=True ) ] ) print( 'All road junctions in the South West that were heavily used by buses in year 2000.') print(junctions) print() def op_chain_in_json(gc): # Operation chain defined in json result = gc.execute_operation_chain( { "class": "uk.gov.gchq.gaffer.operation.OperationChain", "operations": [{ "class": "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements", }, { "class": "uk.gov.gchq.gaffer.operation.impl.CountGroups" }] } ) print('Operation chain defined in json') print(result) print() if __name__ == "__main__": run('http://localhost:8080/rest/latest', False)
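Every helper in this example file follows the same shape: build an operation object from gafferpy, hand it to the connector, print the result. A minimal sketch of that pattern, using only calls that appear in the file and assuming a Gaffer REST endpoint on localhost:

from gafferpy import gaffer as g
from gafferpy import gaffer_connector

gc = gaffer_connector.GafferConnector('http://localhost:8080/rest/latest')
# Build an operation, execute it against the REST API, inspect the result.
elements = gc.execute_operation(g.GetAllElements())
print(elements)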
apache-2.0
-6,482,966,961,602,001,000
26.700935
252
0.458315
false
4.535963
false
false
false
menegazzo/travispy
setup.py
2
1873
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys


class PyTest(TestCommand):
    user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import pytest
        args = ['travispy']
        if self.pytest_args:
            args.insert(0, self.pytest_args)
        errno = pytest.main(args)
        sys.exit(errno)


setup(
    name='TravisPy',
    version='0.3.5',
    packages=['travispy', 'travispy.entities'],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
    install_requires=['requests'],

    # metadata for upload to PyPI
    author='Fabio Menegazzo',
    author_email='[email protected]',
    description='Python API for Travis CI.',
    long_description=open('README.rst').read(),
    license='GPL',
    keywords='travis ci continuous integration travisci',
    url='https://github.com/menegazzo/travispy',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],

    # tests
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
)
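The PyTest command class above reroutes `python setup.py test` through py.test. A minimal sketch of what its run_tests hook effectively executes once setuptools hands over control (assuming pytest is importable); the '-x' flag stands in for whatever --pytest-args the user passed:

import sys

import pytest

# Extra arguments are prepended, pytest is pointed at the package, and the
# process exits with pytest's status code.
errno = pytest.main(['-x', 'travispy'])
sys.exit(errno)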
gpl-3.0
-5,545,387,666,978,756,000
29.704918
75
0.608649
false
3.893971
true
false
false
tulikavijay/vms
vms/administrator/tests/test_report.py
1
16008
# third party from selenium import webdriver from selenium.common.exceptions import NoSuchElementException # Django from django.contrib.staticfiles.testing import LiveServerTestCase from django.db import IntegrityError # local Django from pom.locators.administratorReportPageLocators import * from pom.pages.administratorReportPage import AdministratorReportPage from pom.pages.authenticationPage import AuthenticationPage from shift.utils import ( create_admin, create_volunteer, create_organization_with_details, create_event_with_details, create_job_with_details, create_shift_with_details, log_hours_with_details, register_volunteer_for_shift_utility ) class Report(LiveServerTestCase): ''' ''' @classmethod def setUpClass(cls): cls.driver = webdriver.Firefox() cls.driver.implicitly_wait(5) cls.driver.maximize_window() cls.authentication_page = AuthenticationPage(cls.driver) cls.report_page = AdministratorReportPage(cls.driver) cls.elements = AdministratorReportPageLocators() super(Report, cls).setUpClass() def setUp(self): create_admin() self.login_admin() self.report_page.go_to_admin_report() def tearDown(self): pass @classmethod def tearDownClass(cls): cls.driver.quit() super(Report, cls).tearDownClass() def login_admin(self): self.authentication_page.server_url = self.live_server_url self.authentication_page.login({ 'username' : 'admin', 'password' : 'admin'}) def verify_shift_details(self, total_shifts, hours): total_no_of_shifts = self.report_page.get_shift_summary().split(' ')[10].strip('\nTotal') total_no_of_hours = self.report_page.get_shift_summary().split(' ')[-1].strip('\n') self.assertEqual(total_no_of_shifts, total_shifts) self.assertEqual(total_no_of_hours, hours) #Failing test case which has been documented #Test commented out to prevent travis build failure - bug #327 """def test_null_values_with_dataset(self): # register dataset org = create_organization_with_details('organization-one') volunteer = create_volunteer() volunteer.organization = org volunteer.save() # create shift and log hours # register event first to create job event = ['Hackathon', '2017-08-21', '2017-09-28'] created_event = create_event_with_details(event) # create job job = ['Developer', '2017-08-21', '2017-08-30', '',created_event] created_job = create_job_with_details(job) # create shift shift = ['2017-08-21', '09:00', '15:00', '10', created_job] created_shift = create_shift_with_details(shift) logged_shift = log_hours_with_details(volunteer, created_shift, "09:00", "12:00") report_page = self.report_page # check admin report with null fields, should return the above shift report_page.fill_report_form(['','','','','']) self.verify_shift_details('1','3.0') self.assertEqual(report_page.element_by_xpath( self.elements.NAME).text, created_event.name) self.assertEqual(report_page.element_by_xpath( self.elements.DATE).text, 'Aug. 
21, 2016') self.assertEqual(report_page.element_by_xpath( self.elements.START_TIME).text, '9 a.m.') self.assertEqual(report_page.element_by_xpath( self.elements.END_TIME).text, '12 p.m.') self.assertEqual(report_page.element_by_xpath( self.elements.HOURS).text, '3.0')""" def test_null_values_with_empty_dataset(self): # should return no entries report_page = self.report_page report_page.fill_report_form(['','','','','']) self.assertEqual(report_page.get_alert_box_text(),report_page.no_results_message) def test_only_logged_shifts_are_reported(self): # register dataset org = create_organization_with_details('organization-one') volunteer = create_volunteer() volunteer.organization = org volunteer.save() # register event first to create job event = ['Hackathon', '2017-08-21', '2017-09-28'] created_event = create_event_with_details(event) # create job job = ['Developer', '2017-08-21', '2017-08-30', '',created_event] created_job = create_job_with_details(job) # create shift shift = ['2017-08-21', '09:00', '15:00', '10', created_job] created_shift = create_shift_with_details(shift) # shift is assigned to volunteer-one, but hours have not been logged volunteer_shift = register_volunteer_for_shift_utility(created_shift, volunteer) report_page = self.report_page # check admin report with null fields, should not return the above shift report_page.fill_report_form(['','','','','']) self.assertEqual(report_page.get_alert_box_text(),report_page.no_results_message) #Failing test case which has been documented - bug #327 #Test commented out to prevent travis build failure """def test_check_intersection_of_fields(self): self.create_dataset() report_page = self.report_page search_parameters_1 = ['tom','','','',''] report_page.fill_report_form(search_parameters_1) self.verify_shift_details('2','2.0') search_parameters_2 = ['','','','','org-one'] report_page.fill_report_form(search_parameters_2) self.verify_shift_details('3','3.0') search_parameters_3 = ['','','event-four','Two',''] report_page.fill_report_form(search_parameters_3) # 1 shift of 1:30 hrs self.verify_shift_details('1','1.5') search_parameters_4 = ['','','one','',''] report_page.fill_report_form(search_parameters_4) # 3 shifts of 0:30 hrs, 1:00 hrs, 1:00 hrs self.verify_shift_details('3','2.5') # check case-insensitive search_parameters_5 = ['','sherlock','two','',''] report_page.fill_report_form(search_parameters_5) self.verify_shift_details('1','2.0') def create_dataset(self): parameters = {'org' : 'org-one', 'volunteer' : { 'username' : 'uname1', 'password' : 'uname1', 'email' : '[email protected]', 'first_name' : 'tom-fname', 'last_name' : 'tom-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-four', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobOneInEventFour', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '09:00', 'end_time' : '11:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '09:30', 'end_time' : '10:00',}} self.register_dataset(parameters) parameters = {'org' : 'org-one', 'volunteer' : { 'username' : 'uname2', 'password' : 'uname2', 'email' : '[email protected]', 'first_name' : 'peter-fname', 'last_name' : 'peter-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-one', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 
'jobOneInEventOne', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '18:00', 'end_time' : '23:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '19:00', 'end_time' : '20:00'}} self.register_dataset(parameters) parameters = {'org' : 'org-one', 'volunteer' : { 'username' : 'uname3', 'password' : 'uname3', 'email' : '[email protected]', 'first_name' : 'tom-fname', 'last_name' : 'tom-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-four', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobTwoInEventFour', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '09:00', 'end_time' : '15:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '10:00', 'end_time' : '11:30'}} self.register_dataset(parameters) parameters = {'org' : 'org-two', 'volunteer' : { 'username' : 'uname4', 'password' : 'uname4', 'email' : '[email protected]', 'first_name' : 'harry-fname', 'last_name' : 'harry-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-one', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobTwoInEventOne', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '09:00', 'end_time' : '11:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '09:00', 'end_time' : '10:00'}} self.register_dataset(parameters) parameters = {'org' : 'org-two', 'volunteer' : { 'username' : 'uname5', 'password' : 'uname5', 'email' : '[email protected]', 'first_name' : 'harry-fname', 'last_name' : 'harry-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-two', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobOneInEventTwo', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '09:00', 'end_time' : '18:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '12:00', 'end_time' : '15:00'}} self.register_dataset(parameters) parameters = {'org' : 'org-three', 'volunteer' : { 'username' : 'uname6', 'password' : 'uname6', 'email' : '[email protected]', 'first_name' : 'sherlock-fname', 'last_name' : 'sherlock-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-two', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobOneInEventTwo', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '09:00', 'end_time' : '16:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '12:00', 'end_time' : '14:00'}} self.register_dataset(parameters) parameters = {'org' : 'org-four', 'volunteer' : { 'username' : 'uname7', 'password' : 'uname7', 'email' : '[email protected]', 'first_name' : 'harvey-fname', 'last_name' : 'harvey-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-one', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobThreeInEventOne', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : 
'09:00', 'end_time' : '13:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '12:00', 'end_time' : '12:30'}} self.register_dataset(parameters) parameters = {'org' : 'org-four', 'volunteer' : { 'username' : 'uname8', 'password' : 'uname8', 'email' : '[email protected]', 'first_name' : 'mike-fname', 'last_name' : 'mike-lname', 'address' : 'address', 'city' : 'city', 'state' : 'state', 'country' : 'country', 'phone-no' : '9999999999'}, 'event' : { 'name' : 'event-three', 'start_date' : '2016-06-01', 'end_date' : '2016-06-10'}, 'job' : { 'name' : 'jobOneInEventThree', 'start_date' : '2016-06-01', 'end_date' : '2016-06-01'}, 'shift' : { 'date' : '2016-06-01', 'start_time' : '01:00', 'end_time' : '10:00', 'max_volunteers' : '10'}, 'vshift' : { 'start_time' : '01:00', 'end_time' : '04:00'}} self.register_dataset(parameters)"""
gpl-2.0
6,484,563,254,342,688,000
37.760291
97
0.446777
false
3.889213
true
false
false
uclouvain/osis_louvain
base/models/offer.py
1
2121
##############################################################################
#
#    OSIS stands for Open Student Information System. It's an application
#    designed to manage the core business of higher education institutions,
#    such as universities, faculties, institutes and professional schools.
#    The core business involves the administration of students, teachers,
#    courses, programs and so on.
#
#    Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    A copy of this license - GNU General Public License - is available
#    at the root of the source code of this program. If not,
#    see http://www.gnu.org/licenses/.
#
##############################################################################
from django.db import models

from osis_common.models.serializable_model import SerializableModel, SerializableModelAdmin


class OfferAdmin(SerializableModelAdmin):
    list_display = ('id', 'title', 'changed')
    search_fields = ['title']


class Offer(SerializableModel):
    external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
    changed = models.DateTimeField(null=True, auto_now=True)
    title = models.CharField(max_length=255)

    def __str__(self):
        return "{} {}".format(self.id, self.title)

    class Meta:
        permissions = (
            ("can_access_offer", "Can access offer"),
            ("can_access_catalog", "Can access catalog"),
        )


def find_by_id(offer_id):
    try:
        return Offer.objects.get(pk=offer_id)
    except Offer.DoesNotExist:
        return None
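The module-level find_by_id helper above returns None instead of raising when the offer is missing, so callers branch on the result. A minimal usage sketch, assuming a configured Django project with this app's migrations applied; the primary key 42 is an arbitrary example:

from base.models import offer

o = offer.find_by_id(42)
if o is None:
    print('no such offer')
else:
    print(str(o))  # "<id> <title>", per __str__ above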
agpl-3.0
137,301,873,159,780,320
38.259259
91
0.649528
false
4.007561
false
false
false
django-oscar/django-oscar-mws
oscar_mws/migrations/0002_auto__add_field_fulfillmentorderline_shipment__add_field_fulfillmentor.py
1
34713
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'FulfillmentOrderLine.shipment' db.add_column('oscar_mws_fulfillmentorderline', 'shipment', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='order_lines', null=True, to=orm['oscar_mws.FulfillmentShipment']), keep_default=False) # Adding field 'FulfillmentOrderLine.package' db.add_column('oscar_mws_fulfillmentorderline', 'package', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='order_lines', null=True, to=orm['oscar_mws.ShipmentPackage']), keep_default=False) # Changing field 'FulfillmentOrderLine.line' db.alter_column('oscar_mws_fulfillmentorderline', 'line_id', self.gf('django.db.models.fields.related.OneToOneField')(unique=True, to=orm['order.Line'])) # Adding unique constraint on 'FulfillmentOrderLine', fields ['line'] db.create_unique('oscar_mws_fulfillmentorderline', ['line_id']) # Adding field 'ShipmentPackage.package_number' db.add_column('oscar_mws_shipmentpackage', 'package_number', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Removing unique constraint on 'FulfillmentOrderLine', fields ['line'] db.delete_unique('oscar_mws_fulfillmentorderline', ['line_id']) # Deleting field 'FulfillmentOrderLine.shipment' db.delete_column('oscar_mws_fulfillmentorderline', 'shipment_id') # Deleting field 'FulfillmentOrderLine.package' db.delete_column('oscar_mws_fulfillmentorderline', 'package_id') # Changing field 'FulfillmentOrderLine.line' db.alter_column('oscar_mws_fulfillmentorderline', 'line_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.Line'])) # Deleting field 'ShipmentPackage.package_number' db.delete_column('oscar_mws_shipmentpackage', 'package_number') models = { 'address.country': { 'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'}, 'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}), 'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}), 'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}), 'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'catalogue.attributeentity': { 'Meta': {'object_name': 'AttributeEntity'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"}) }, 'catalogue.attributeentitytype': { 'Meta': {'object_name': 'AttributeEntityType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}) }, 'catalogue.attributeoption': { 'Meta': {'object_name': 'AttributeOption'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'option': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'catalogue.attributeoptiongroup': { 'Meta': {'object_name': 'AttributeOptionGroup'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'catalogue.category': { 'Meta': {'ordering': "['full_name']", 'object_name': 'Category'}, 'depth': ('django.db.models.fields.PositiveIntegerField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}) }, 'catalogue.option': { 'Meta': {'object_name': 'Option'}, 'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'}) }, 'catalogue.product': { 'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'}, 'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}), 'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}), 'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, 'catalogue.productattribute': { 'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'}, 'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}), 'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 
'blank': 'True'}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, 'catalogue.productattributevalue': { 'Meta': {'object_name': 'ProductAttributeValue'}, 'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}), 'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}), 'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}), 'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'catalogue.productcategory': { 'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'catalogue.productclass': { 'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}), 'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'catalogue.productrecommendation': { 'Meta': {'object_name': 'ProductRecommendation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}), 'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'order.billingaddress': { 'Meta': {'object_name': 'BillingAddress'}, 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}) }, 'order.line': { 'Meta': {'object_name': 'Line'}, 'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}), 'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}), 'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}), 'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['order.Order']"}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['partner.Partner']"}), 'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}), 'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 
            'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
        },
        'order.order': {
            'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
            'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
            'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
            'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
            'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
            'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'order.shippingaddress': {
            'Meta': {'object_name': 'ShippingAddress'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'order.shippingevent': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'ShippingEvent'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingEventType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lines': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shipping_events'", 'symmetrical': 'False', 'through': "orm['order.ShippingEventQuantity']", 'to': "orm['order.Line']"}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': "orm['order.Order']"})
        },
        'order.shippingeventquantity': {
            'Meta': {'object_name': 'ShippingEventQuantity'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.ShippingEvent']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_event_quantities'", 'to': "orm['order.Line']"}),
            'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'order.shippingeventtype': {
            'Meta': {'ordering': "('sequence_number',)", 'object_name': 'ShippingEventType'},
            'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'oscar_mws.amazonprofile': {
            'Meta': {'object_name': 'AmazonProfile'},
            'asin': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'fulfillment_by': ('django.db.models.fields.CharField', [], {'default': "'MFN'", 'max_length': '3'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_package_quantity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'launch_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'number_of_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'product': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'amazon_profile'", 'unique': 'True', 'to': "orm['catalogue.Product']"}),
            'product_tax_code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'release_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'oscar_mws.feedreport': {
            'Meta': {'object_name': 'FeedReport'},
            'errors': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processed': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'status_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'submission': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'report'", 'unique': 'True', 'to': "orm['oscar_mws.FeedSubmission']"}),
            'successful': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'warnings': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'oscar_mws.feedresult': {
            'Meta': {'object_name': 'FeedResult'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'feed_report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['oscar_mws.FeedReport']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'oscar_mws.feedsubmission': {
            'Meta': {'ordering': "['-date_updated']", 'object_name': 'FeedSubmission'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {}),
            'date_submitted': ('django.db.models.fields.DateTimeField', [], {}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {}),
            'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processing_status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submission_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'submitted_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'feed_submissions'", 'symmetrical': 'False', 'to': "orm['catalogue.Product']"})
        },
        'oscar_mws.fulfillmentorder': {
            'Meta': {'object_name': 'FulfillmentOrder'},
            'date_updated': ('django.db.models.fields.DateTimeField', [], {}),
            'fulfillment_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lines': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fulfillment_orders'", 'symmetrical': 'False', 'through': "orm['oscar_mws.FulfillmentOrderLine']", 'to': "orm['order.Line']"}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fulfillment_orders'", 'to': "orm['order.Order']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'})
        },
        'oscar_mws.fulfillmentorderline': {
            'Meta': {'object_name': 'FulfillmentOrderLine'},
            'fulfillment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fulfillment_lines'", 'to': "orm['oscar_mws.FulfillmentOrder']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'fulfillment_line'", 'unique': 'True', 'to': "orm['order.Line']"}),
            'order_item_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'package': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'to': "orm['oscar_mws.ShipmentPackage']"}),
            'shipment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'to': "orm['oscar_mws.FulfillmentShipment']"})
        },
        'oscar_mws.fulfillmentshipment': {
            'Meta': {'object_name': 'FulfillmentShipment'},
            'date_estimated_arrival': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_shipped': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'fulfillment_center_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fulfillment_shipments'", 'to': "orm['order.Order']"}),
            'shipment_events': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fulfillment_shipments'", 'symmetrical': 'False', 'to': "orm['order.ShippingEvent']"}),
            'shipment_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '24'})
        },
        'oscar_mws.shipmentpackage': {
            'Meta': {'object_name': 'ShipmentPackage'},
            'carrier_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'fulfillment_shipment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['oscar_mws.FulfillmentShipment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package_number': ('django.db.models.fields.IntegerField', [], {}),
            'tracking_number': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'partner.partner': {
            'Meta': {'object_name': 'Partner'},
            'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['oscar_mws']
bsd-3-clause
-6,517,976,486,046,583,000
85.568579
222
0.563046
false
3.627273
false
false
false
BrAwnyTime/RayTracer
Textures/makeScaledTex.py
1
1585
import numpy as np
import time
import tables
import sys

'''---------------------------------------------------------'''
'''                  Setup PyTables Files                    '''
'''---------------------------------------------------------'''

scale = 2

originalName = "earthScaled8"
scaledName = "earthScaled16"

h5tex = tables.open_file("/home/brad/rayTracer/Textures/textures.h5", mode='a', title="HDF5 Texture File")

og = h5tex.get_node(h5tex.root, name=originalName)

# Rows store interleaved RGB triples, so the pixel width is a
# third of the array width. Integer division keeps the dimensions
# usable as array shapes.
texWidth = og.shape[1] // 3
texHeight = og.shape[0]

scaledWidth = texWidth // scale
scaledHeight = texHeight // scale

scaled = np.zeros((scaledHeight, scaledWidth * 3))

str_time = time.time()
curPercent = 0
lastPercent = 0

for y in range(0, scaledHeight):
    for x in range(0, scaledWidth):
        scaledValue = np.zeros(3)

        # Top-left corner of the source block for this output pixel.
        t_y = y * scale
        t_x = x * scale

        # Progress readout (one decimal place), printed whenever the
        # percentage ticks up.
        curPercent = np.floor((((y * scaledWidth) + (x + 1)) / float(scaledWidth * scaledHeight)) * 1000) / 10.0
        if curPercent > lastPercent:
            lastPercent = curPercent
            cur_sec = time.time() - str_time
            sys.stdout.write("\rScale Texture %.1f%% [%ds]" % (curPercent, cur_sec))
            sys.stdout.flush()

        # Box filter: average the scale x scale block of source RGB
        # pixels that maps onto this output pixel.
        for iy in range(0, scale):
            for ix in range(0, scale):
                scaledValue += og[t_y + iy, (3 * (t_x + ix)):(3 * (t_x + ix)) + 3]

        scaledValue = scaledValue / float(scale ** 2)
        scaled[y, (3 * x):(3 * x) + 3] = scaledValue

earthsmall = h5tex.create_array(h5tex.root, scaledName, scaled, "Scaled texture map of the Earth's surface")

h5tex.close()
mit
3,638,641,487,451,038,000
27.818182
110
0.557098
false
3.274793
false
false
false