repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (string, 19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
hirokihamasaki/irma | frontend/frontend/api/v1_1/controllers/files.py | 1 | 7314 | # Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
from bottle import response, request
from frontend.api.v1_1.errors import process_error
from frontend.helpers.utils import guess_hash_type
from frontend.models.sqlobjects import FileWeb, File
from frontend.api.v1_1.schemas import FileWebSchema_v1_1, ScanSchema_v1_1, \
FileSchema_v1_1
from lib.common.utils import decode_utf8
from lib.irma.common.exceptions import IrmaDatabaseResultNotFound
file_web_schema = FileWebSchema_v1_1()
scan_schema = ScanSchema_v1_1()
file_web_schema.context = {'formatted': True}
log = logging.getLogger(__name__)
file_web_schema_lite = FileWebSchema_v1_1(exclude=['probe_results'])
file_web_schema_lite.context = {'formatted': True}
def list(db):
""" Search a file using query filters (tags + hash or name). Support
pagination.
:param all params are sent using query method
:rtype: dict of 'total': int, 'page': int, 'per_page': int,
'items': list of file(s) found
:return:
on success 'items' contains a list of files found
on error 'msg' gives reason message
"""
try:
name = None
if 'name' in request.query:
name = decode_utf8(request.query['name'])
h_value = request.query.get('hash')
search_tags = request.query.get('tags')
if search_tags is not None:
search_tags = search_tags.split(',')
log.debug("name %s h_value %s search_tags %s",
name, h_value, search_tags)
if name is not None and h_value is not None:
raise ValueError("Can't find using both name and hash")
# Get values from query or default
offset = request.query.get("offset", default=0)
offset = int(offset)
limit = request.query.get("limit", default=25)
limit = int(limit)
if name is not None:
base_query = FileWeb.query_find_by_name(name, search_tags, db)
elif h_value is not None:
h_type = guess_hash_type(h_value)
if h_type is None:
raise ValueError("Hash not supported")
base_query = FileWeb.query_find_by_hash(
h_type, h_value, search_tags, db)
else:
# FIXME this is just a temporary way to output
# all files, need a dedicated
# file route and controller
base_query = FileWeb.query_find_by_name("", search_tags, db)
# TODO: Find a way to move pagination as a BaseQuery like in
# flask_sqlalchemy.
# https://github.com/mitsuhiko/flask-sqlalchemy/blob/master/flask_sqlalchemy/__init__.py#L422
items = base_query.limit(limit).offset(offset).all()
if offset == 0 and len(items) < limit:
total = len(items)
else:
total = base_query.count()
log.debug("Found %s results", total)
response.content_type = "application/json; charset=UTF-8"
return {
'total': total,
'offset': offset,
'limit': limit,
'items': file_web_schema_lite.dump(items, many=True).data,
}
except Exception as e:
log.exception(e)
process_error(e)
def get(sha256, db):
""" Detail about one file and all known scans summary where file was
present (identified by sha256). Support pagination.
:param all params are sent using query method
:param if alt parameter is "media", response will contains the binary data
:rtype: dict of 'total': int, 'page': int, 'per_page': int,
:return:
on success fileinfo contains file information
on success 'items' contains a list of files found
on error 'msg' gives reason message
"""
try:
log.debug("h_value %s", sha256)
        # Check whether it's a download attempt or not
if request.query.alt == "media":
return _download(sha256, db)
# Get values from query or default
offset = request.query.get("offset", default=0)
offset = int(offset)
limit = request.query.get("limit", default=25)
limit = int(limit)
file = File.load_from_sha256(sha256, db)
# query all known results not only those with different names
base_query = FileWeb.query_find_by_hash("sha256", sha256, None, db,
distinct_name=False)
# TODO: Find a way to move pagination as a BaseQuery like in
# flask_sqlalchemy.
# https://github.com/mitsuhiko/flask-sqlalchemy/blob/master/flask_sqlalchemy/__init__.py#L422
items = base_query.limit(limit).offset(offset).all()
if offset == 0 and len(items) < limit:
total = len(items)
else:
total = base_query.count()
log.debug("offset %d limit %d total %d", offset, limit, total)
file_web_schema = FileWebSchema_v1_1(exclude=('probe_results',
'file_infos'))
fileinfo_schema = FileSchema_v1_1()
# TODO: allow formatted to be a parameter
formatted = True
fileinfo_schema.context = {'formatted': formatted}
response.content_type = "application/json; charset=UTF-8"
return {
'file_infos': fileinfo_schema.dump(file).data,
'total': total,
'offset': offset,
'limit': limit,
'items': file_web_schema.dump(items, many=True).data,
}
except Exception as e:
log.exception(e)
process_error(e)
def add_tag(sha256, tagid, db):
""" Attach a tag to a file.
"""
try:
log.debug("h_value %s tagid %s", sha256, tagid)
fobj = File.load_from_sha256(sha256, db)
fobj.add_tag(tagid, db)
db.commit()
except Exception as e:
log.exception(e)
process_error(e)
def remove_tag(sha256, tagid, db):
""" Remove a tag attached to a file.
"""
try:
log.debug("h_value %s tagid %s", sha256, tagid)
fobj = File.load_from_sha256(sha256, db)
fobj.remove_tag(tagid, db)
db.commit()
except Exception as e:
log.exception(e)
process_error(e)
# called by get
def _download(sha256, db):
"""Retrieve a file based on its sha256"""
log.debug("h_value %s", sha256)
fobj = File.load_from_sha256(sha256, db)
# check if file is still present
if fobj.path is None:
raise IrmaDatabaseResultNotFound("downloading a removed file")
# Force download
ctype = 'application/octet-stream; charset=UTF-8'
# Suggest Filename to sha256
cdisposition = "attachment; filename={}".format(sha256)
response.headers["Content-Type"] = ctype
response.headers["Content-Disposition"] = cdisposition
return open(fobj.path).read()
| apache-2.0 | 4,231,076,620,459,806,000 | 34.852941 | 101 | 0.613071 | false | 3.76622 | false | false | false |
qisanstudio/qsapp-suibe | src/suibe/models/channel.py | 1 | 3718 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from jinja2 import Markup
from flask import url_for
from studio.core.engines import db
from sqlalchemy.ext.hybrid import hybrid_property
from suibe.models.article import ArticleModel
__all__ = [
'NaviChannelModel',
'ChannelModel',
'ChannelSummaryModel',
'NaviModel',
]
def articles_order_by():
return [db.desc(ArticleModel.is_sticky),
db.desc(ArticleModel.date_published)]
class NaviChannelModel(db.Model):
__tablename__ = 'navi_channel'
navi_id = db.Column(db.Integer(), db.ForeignKey('navi.id'),
primary_key=True, index=True)
channel_id = db.Column(db.Integer(), db.ForeignKey('channel.id'),
primary_key=True, index=True)
class ChannelModel(db.Model):
__tablename__ = 'channel'
id = db.Column(db.Integer(), nullable=False, primary_key=True)
parent_id = db.Column(db.Integer(),
db.ForeignKey('channel.id'),
index=True)
name = db.Column(db.Unicode(256), nullable=False, unique=True, index=True)
date_created = db.Column(db.DateTime(timezone=True),
nullable=False, index=True,
server_default=db.func.current_timestamp())
_summary = db.relationship(
'ChannelSummaryModel',
backref=db.backref('channel', lazy='joined', innerjoin=True),
primaryjoin='ChannelModel.id==ChannelSummaryModel.id',
foreign_keys='[ChannelSummaryModel.id]',
uselist=False, cascade='all, delete-orphan')
@hybrid_property
def summary(self):
return self._summary.content
@summary.setter
def summary_setter(self, value):
if not self._summary:
self._summary = ChannelSummaryModel(id=self.id, content=value)
self._summary.content = value
@property
def html(self):
return Markup(self.summary)
parent = db.relationship('ChannelModel',
remote_side=[id],
backref='channels')
articles = db.relationship(
'ArticleModel',
primaryjoin='and_(ChannelModel.id==ArticleModel.cid,'
'ArticleModel.date_published<=func.now())',
order_by=articles_order_by,
foreign_keys='[ArticleModel.cid]',
passive_deletes='all', lazy='dynamic')
all_articles = db.relationship(
'ArticleModel',
primaryjoin='ChannelModel.id==ArticleModel.cid',
order_by=articles_order_by,
foreign_keys='[ArticleModel.cid]',
backref=db.backref(
'channel', lazy='joined', innerjoin=True),
passive_deletes='all', lazy='dynamic')
@property
def url(self):
return url_for("views.channel", cid=self.id)
def __str__(self):
return self.name
class ChannelSummaryModel(db.Model):
__tablename__ = 'channel_summary'
id = db.Column(db.Integer(), db.ForeignKey('channel.id'),
nullable=False, primary_key=True)
content = db.Column(db.UnicodeText(), nullable=False)
class NaviModel(db.Model):
__tablename__ = 'navi'
id = db.Column(db.Integer(), nullable=False, primary_key=True)
name = db.Column(db.Unicode(256), nullable=False, unique=True, index=True)
date_created = db.Column(db.DateTime(timezone=True),
nullable=False, index=True,
server_default=db.func.current_timestamp())
channels = db.relationship('ChannelModel',
secondary=NaviChannelModel.__table__)
def __str__(self):
return self.name | mit | -6,052,231,150,220,093,000 | 31.060345 | 78 | 0.600861 | false | 4.050109 | false | false | false |
martinohanlon/pgzero-pong | pong.py | 1 | 3151 | from math import sin, cos, radians
from time import sleep
#setup the constants
WIDTH = 500
HEIGHT = 300
BALLSPEED = 10
PADDLESPEED = 5
MAXBOUNCEANGLE = 75
def reset_game(angle):
#setup ball properties
ball.pos = WIDTH / 2, HEIGHT / 2
ball.x_float = float(ball.x)
ball.y_float = float(ball.y)
ball.angle = angle
ball.x_vel = BALLSPEED * cos(radians(ball.angle))
ball.y_vel = BALLSPEED * sin(radians(ball.angle))
#position the paddles
pad1.pos = 10, HEIGHT / 2
pad2.pos = WIDTH - 10, HEIGHT / 2
#create a rectangle of the playing area
screenRect = Rect(10,0,WIDTH - 10,HEIGHT)
#create ball
ball = Actor('ball')
#create paddles
pad1 = Actor('paddle')
pad2 = Actor('paddle')
#reset the game
reset_game(180)
#setup the goals
goals = [0, 0]
def draw():
screen.clear()
ball.draw()
pad1.draw()
pad2.draw()
def update():
#move the paddles
if keyboard.q:
pad1.top -= PADDLESPEED
if keyboard.a:
pad1.top += PADDLESPEED
if keyboard.k:
pad2.top -= PADDLESPEED
if keyboard.m:
pad2.top += PADDLESPEED
#move the ball
ball_old_x = ball.x_float
ball_old_y = ball.y_float
ball.x_float = ball.x_float + ball.x_vel
ball.y_float = ball.y_float + ball.y_vel
ball.x = int(round(ball.x_float))
ball.y = int(round(ball.y_float))
#move the ball back to where it was?
reset_ball = False
#has the ball left the screen?
if not screenRect.contains(ball):
#did it hit the top or bottom?
if ball.top < 0 or ball.bottom > HEIGHT:
ball.y_vel *= -1
reset_ball = True
#it must have hit the side
else:
if ball.left < 10:
print("Player 2 goal")
goals[1] += 1
reset_game(180)
sleep(2)
print("Score {} : {}".format(goals[0], goals[1]))
elif ball.right > WIDTH - 10:
print("player 1 goal")
goals[1] += 1
reset_game(0)
sleep(2)
print("Score {} : {}".format(goals[0], goals[1]))
#has the ball hit a paddle
if pad1.colliderect(ball):
#work out the bounce angle
bounce_angle = ((ball.y - pad1.y) / (pad1.height / 2)) * MAXBOUNCEANGLE
ball.angle = max(0 - MAXBOUNCEANGLE, min(MAXBOUNCEANGLE, bounce_angle))
#work out the ball velocity
ball.x_vel = BALLSPEED * cos(radians(ball.angle))
ball.y_vel = BALLSPEED * sin(radians(ball.angle))
reset_ball = True
elif pad2.colliderect(ball):
bounce_angle = 180 - (((ball.y - pad2.y) / (pad2.height / 2)) * MAXBOUNCEANGLE)
ball.angle = max(180 - MAXBOUNCEANGLE, min(180 + MAXBOUNCEANGLE, bounce_angle))
ball.x_vel = BALLSPEED * cos(radians(ball.angle))
ball.y_vel = BALLSPEED * sin(radians(ball.angle))
reset_ball = True
if reset_ball:
ball.x_float = ball_old_x
ball.y_float = ball_old_y
ball.x = int(round(ball.x_float))
ball.y = int(round(ball.y_float))
| mit | 6,057,346,098,873,700,000 | 25.478992 | 87 | 0.569026 | false | 3.026897 | false | false | false |
zqqf16/SYM | SYM/Models/models.py | 1 | 1880 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
import sys
import json
import requests
def get_raw(url):
r = requests.get(url)
if r.status_code != 200:
return None
return r.text
def parse_models(regex, text):
result = []
lastModel = ""
model_regex = re.compile(r'.*\d,\d')
for item in regex.findall(text):
if model_regex.match(item):
result.append([item, lastModel])
else:
lastModel = item
return result
def get_all_models(url):
text = get_raw(url)
if not text:
print("Connect to url failed")
return
results = [
["i386", "Simulator"],
["x86_64", "Simulator"],
]
ipad = re.compile(r'rowspan.*(iPad[\w \(\)-.]*)')
results += parse_models(ipad, text)
iPhone = re.compile(r'rowspan.*(iPhone[\w \(\)-.]*)')
results += parse_models(iPhone, text)
iPod = re.compile(r'rowspan.*(iPod[\w \(\)-.]*)')
results += parse_models(iPod, text)
watch = re.compile(r'rowspan.*(Watch[\w \(\)-.]*)')
results += parse_models(watch, text)
return results
def json_output(results):
json_dict = { m[0]: m[1] for m in results }
print(json.dumps(json_dict, indent=4))
def nsdict_output(results):
print("@{")
for m in results:
print(' @"{}": @"{}",'.format(m[0], m[1]))
print('}')
def text_output(results):
for m in results:
print('{}:{}'.format(*m))
def pretty(results, fmt='json'):
if fmt == 'nsdict':
nsdict_output(results)
elif fmt == 'json':
json_output(results)
else:
text_output(results)
if __name__ == '__main__':
results = get_all_models('https://www.theiphonewiki.com/w/index.php?title=Models&action=edit')
fmt = 'text'
if len(sys.argv) > 1:
fmt = sys.argv[1]
pretty(results, fmt) | mit | 1,235,590,171,324,613,600 | 22.5125 | 98 | 0.55 | false | 3.208191 | false | false | false |
iamsteadman/bambu-api | bambu_api/__init__.py | 1 | 1314 | """
Quickly expose your models to a JSON or XML API, authenticated via HTTP or
OAuth.
"""
__version__ = '2.0.1'
from bambu_api.options import *
from bambu_api.sites import APISite
from bambu_api.exceptions import APIException
from bambu_api.decorators import argument, returns, named
from django.conf import settings
from datetime import datetime
default_app_config = 'bambu_api.apps.APIConfig'
site = APISite()
def autodiscover():
"""
    Works like ``django.contrib.admin.autodiscover``, running through each of the packages within a
project's ``INSTALLED_APPS`` setting, to find instances of an ``api`` module which might contain
calls to ``bambu_api.site.register``.
Unlike ``django.contrib.admin.autodiscover``, you do not need to call this function manually.
"""
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from copy import copy, deepcopy
from bambu_api.endpoints import *
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy(site._registry)
import_module('%s.api' % app)
except:
site._registry = before_import_registry
if module_has_submodule(mod, 'api'):
raise
| apache-2.0 | -4,812,282,785,063,941,000 | 31.04878 | 100 | 0.690259 | false | 3.910714 | false | false | false |
azumimuo/family-xbmc-addon | plugin.video.bubbles/resources/lib/sources/german/hoster/open/moviesever.py | 1 | 5229 | # -*- coding: utf-8 -*-
"""
Bubbles Addon
Copyright (C) 2016 Viper2k4
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import base64
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['moviesever.com/']
self.base_link = 'http://moviesever.com/'
self.search_link = '/?s=%s'
self.get_link = 'http://play.seriesever.net/me/moviesever.php'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
rels = dom_parser.parse_dom(rels, 'li')
rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']
r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
r = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in i])), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
r = [i[0][0] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]]
for i in r:
try:
i = re.sub('\[.+?\]|\[/.+?\]', '', i)
i = client.replaceHTMLCodes(i)
if not i.startswith('http'): i = self.__decode_hash(i)
valid, host = source_utils.is_host_valid(i, hostDict)
if not valid: continue
if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
else: direct = False; urls = [{'quality': 'SD', 'url': i}]
for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
if url.startswith('/'): url = 'http:%s' % url
return url
def __decode_hash(self, hash):
hash = hash.replace("!BeF", "R")
hash = hash.replace("@jkp", "Ax")
hash += '=' * (-len(hash) % 4)
try: return base64.b64decode(hash)
except: return
def __search(self, titles, year):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content) for i in r if i[0] and i[1]]
r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r if i[0]]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return | gpl-2.0 | -8,893,008,589,265,289,000 | 40.84 | 174 | 0.552113 | false | 3.469808 | false | false | false |
ShashaQin/frappe | frappe/email/bulk.py | 1 | 11335 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import smtplib
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from frappe.utils.verified_command import get_signed_params, verify_request
from html2text import html2text
from frappe.utils import get_url, nowdate, encode, now_datetime, add_days, split_emails, cstr, cint
class BulkLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, subject=None, message=None, reference_doctype=None,
reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, reply_to=None, cc=(), show_as_cc=(), message_id=None, in_reply_to=None, send_after=None,
expose_recipients=False, bulk_priority=1, communication=None):
"""Add email to sending queue (Bulk Email)
:param recipients: List of recipients.
:param sender: Email sender.
:param subject: Email subject.
:param message: Email message.
:param reference_doctype: Reference DocType of caller document.
:param reference_name: Reference name of caller document.
:param bulk_priority: Priority for bulk email, default 1.
:param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.bulk.unsubscribe`.
:param unsubscribe_params: additional params for unsubscribed links. default are name, doctype, email
:param attachments: Attachments to be sent.
:param reply_to: Reply to be captured here (default inbox)
:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
:param send_after: Send this email after the given datetime. If value is in integer, then `send_after` will be the automatically set to no of days from current date.
:param communication: Communication link to be set in Bulk Email record
"""
if not unsubscribe_method:
unsubscribe_method = "/api/method/frappe.email.bulk.unsubscribe"
if not recipients:
return
if isinstance(recipients, basestring):
recipients = split_emails(recipients)
if isinstance(send_after, int):
send_after = add_days(nowdate(), send_after)
email_account = get_outgoing_email_account(True, append_to=reference_doctype)
if not sender or sender == "Administrator":
sender = email_account.default_sender
check_bulk_limit(recipients)
formatted = get_formatted_html(subject, message, email_account=email_account)
try:
text_content = html2text(formatted)
except HTMLParser.HTMLParseError:
text_content = "See html attachment"
if reference_doctype and reference_name:
unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"reference_doctype": reference_doctype, "reference_name": reference_name})]
unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"global_unsubscribe": 1})]
else:
unsubscribed = []
recipients = [r for r in list(set(recipients)) if r and r not in unsubscribed]
for email in recipients:
email_content = formatted
email_text_context = text_content
if reference_doctype:
unsubscribe_link = get_unsubscribe_link(
reference_doctype=reference_doctype,
reference_name=reference_name,
email=email,
recipients=recipients,
expose_recipients=expose_recipients,
unsubscribe_method=unsubscribe_method,
unsubscribe_params=unsubscribe_params,
unsubscribe_message=unsubscribe_message,
show_as_cc=show_as_cc
)
email_content = email_content.replace("<!--unsubscribe link here-->", unsubscribe_link.html)
email_text_context += unsubscribe_link.text
# show as cc
cc_message = ""
if email in show_as_cc:
cc_message = _("This email was sent to you as CC")
email_content = email_content.replace("<!-- cc message -->", cc_message)
email_text_context = cc_message + "\n" + email_text_context
# add to queue
add(email, sender, subject, email_content, email_text_context, reference_doctype,
reference_name, attachments, reply_to, cc, message_id, in_reply_to, send_after, bulk_priority,
email_account=email_account, communication=communication)
def add(email, sender, subject, formatted, text_content=None,
reference_doctype=None, reference_name=None, attachments=None, reply_to=None,
cc=(), message_id=None, in_reply_to=None, send_after=None, bulk_priority=1,
email_account=None, communication=None):
"""add to bulk mail queue"""
e = frappe.new_doc('Bulk Email')
e.recipient = email
e.priority = bulk_priority
try:
mail = get_email(email, sender=sender, formatted=formatted, subject=subject,
text_content=text_content, attachments=attachments, reply_to=reply_to, cc=cc, email_account=email_account)
mail.set_message_id(message_id)
if in_reply_to:
mail.set_in_reply_to(in_reply_to)
e.message = cstr(mail.as_string())
e.sender = mail.sender
except frappe.InvalidEmailAddressError:
# bad email id - don't add to queue
return
e.reference_doctype = reference_doctype
e.reference_name = reference_name
e.communication = communication
e.send_after = send_after
e.insert(ignore_permissions=True)
def check_bulk_limit(recipients):
# get count of mails sent this month
this_month = frappe.db.sql("""select count(name) from `tabBulk Email` where
status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0]
# if using settings from site_config.json, check bulk limit
# No limit for own email settings
smtp_server = SMTPServer()
if (smtp_server.email_account
and getattr(smtp_server.email_account, "from_site_config", False)
or frappe.flags.in_test):
monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500
if (this_month + len(recipients)) > monthly_bulk_mail_limit:
throw(_("Cannot send this email. You have crossed the sending limit of {0} emails for this month.").format(monthly_bulk_mail_limit),
BulkLimitCrossedError)
def get_unsubscribe_link(reference_doctype, reference_name,
email, recipients, expose_recipients, show_as_cc,
unsubscribe_method, unsubscribe_params, unsubscribe_message):
email_sent_to = recipients if expose_recipients else [email]
email_sent_cc = ", ".join([e for e in email_sent_to if e in show_as_cc])
email_sent_to = ", ".join([e for e in email_sent_to if e not in show_as_cc])
if email_sent_cc:
email_sent_message = _("This email was sent to {0} and copied to {1}").format(email_sent_to, email_sent_cc)
else:
email_sent_message = _("This email was sent to {0}").format(email_sent_to)
if not unsubscribe_message:
unsubscribe_message = _("Unsubscribe from this list")
unsubscribe_url = get_unsubcribed_url(reference_doctype, reference_name, email,
unsubscribe_method, unsubscribe_params)
html = """<div style="margin: 15px auto; padding: 0px 7px; text-align: center; color: #8d99a6;">
{email}
<p style="margin: 15px auto;">
<a href="{unsubscribe_url}" style="color: #8d99a6; text-decoration: underline;
target="_blank">{unsubscribe_message}
</a>
</p>
</div>""".format(
unsubscribe_url = unsubscribe_url,
email=email_sent_message,
unsubscribe_message=unsubscribe_message
)
text = "\n{email}\n\n{unsubscribe_message}: {unsubscribe_url}".format(
email=email_sent_message,
unsubscribe_message=unsubscribe_message,
unsubscribe_url=unsubscribe_url
)
return frappe._dict({
"html": html,
"text": text
})
def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params):
params = {"email": email.encode("utf-8"),
"doctype": reference_doctype.encode("utf-8"),
"name": reference_name.encode("utf-8")}
if unsubscribe_params:
params.update(unsubscribe_params)
query_string = get_signed_params(params)
# for test
frappe.local.flags.signed_query_string = query_string
return get_url(unsubscribe_method + "?" + get_signed_params(params))
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
	# unsubscribe from comments and communications
if not verify_request():
return
try:
frappe.get_doc({
"doctype": "Email Unsubscribe",
"email": email,
"reference_doctype": doctype,
"reference_name": name
}).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
frappe.db.rollback()
else:
frappe.db.commit()
return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
frappe.respond_as_web_page(_("Unsubscribed"), _("{0} has left the conversation in {1} {2}").format(email, _(doctype), name))
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
smtpserver = SMTPServer()
auto_commit = not from_test
# additional check
check_bulk_limit([])
if frappe.are_emails_muted():
msgprint(_("Emails are muted"))
from_test = True
frappe.db.sql("""update `tabBulk Email` set status='Expired'
where datediff(curdate(), creation) > 3 and status='Not Sent'""", auto_commit=auto_commit)
for i in xrange(500):
if cint(frappe.defaults.get_defaults().get("hold_bulk")):
break
email = frappe.db.sql("""select * from `tabBulk Email` where
status='Not Sent' and ifnull(send_after, "2000-01-01 00:00:00") < %s
order by priority desc, creation asc limit 1 for update""", now_datetime(), as_dict=1)
if email:
email = email[0]
else:
break
frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""",
(email["name"],), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
try:
if not from_test:
smtpserver.setup_email_account(email.reference_doctype)
smtpserver.sess.sendmail(email["sender"], email["recipient"], encode(email["message"]))
frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError,
frappe.ValidationError):
# bad connection, retry later
frappe.db.sql("""update `tabBulk Email` set status='Not Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# no need to attempt further
return
except Exception, e:
frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s
where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# NOTE: removing commit here because we pass auto_commit
# finally:
# frappe.db.commit()
def clear_outbox():
"""Remove mails older than 31 days in Outbox. Called daily via scheduler."""
frappe.db.sql("""delete from `tabBulk Email` where
datediff(now(), creation) > 31""")
| mit | 8,331,891,317,508,942,000 | 34.870253 | 166 | 0.727305 | false | 3.308523 | true | false | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractMaddertranslatesCom.py | 1 | 1779 | def extractMaddertranslatesCom(item):
'''
Parser for 'maddertranslates.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Form A Slaves Only Harem Guild', 'An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'translated'),
('IT IS A DIFFERENT WORLD AND YET I AM CULTIVATING MONSTERS', 'It Is A Different World And Yet I Am Cultivating Monsters', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('The Bloodshot One-Eyed Zombie Emperor ', 'The Bloodshot One-Eyed Zombie Emperor', 'translated'),
('An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | 8,159,679,789,587,798,000 | 52.939394 | 217 | 0.589657 | false | 3.325234 | false | false | false |
WillianPaiva/1flow | oneflow/base/templatetags/base_utils.py | 1 | 6677 | # -*- coding: utf-8 -*-
"""
Copyright 2012-2014 Olivier Cortès <[email protected]>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import re
from django.template import Library
from django.template.base import Node, TemplateSyntaxError
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from sparks.foundations import utils as sfu
register = Library()
def get_view_name(context):
# context['request'].resolver_match.func
# context['request'].resolver_match.args
# context['request'].resolver_match.kwargs
# context['request'].resolver_match.view_name
try:
return context['request'].resolver_match.view_name
except AttributeError:
# Happens on / when the request is a
# WSGIRequest and not an HttpRequest.
return u'home'
@register.simple_tag(takes_context=True)
def reverse_active(context, views_names, return_value=None):
""" In the template:
class="{% reverse_active "view_name" %}"
class="{% reverse_active "view_name1,view_name2" "my-active" %}"
Taken from http://gnuvince.wordpress.com/2007/09/14/a-django-template-tag-for-the-current-active-page/ #NOQA
and extended a lot to simplify template calls…
"""
for view_name in views_names.split(','):
if reverse(view_name) == context['request'].path:
return return_value or u'active'
return u''
@register.simple_tag(takes_context=True)
def view_name_active(context, pattern, return_value=None):
""" Same as reverse active, but for URLs without any
view. :param:`pattern` must be a valid regular expression.
class="{% active "/help/" "top-menu-element-active" %}"
"""
view_name = get_view_name(context)
if re.search(pattern, view_name):
return return_value or u'active'
return u''
class CaptureasNode(Node):
def __init__(self, nodelist, varname):
self.nodelist = nodelist
self.varname = varname
def render(self, context):
output = self.nodelist.render(context)
context[self.varname] = output
return ''
class FirstOfAsNode(Node):
def __init__(self, args, variable_name=None):
self.vars = args
self.variable_name = variable_name
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
if self.variable_name:
context[self.variable_name] = value
break
else:
return smart_text(value)
return ''
@register.tag(name='captureas')
def do_captureas(parser, token):
""" Taken from http://djangosnippets.org/snippets/545/ verbatim. Handy!
Initial source: https://code.djangoproject.com/ticket/7239
"""
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
raise TemplateSyntaxError(
"'captureas' node requires a variable name.")
nodelist = parser.parse(('endcaptureas',))
parser.delete_first_token()
return CaptureasNode(nodelist, args)
@register.tag
def firstofas(parser, token):
""" Original idea: https://code.djangoproject.com/ticket/12199 """
bits = token.split_contents()[1:]
variable_name = None
expecting_save_as = bits[-2] == 'as'
if expecting_save_as:
variable_name = bits.pop(-1)
bits = bits[:-1]
if len(bits) < 1:
raise TemplateSyntaxError(
"'firstofas' statement requires at least one argument")
return FirstOfAsNode([parser.compile_filter(bit) for bit in bits],
variable_name)
@register.inclusion_tag('snippets/countdown.html')
def countdown(value, redirect=None, limit=0, show_seconds=True,
format=None, spacer=None):
""" From http://www.plus2net.com/javascript_tutorial/countdown.php """
if redirect is None:
redirect = '/'
if limit > 0:
operation = '+'
round_value = 0
counter_test = '<='
else:
operation = '-'
round_value = 0 # WAS: 2
counter_test = '>='
if format is None or format == 'long':
separator = ', '
short = False
units = {
'day': _('day'),
'days': _('days'),
'hour': _('hour'),
'hours': _('hours'),
'minute': _('minute'),
'minutes': _('minutes'),
'second': _('second'),
'seconds': _('seconds'),
}
elif format == 'abbr':
separator = ' '
short = True
units = {
'day': _('day'),
'days': _('days'),
'hour': _('hour'),
'hours': _('hours'),
'minute': _('min'),
'minutes': _('mins'),
'second': _('sec'),
'seconds': _('secs'),
}
elif format == 'short':
separator = ' '
short = True
units = {
'day': _('d'),
'days': _('d'),
'hour': _('h'),
'hours': _('h'),
'minute': _('m'),
'minutes': _('m'),
'second': _('s'),
'seconds': _('s'),
}
else:
raise TemplateSyntaxError("'countdown' 'format' keyword argument "
"must be either 'short', 'abbr' or 'long'")
return {
'name': sfu.unique_hash(only_letters=True),
'units': units,
'short': short,
'value': value,
'limit': limit,
'unit_sep': ' ' if spacer is None else spacer,
'redirect': redirect,
'operation': operation,
'separator': separator,
'round_value': round_value,
'show_seconds': show_seconds,
'counter_test': counter_test,
}
@register.filter
def lookup(d, key):
return d[key]
| agpl-3.0 | 7,640,967,080,721,701,000 | 27.279661 | 116 | 0.574318 | false | 4.057143 | false | false | false |
Keats/gutenberg | components/site/benches/gen.py | 1 | 5070 | """
Generates test sites for use in benchmark.
Tested with python3 and probably does not work on Windows.
"""
import datetime
import os
import random
import shutil
TAGS = ["a", "b", "c", "d", "e", "f", "g"]
CATEGORIES = ["c1", "c2", "c3", "c4"]
PAGE = """
+++
title = "Hello"
date = REPLACE_DATE
[taxonomies]
tags = REPLACE_TAG
categories = ["REPLACE_CATEGORY"]
+++
# Modus cognitius profanam ne duae virtutis mundi
## Ut vita
Lorem markdownum litora, care ponto nomina, et ut aspicit gelidas sui et
purpureo genuit. Tamen colla venientis [delphina](http://nil-sol.com/ecquis)
Tusci et temptata citaeque curam isto ubi vult vulnere reppulit.
- Seque vidit flendoque de quodam
- Dabit minimos deiecto caputque noctis pluma
- Leti coniunx est Helicen
- Illius pulvereumque Icare inpositos
- Vivunt pereo pluvio tot ramos Olenios gelidis
- Quater teretes natura inde
### A subsection
Protinus dicunt, breve per, et vivacis genus Orphei munere. Me terram [dimittere
casside](http://corpus.org/) pervenit saxo primoque frequentat genuum sorori
praeferre causas Libys. Illud in serpit adsuetam utrimque nunc haberent,
**terrae si** veni! Hectoreis potes sumite [Mavortis retusa](http://tua.org/)
granum captantur potuisse Minervae, frugum.
> Clivo sub inprovisoque nostrum minus fama est, discordia patrem petebat precatur
absumitur, poena per sit. Foramina *tamen cupidine* memor supplex tollentes
dictum unam orbem, Anubis caecae. Viderat formosior tegebat satis, Aethiopasque
sit submisso coniuge tristis ubi!
## Praeceps Corinthus totidem quem crus vultum cape
```rs
#[derive(Debug)]
pub struct Site {
/// The base path of the zola site
pub base_path: PathBuf,
/// The parsed config for the site
pub config: Config,
pub pages: HashMap<PathBuf, Page>,
pub sections: HashMap<PathBuf, Section>,
pub tera: Tera,
live_reload: bool,
output_path: PathBuf,
static_path: PathBuf,
pub tags: Option<Taxonomy>,
pub categories: Option<Taxonomy>,
/// A map of all .md files (section and pages) and their permalink
/// We need that if there are relative links in the content that need to be resolved
pub permalinks: HashMap<String, String>,
}
```
## More stuff
And a shortcode:
{{ youtube(id="my_youtube_id") }}
### Another subsection
Gotta make the toc do a little bit of work
# A big title
- hello
- world
- !
```py
if __name__ == "__main__":
gen_site("basic-blog", [""], 250, paginate=True)
```
"""
def gen_skeleton(name, is_blog):
if os.path.exists(name):
shutil.rmtree(name)
os.makedirs(os.path.join(name, "content"))
os.makedirs(os.path.join(name, "static"))
with open(os.path.join(name, "config.toml"), "w") as f:
if is_blog:
f.write("""
title = "My site"
base_url = "https://replace-this-with-your-url.com"
theme = "sample"
taxonomies = [
{name = "tags", rss = true},
{name = "categories"}
]
[extra.author]
name = "Vincent Prouillet"
""")
else:
f.write("""
title = "My site"
base_url = "https://replace-this-with-your-url.com"
theme = "sample"
[extra.author]
name = "Vincent Prouillet"
""")
# Re-use the test templates
shutil.copytree("../../../test_site/templates", os.path.join(name, "templates"))
shutil.copytree("../../../test_site/themes", os.path.join(name, "themes"))
def gen_section(path, num_pages, is_blog):
with open(os.path.join(path, "_index.md"), "w") as f:
if is_blog:
f.write("""
+++
paginate_by = 5
sort_by = "date"
template = "section_paginated.html"
+++
""")
else:
f.write("+++\n+++\n")
day = datetime.date.today()
for (i, page) in enumerate(range(0, num_pages)):
with open(os.path.join(path, "page-{}.md".format(i)), "w") as f:
f.write(
PAGE
.replace("REPLACE_DATE", str(day + datetime.timedelta(days=1)))
.replace("REPLACE_CATEGORY", random.choice(CATEGORIES))
.replace("REPLACE_TAG", str([random.choice(TAGS), random.choice(TAGS)]))
)
def gen_site(name, sections, num_pages_per_section, is_blog=False):
gen_skeleton(name, is_blog)
for section in sections:
path = os.path.join(name, "content", section) if section else os.path.join(name, "content")
if section:
os.makedirs(path)
gen_section(path, num_pages_per_section, is_blog)
if __name__ == "__main__":
gen_site("small-blog", [""], 30, is_blog=True)
gen_site("medium-blog", [""], 250, is_blog=True)
gen_site("big-blog", [""], 1000, is_blog=True)
gen_site("huge-blog", [""], 10000, is_blog=True)
gen_site("extra-huge-blog", [""], 100000, is_blog=True)
gen_site("small-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 10)
gen_site("medium-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 100)
gen_site("huge-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 1000)
| mit | -6,960,706,027,240,699,000 | 27.806818 | 121 | 0.641223 | false | 2.849916 | false | false | false |
salcho/antares | ui/loggerWidget.py | 1 | 1137 | #!/usr/bin/env python
import gtk
import logging
import time
from ui.IWidget import IWidget
from core.data import logger
from core.log import addStreamHandler
#TODO: Implement file-like thread to show logging!
class loggerWidget(IWidget):
def __init__(self):
IWidget.__init__(self)
self.frame = gtk.Frame('Logger')
self.text_view = None
def start(self):
self.frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.text_view = gtk.TextView()
self.text_view.set_editable(False)
self.text_view.set_wrap_mode(gtk.WRAP_NONE)
self.text_view.set_justification(gtk.JUSTIFY_LEFT)
self.text_view.set_cursor_visible(True)
sw.add_with_viewport(self.text_view)
self.frame.add(sw)
# Add handler to the logger
handler = handlerClass()
addStreamHandler(handler)
def updateView(self, record):
buf = self.text_view.get_buffer()
buf.insert(buf.get_end_iter(), record)
def getWidget(self):
return self.frame
class handlerClass(logging.StreamHandler):
def emit(self, record):
loggerWidget.updateView(record )
self.flush() | mit | 3,276,869,649,480,475,600 | 24.863636 | 59 | 0.737907 | false | 2.930412 | false | false | false |
zestyr/lbry | lbrynet/dht/msgtypes.py | 1 | 1593 | #!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
from lbrynet.core.utils import generate_id
class Message(object):
""" Base class for messages - all "unknown" messages use this class """
def __init__(self, rpcID, nodeID):
self.id = rpcID
self.nodeID = nodeID
class RequestMessage(Message):
""" Message containing an RPC request """
def __init__(self, nodeID, method, methodArgs, rpcID=None):
if rpcID is None:
rpcID = generate_id()
Message.__init__(self, rpcID, nodeID)
self.request = method
self.args = methodArgs
class ResponseMessage(Message):
""" Message containing the result from a successful RPC request """
def __init__(self, rpcID, nodeID, response):
Message.__init__(self, rpcID, nodeID)
self.response = response
class ErrorMessage(ResponseMessage):
""" Message containing the error from an unsuccessful RPC request """
def __init__(self, rpcID, nodeID, exceptionType, errorMessage):
ResponseMessage.__init__(self, rpcID, nodeID, errorMessage)
if isinstance(exceptionType, type):
self.exceptionType = '%s.%s' % (exceptionType.__module__, exceptionType.__name__)
else:
self.exceptionType = exceptionType
| mit | 4,668,161,594,579,503,000 | 32.1875 | 93 | 0.670433 | false | 4.148438 | false | false | false |
shalzuth/BraveHaxvius | IDAScripts/GetNetworkKeys.py | 1 | 2891 | from idautils import *
from idaapi import *
def get_string(addr):
out = ""
while True:
if Byte(addr) != 0:
out += chr(Byte(addr))
else:
break
addr += 1
return out
def get_string_from_head(head):
refs = DataRefsFrom(head)
for ref in refs:
refs2 = DataRefsFrom(ref)
for ref2 in refs2:
stringval = get_string(ref2)
return stringval
def dumpkvp(functionName, addr, key):
if key in functionName and 'Request' in functionName:
functionName = functionName[3:]
functionName = functionName[:functionName.index(key)]
functionName = ''.join([i for i in functionName if not i.isdigit()])
functionName = functionName[:len(functionName)-7]
for (startea, endea) in Chunks(addr):
for head in Heads(startea, endea):
operand = GetDisasm(head)
if 'R0, [PC,R0]' in operand:
#if ', =(' in operand:
stringval = get_string_from_head(head)
if key is 'getUrl':
stringval = stringval[14:22]
if 'action' in stringval:
stringval = 'action'
if not (functionName in requests):
requests[functionName] = {}
requests[functionName][key[3:]] = stringval
if 'aActionsymbol' in operand:
stringval = get_string_from_head(head)
if key is 'getUrl':
stringval = stringval[14:22]
if 'action' in stringval:
stringval = 'action'
if not (functionName in requests):
requests[functionName] = {}
requests[functionName][key[3:]] = stringval
def dumpbody(functionName, addr, key):
if key in functionName and 'Request' in functionName:
functionName = functionName[3:]
functionName = functionName[:functionName.index(key)]
functionName = ''.join([i for i in functionName if not i.isdigit()])
functionName = functionName[:len(functionName)-7]
stringval = ""
basenode = ""
for (startea, endea) in Chunks(addr):
for head in Heads(startea, endea):
operand = GetDisasm(head)
if 'mov' in operand and 'ds:(off' in operand:
stringval = get_string_from_head(head)
if '_ZN9JsonGroup7addNodeEv' in operand:
if not (functionName in requests):
requests[functionName] = {}
if not ("Parameters" in requests[functionName]):
requests[functionName]["Parameters"] = {}
basenode = stringval
requests[functionName]["Parameters"][basenode] = {}
if '_ZN8JsonNode8addParamEPK' in operand:
requests[functionName]["Parameters"][basenode] = stringval
requests = {}
for funcea in Functions(0x100000, 0x14ea010):
functionName = GetFunctionName(funcea)
dumpkvp(functionName, funcea, 'getUrl')
dumpkvp(functionName, funcea, 'getRequestID')
dumpkvp(functionName, funcea, 'getEncodeKey')
#dumpbody(functionName, funcea, 'createBody')
print requests
import json
filename = os.path.expanduser("~/OneDrive/Documents/GitHub/BraveHaxvius/DataExtractor/network2.json")
with open(filename, 'w') as fp:
json.dump(requests, fp) | mit | -4,050,094,355,223,870,000 | 31.863636 | 101 | 0.684192 | false | 3.105263 | false | false | false |
eRestin/MezzGIS | mezzanine/conf/forms.py | 1 | 3040 | from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import urlize
from mezzanine.conf import settings, registry
from mezzanine.conf.models import Setting
FIELD_TYPES = {
bool: forms.BooleanField,
int: forms.IntegerField,
float: forms.FloatField,
}
class SettingsForm(forms.Form):
"""
Form for settings - creates a field for each setting in
``mezzanine.conf`` that is marked as editable.
"""
def __init__(self, *args, **kwargs):
super(SettingsForm, self).__init__(*args, **kwargs)
settings.use_editable()
# Create a form field for each editable setting's from its type.
for name in sorted(registry.keys()):
setting = registry[name]
if setting["editable"]:
field_class = FIELD_TYPES.get(setting["type"], forms.CharField)
kwargs = {
"label": setting["label"] + ":",
"required": setting["type"] in (int, float),
"initial": getattr(settings, name),
"help_text": self.format_help(setting["description"]),
}
if setting["choices"]:
field_class = forms.ChoiceField
kwargs["choices"] = setting["choices"]
self.fields[name] = field_class(**kwargs)
css_class = field_class.__name__.lower()
self.fields[name].widget.attrs["class"] = css_class
def __iter__(self):
"""
Calculate and apply a group heading to each field and order by the
heading.
"""
fields = list(super(SettingsForm, self).__iter__())
group = lambda field: field.name.split("_", 1)[0].title()
misc = _("Miscellaneous")
groups = defaultdict(int)
for field in fields:
groups[group(field)] += 1
for (i, field) in enumerate(fields):
setattr(fields[i], "group", group(field))
if groups[fields[i].group] == 1:
fields[i].group = misc
return iter(sorted(fields, key=lambda x: (x.group == misc, x.group)))
def save(self):
"""
Save each of the settings to the DB.
"""
for (name, value) in self.cleaned_data.items():
setting_obj, created = Setting.objects.get_or_create(name=name)
setting_obj.value = value
setting_obj.save()
def format_help(self, description):
"""
Format the setting's description into HTML.
"""
for bold in ("``", "*"):
parts = []
for i, s in enumerate(description.split(bold)):
parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
description = "".join(parts)
return mark_safe(urlize(description).replace("\n", "<br>"))
| bsd-2-clause | 8,096,700,865,729,011,000 | 35.190476 | 79 | 0.565789 | false | 4.293785 | false | false | false |
volab/pyvorcv | setup.py | 1 | 3971 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# VoR-CV
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
import sys
from vorcv import __version__ as VERSION
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence']
KEYWORDS = 'opencv image recognition robotics'
# You can either specify manually the list of packages to include in the
# distribution or use "setuptools.find_packages()" to include them
# automatically with a recursive search (from the root directory of the
# project).
#PACKAGES = find_packages()
PACKAGES = ['vorcv']
# The following list contains all dependencies that Python will try to
# install with this project
INSTALL_REQUIRES = ['numpy']
#INSTALL_REQUIRES = []
SCRIPTS = ["scripts/vorcv-demo",
"scripts/vorcv-circle-detection-calibration"]
# Entry point can be used to create plugins or to automatically generate
# system commands to call specific functions.
# Syntax: "name_of_the_command_to_make = package.module:function".
ENTRY_POINTS = {}
#ENTRY_POINTS = {
# 'console_scripts': [
# 'vorcv-demo = vorcv.demo:main',
# ],
#}
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='[email protected]',
maintainer='Jeremie DECOCK',
maintainer_email='[email protected]',
name='pyvorcv',
description="The PyVoR-CV project, a computer vision library made for some VoRobotics projects (VoR11, VoR12, ...).",
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# Where the package can be downloaded
classifiers=CLASSIFIERS,
#license='MIT', # Useless if license is already in CLASSIFIERS
keywords=KEYWORDS,
packages=PACKAGES,
include_package_data=True, # Use the MANIFEST.in file
install_requires=INSTALL_REQUIRES,
#platforms=['Linux'],
#requires=['pyserial'],
scripts=SCRIPTS,
entry_points=ENTRY_POINTS,
version=VERSION)
| mit | 7,094,162,803,281,906,000 | 32.940171 | 123 | 0.684211 | false | 3.927794 | false | false | false |
robertnishihara/ray | python/ray/dashboard/node_stats.py | 1 | 13362 | from collections import defaultdict
from ray.dashboard.util import to_unix_time, format_reply_id
from base64 import b64decode
import ray
import threading
import json
import traceback
import copy
import logging
from datetime import datetime
import time
from typing import Dict
import re
from operator import itemgetter
logger = logging.getLogger(__name__)
PYCLASSNAME_RE = re.compile(r"(.+?)\(")
def _group_actors_by_python_class(actors):
groups = defaultdict(list)
for actor in actors.values():
actor_title = actor.get("actorTitle")
if not actor_title:
groups["Unknown Class"].append(actor)
else:
match = PYCLASSNAME_RE.search(actor_title)
if match:
# Catches case of actorTitle like
# Foo(bar, baz, [1,2,3]) -> Foo
class_name = match.groups()[0]
groups[class_name].append(actor)
else:
# Catches case of e.g. just Foo
# in case of actor task
groups[actor_title].append(actor)
return groups
def _get_actor_group_stats(group):
state_to_count = defaultdict(lambda: 0)
executed_tasks = 0
min_timestamp = None
num_timestamps = 0
sum_timestamps = 0
now = time.time() * 1000 # convert S -> MS
for actor in group:
state_to_count[actor["state"]] += 1
if "timestamp" in actor:
if not min_timestamp or actor["timestamp"] < min_timestamp:
min_timestamp = actor["timestamp"]
num_timestamps += 1
sum_timestamps += now - actor["timestamp"]
if "numExecutedTasks" in actor:
executed_tasks += actor["numExecutedTasks"]
if num_timestamps > 0:
avg_lifetime = int((sum_timestamps / num_timestamps) / 1000)
max_lifetime = int((now - min_timestamp) / 1000)
else:
avg_lifetime = 0
max_lifetime = 0
return {
"stateToCount": state_to_count,
"avgLifetime": avg_lifetime,
"maxLifetime": max_lifetime,
"numExecutedTasks": executed_tasks,
}
class NodeStats(threading.Thread):
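    """Background thread that aggregates node, actor, log and error data from Redis for the dashboard."""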
def __init__(self, redis_address, redis_password=None):
self.redis_key = "{}.*".format(ray.gcs_utils.REPORTER_CHANNEL)
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self._node_stats = {}
self._ip_to_hostname = {}
self._addr_to_owner_addr = {}
self._addr_to_actor_id = {}
self._addr_to_extra_info_dict = {}
self._node_stats_lock = threading.Lock()
self._default_info = {
"actorId": "",
"children": {},
"currentTaskFuncDesc": [],
"ipAddress": "",
"jobId": "",
"numExecutedTasks": 0,
"numLocalObjects": 0,
"numObjectRefsInScope": 0,
"port": 0,
"state": 0,
"taskQueueLength": 0,
"usedObjectStoreMemory": 0,
"usedResources": {},
}
# Mapping from IP address to PID to list of log lines
self._logs = defaultdict(lambda: defaultdict(list))
# Mapping from IP address to PID to list of error messages
self._errors = defaultdict(lambda: defaultdict(list))
ray.state.state._initialize_global_state(
redis_address=redis_address, redis_password=redis_password)
super().__init__()
def _insert_log_counts(self):
for ip, logs_by_pid in self._logs.items():
hostname = self._ip_to_hostname.get(ip)
if not hostname or hostname not in self._node_stats:
continue
logs_by_pid = {pid: len(logs) for pid, logs in logs_by_pid.items()}
self._node_stats[hostname]["log_count"] = logs_by_pid
def _insert_error_counts(self):
for ip, errs_by_pid in self._errors.items():
hostname = self._ip_to_hostname.get(ip)
if not hostname or hostname not in self._node_stats:
continue
errs_by_pid = {pid: len(errs) for pid, errs in errs_by_pid.items()}
self._node_stats[hostname]["error_count"] = errs_by_pid
def _purge_outdated_stats(self):
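        """Drop stats for nodes that have not reported within the last 5 seconds."""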
        def current(then, now):
            return (now - then) <= 5
now = to_unix_time(datetime.utcnow())
self._node_stats = {
k: v
for k, v in self._node_stats.items() if current(v["now"], now)
}
def get_node_stats(self):
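        """Return per-node stats sorted by boot time, with log and error counts merged in."""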
with self._node_stats_lock:
self._purge_outdated_stats()
self._insert_error_counts()
self._insert_log_counts()
node_stats = sorted(
(v for v in self._node_stats.values()),
key=itemgetter("boot_time"))
return {"clients": node_stats}
# Gets actors in a flat way to allow for grouping by actor type.
def get_actors(self, workers_info_by_node, infeasible_tasks, ready_tasks):
now = time.time()
actors: Dict[str, Dict[str, any]] = {}
# construct flattened actor tree
with self._node_stats_lock:
for addr, actor_id in self._addr_to_actor_id.items():
actors[actor_id] = copy.deepcopy(self._default_info)
actors[actor_id].update(self._addr_to_extra_info_dict[addr])
for node_id, workers_info in workers_info_by_node.items():
for worker_info in workers_info:
if "coreWorkerStats" in worker_info:
core_worker_stats = worker_info["coreWorkerStats"]
addr = (core_worker_stats["ipAddress"],
str(core_worker_stats["port"]))
if addr in self._addr_to_actor_id:
actor_info = actors[self._addr_to_actor_id[addr]]
format_reply_id(core_worker_stats)
actor_info.update(core_worker_stats)
actor_info["averageTaskExecutionSpeed"] = round(
actor_info["numExecutedTasks"] /
(now - actor_info["timestamp"] / 1000), 2)
actor_info["nodeId"] = node_id
actor_info["pid"] = worker_info["pid"]
def _update_from_actor_tasks(task, task_spec_type,
invalid_state_type):
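                """Record a pending or infeasible actor-creation task in `actors` with a sentinel state."""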
actor_id = ray.utils.binary_to_hex(
b64decode(task[task_spec_type]["actorId"]))
if invalid_state_type == "pendingActor":
task["state"] = -1
elif invalid_state_type == "infeasibleActor":
task["state"] = -2
else:
raise ValueError(f"Invalid argument"
"invalid_state_type={invalid_state_type}")
task["actorTitle"] = task["functionDescriptor"][
"pythonFunctionDescriptor"]["className"]
format_reply_id(task)
actors[actor_id] = task
for infeasible_task in infeasible_tasks:
_update_from_actor_tasks(infeasible_task,
"actorCreationTaskSpec",
"infeasibleActor")
for ready_task in ready_tasks:
_update_from_actor_tasks(ready_task, "actorCreationTaskSpec",
"pendingActor")
actor_groups = _group_actors_by_python_class(actors)
stats_by_group = {
name: _get_actor_group_stats(group)
for name, group in actor_groups.items()
}
response_data = {}
for name, group in actor_groups.items():
response_data[name] = {
"entries": group,
"summary": stats_by_group[name]
}
return response_data
def get_logs(self, hostname, pid):
ip = self._node_stats.get(hostname, {"ip": None})["ip"]
logs = self._logs.get(ip, {})
if pid:
logs = {pid: logs.get(pid, [])}
return logs
def get_errors(self, hostname, pid):
ip = self._node_stats.get(hostname, {"ip": None})["ip"]
errors = self._errors.get(ip, {})
if pid:
errors = {pid: errors.get(pid, [])}
return errors
def run(self):
p = self.redis_client.pubsub(ignore_subscribe_messages=True)
p.psubscribe(self.redis_key)
logger.info("NodeStats: subscribed to {}".format(self.redis_key))
log_channel = ray.gcs_utils.LOG_FILE_CHANNEL
p.subscribe(log_channel)
logger.info("NodeStats: subscribed to {}".format(log_channel))
error_channel = ray.gcs_utils.RAY_ERROR_PUBSUB_PATTERN
p.psubscribe(error_channel)
logger.info("NodeStats: subscribed to {}".format(error_channel))
actor_channel = ray.gcs_utils.RAY_ACTOR_PUBSUB_PATTERN
p.psubscribe(actor_channel)
logger.info("NodeStats: subscribed to {}".format(actor_channel))
current_actor_table = ray.actors()
with self._node_stats_lock:
for actor_data in current_actor_table.values():
addr = (actor_data["Address"]["IPAddress"],
str(actor_data["Address"]["Port"]))
owner_addr = (actor_data["OwnerAddress"]["IPAddress"],
str(actor_data["OwnerAddress"]["Port"]))
self._addr_to_owner_addr[addr] = owner_addr
self._addr_to_actor_id[addr] = actor_data["ActorID"]
self._addr_to_extra_info_dict[addr] = {
"jobId": actor_data["JobID"],
"state": actor_data["State"],
"timestamp": actor_data["Timestamp"]
}
for x in p.listen():
try:
with self._node_stats_lock:
channel = ray.utils.decode(x["channel"])\
if "pattern" not in x or x["pattern"] is None\
else x["pattern"]
data = x["data"]
if channel == log_channel:
data = json.loads(ray.utils.decode(data))
ip = data["ip"]
pid = str(data["pid"])
self._logs[ip][pid].extend(data["lines"])
elif channel == str(error_channel):
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(
data)
error_data = ray.gcs_utils.ErrorTableData.FromString(
pubsub_msg.data)
message = error_data.error_message
message = re.sub(r"\x1b\[\d+m", "", message)
match = re.search(r"\(pid=(\d+), ip=(.*?)\)", message)
if match:
pid = match.group(1)
ip = match.group(2)
self._errors[ip][pid].append({
"message": message,
"timestamp": error_data.timestamp,
"type": error_data.type
})
elif channel == actor_channel:
pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(
data)
actor_data = ray.gcs_utils.ActorTableData.FromString(
pubsub_msg.data)
addr = (actor_data.address.ip_address,
str(actor_data.address.port))
owner_addr = (actor_data.owner_address.ip_address,
str(actor_data.owner_address.port))
self._addr_to_owner_addr[addr] = owner_addr
self._addr_to_actor_id[addr] = ray.utils.binary_to_hex(
actor_data.actor_id)
self._addr_to_extra_info_dict[addr] = {
"jobId": ray.utils.binary_to_hex(
actor_data.job_id),
"state": actor_data.state,
"timestamp": actor_data.timestamp
}
elif channel == ray.gcs_utils.RAY_REPORTER_PUBSUB_PATTERN:
data = json.loads(ray.utils.decode(data))
self._ip_to_hostname[data["ip"]] = data["hostname"]
self._node_stats[data["hostname"]] = data
else:
try:
data = json.loads(ray.utils.decode(data))
except Exception as e:
data = f"Failed to load data because of {e}"
logger.warning("Unexpected channel data received, "
f"channel: {channel}, data: {data}")
except Exception:
logger.exception(traceback.format_exc())
continue
| apache-2.0 | 5,768,132,720,916,335,000 | 40.496894 | 79 | 0.503068 | false | 4.28132 | false | false | false |
0/realtimepork | realtimepork/gpu.py | 1 | 1605 | """
GPU utilities.
"""
from functools import wraps
from math import ceil
# Load everything we need in this module from PyCUDA (but don't autoinit until
# requested).
try:
from pycuda.tools import DeviceData
except ImportError:
_pycuda_available = False
else:
_pycuda_available = True
# Is this thing on?
_enabled = False
class PyCUDAMissingError(Exception):
pass
def _require_pycuda(f):
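    """Decorator that raises PyCUDAMissingError if PyCUDA could not be imported."""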
@wraps(f)
def wrapper(*args, **kwargs):
if not _pycuda_available:
raise PyCUDAMissingError('Unable to load PyCUDA.')
return f(*args, **kwargs)
return wrapper
@_require_pycuda
def enable():
"""
Initialize the GPU machinery.
"""
global _enabled
if _enabled:
return
import pycuda.autoinit
_enabled = True
def is_enabled():
"""
Check whether the GPU is available and initialized.
"""
return _enabled
@_require_pycuda
def carve_array(xn, yn):
"""
Determine the best grid and block sizes given the input size.
Parameters:
xn: Size in the x direction (shorter stride).
yn: Size in the y direction (longer stride).
Returns:
Grid size tuple, block size tuple.
"""
dev = DeviceData()
# Align with the warp size in the x direction and use what remains for the
# y direction.
x_threads = dev.warp_size
y_threads = dev.max_threads // x_threads
assert x_threads * y_threads <= dev.max_threads
x_blocks = int(ceil(xn / x_threads))
y_blocks = int(ceil(yn / y_threads))
return (x_blocks, y_blocks), (x_threads, y_threads, 1)
| mit | 6,945,346,836,958,899,000 | 17.662791 | 78 | 0.640498 | false | 3.672769 | false | false | false |
hfercc/mese2014 | lib/rest_framework/mixins.py | 1 | 7228 | """
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import clone_request
from rest_framework.settings import api_settings
import warnings
def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None):
"""
Given a model instance, and an optional pk and slug field,
return the full list of all other field names on that model.
For use when performing full_clean on a model instance,
so we only clean the required fields.
"""
include = []
if pk:
# Pending deprecation
pk_field = obj._meta.pk
while pk_field.rel:
pk_field = pk_field.rel.to._meta.pk
include.append(pk_field.name)
if slug_field:
# Pending deprecation
include.append(slug_field)
if lookup_field and lookup_field != 'pk':
include.append(lookup_field)
return [field.name for field in obj._meta.fields if field.name not in include]
class CreateModelMixin(object):
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
data = dict(request.DATA)
data.update(**kwargs)
serializer = self.get_serializer(data=data, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_success_headers(self, data):
try:
return {'Location': data[api_settings.URL_FIELD_NAME]}
except (TypeError, KeyError):
return {}
class ListModelMixin(object):
"""
List a queryset.
"""
empty_error = "Empty list and '%(class_name)s.allow_empty' is False."
def list(self, request, *args, **kwargs):
self.object_list = self.filter_queryset(self.get_queryset())
# Default is to allow empty querysets. This can be altered by setting
# `.allow_empty = False`, to raise 404 errors on empty querysets.
if not self.allow_empty and not self.object_list:
warnings.warn(
'The `allow_empty` parameter is due to be deprecated. '
'To use `allow_empty=False` style behavior, You should override '
'`get_queryset()` and explicitly raise a 404 on empty querysets.',
PendingDeprecationWarning
)
class_name = self.__class__.__name__
error_msg = self.empty_error % {'class_name': class_name}
raise Http404(error_msg)
# Switch between paginated or standard style responses
page = self.paginate_queryset(self.object_list)
if page is not None:
serializer = self.get_pagination_serializer(page)
else:
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data)
class RetrieveModelMixin(object):
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = self.get_serializer(self.object)
return Response(serializer.data)
class UpdateModelMixin(object):
"""
Update a model instance.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
self.object = self.get_object_or_none()
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
try:
self.pre_save(serializer.object)
except ValidationError as err:
# full_clean on model instance may be called in pre_save,
# so we have to handle eventual errors.
return Response(err.message_dict, status=status.HTTP_400_BAD_REQUEST)
if self.object is None:
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
return Response(serializer.data, status=status.HTTP_201_CREATED)
self.object = serializer.save(force_update=True)
self.post_save(self.object, created=False)
return Response(serializer.data, status=status.HTTP_200_OK)
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
def get_object_or_none(self):
try:
return self.get_object()
except Http404:
if self.request.method == 'PUT':
# For PUT-as-create operation, we need to ensure that we have
# relevant permissions, as if this was a POST request. This
# will either raise a PermissionDenied exception, or simply
# return None.
self.check_permissions(clone_request(self.request, 'POST'))
else:
# PATCH requests where the object does not exist should still
# return a 404 response.
raise
def pre_save(self, obj):
"""
Set any attributes on the object that are implicit in the request.
"""
# pk and/or slug attributes are implicit in the URL.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup = self.kwargs.get(lookup_url_kwarg, None)
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
slug_field = slug and self.slug_field or None
if lookup:
setattr(obj, self.lookup_field, lookup)
if pk:
setattr(obj, 'pk', pk)
if slug:
setattr(obj, slug_field, slug)
# Ensure we clean the attributes so that we don't eg return integer
# pk using a string representation, as provided by the url conf kwarg.
if hasattr(obj, 'full_clean'):
exclude = _get_validation_exclusions(obj, pk, slug_field, self.lookup_field)
obj.full_clean(exclude)
class DestroyModelMixin(object):
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
self.pre_delete(obj)
obj.delete()
self.post_delete(obj)
return Response(status=status.HTTP_204_NO_CONTENT)
| apache-2.0 | 8,436,068,386,180,543,000 | 34.505051 | 88 | 0.606945 | false | 4.259281 | false | false | false |
aerostitch/nagios_checks | hdfs_datanode_balancing_status.py | 1 | 4396 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Joseph Herlant <[email protected]>
# File name: hdfs_datanode_balancing_status.py
# Creation date: 2014-10-08
#
# Distributed under terms of the GNU GPLv3 license.
"""
This nagios active check parses the Hadoop HDFS web interface url:
http://<namenode>:<port>/dfsnodelist.jsp?whatNodes=LIVE
to check that no datanode is beyond the balancing threshold (in either direction).
The goal of this check is to determine whether the balancer needs to be run
manually and whether it is doing its job correctly (for example when run from
cron jobs).
The output includes performance data and is truncated if longer than 1024
chars. The values of the output are the variation between the average disk
usage of the nodes over the cluster and the disk usage of the current node on
the cluster.
A negative value of X means that the node is X percent under the average disk
usage of the datanodes over the cluster. A positive value means that it's over
the average.
Tested on: Hadoop CDH3U5
"""
__author__ = 'Joseph Herlant'
__copyright__ = 'Copyright 2014, Joseph Herlant'
__credits__ = ['Joseph Herlant']
__license__ = 'GNU GPLv3'
__version__ = '1.0.0'
__maintainer__ = 'Joseph Herlant'
__email__ = '[email protected]'
__status__ = 'Production'
__website__ = 'https://github.com/aerostitch/'
from mechanize import Browser
from BeautifulSoup import BeautifulSoup
import argparse, sys
if __name__ == '__main__':
# use -h argument to get help
parser = argparse.ArgumentParser(
        description='A Nagios check to verify that all datanodes of an HDFS \
                cluster are under the balancing threshold \
using the namenode web interface.')
parser.add_argument('-n', '--namenode', required=True,
help='hostname of the namenode of the cluster')
parser.add_argument('-p', '--port', type=int, default=50070,
help='port of the namenode http interface. \
Defaults to 50070.')
parser.add_argument(
'-w', '--warning', type=int, default=10,
help='warning threshold. If the datanode usage differs from average \
usage to more than this threshold, raise a warning. Defaults to 10.'
)
parser.add_argument(
'-c', '--critical', type=int, default=15,
help='critical threshold. If the datanode usage differs from average \
usage to more than this threshold, raise a critical. Defaults to 15.'
)
args = parser.parse_args()
# Get the web page from the namenode
url = "http://%s:%d/dfsnodelist.jsp?whatNodes=LIVE" % (args.namenode, args.port)
try:
page = Browser().open(url)
except IOError:
print 'CRITICAL: Cannot access namenode interface on %s:%d!' % (args.namenode, args.port)
sys.exit(2)
# parse the page and storing the {datanode: pct_usage} hash
html = page.read()
soup = BeautifulSoup(html)
datanodes = soup.findAll('td', {'class' : 'name'})
pcused = soup.findAll('td', {'class' : 'pcused', 'align' : 'right'})
nodes_pct = {}
    for (idx, node_cell) in enumerate(datanodes):
        pct = float(pcused[idx].contents[0].strip())
        node = node_cell.findChildren('a')[0].contents[0].strip()
        nodes_pct[node] = pct
# Each node variation against the average pct must be under the threshold
w_msg = ''
c_msg = ''
perfdata = ''
avg = 0
if len(nodes_pct) > 0:
avg = float(sum(nodes_pct.values()))/len(nodes_pct)
else:
print 'CRITICAL: Unable to find any node.'
sys.exit(2)
for (node, pct) in nodes_pct.items():
if abs(pct-avg) >= args.critical:
c_msg += ' %s=%.1f,' % (node, pct-avg)
perfdata += ' %s=%.1f,' % (node, pct-avg)
elif abs(avg-pct) >= args.warning:
w_msg += ' %s=%.1f,' % (node, pct-avg)
perfdata += ' %s=%.1f,' % (node, pct-avg)
else:
perfdata += ' %s=%.1f,' % (node, pct-avg)
# Prints the values and exits with the nagios exit code
if len(c_msg) > 0:
print ('CRITICAL:%s%s |%s' % (c_msg, w_msg, perfdata)).strip(',')[:1024]
sys.exit(2)
elif len(w_msg) > 0:
print ('WARNING:%s |%s' % (w_msg, perfdata)).strip(',')[:1024]
sys.exit(1)
else:
print ('OK |%s' % (perfdata)).strip(',')[:1024]
sys.exit(0)
| gpl-2.0 | 1,909,556,536,262,776,600 | 37.226087 | 97 | 0.619882 | false | 3.43706 | false | false | false |
drongo-framework/drongo-wing-auth | wing_auth/views.py | 1 | 1681 | from drongo_utils.helpers import URLHelper
from wing_jinja2 import Jinja2
url = URLHelper.url
template = Jinja2.template
class AuthViews(object):
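    """Login/logout views wired to the auth module's user services."""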
def __init__(self, app, module, base_url):
self.app = app
self.module = module
self.base_url = base_url
URLHelper.mount(app, self, base_url)
@url(pattern='/login')
@template('auth/login.html.j2')
def login_view(self, ctx):
q = ctx.request.query
if 'next' in q:
ctx.session.next = q['next'][0]
@url(pattern='/login', method='POST')
def login_do(self, ctx):
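        """Validate the submitted credentials and redirect to the 'next' URL on success."""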
q = ctx.request.query
username = q['username'][0]
password = q['password'][0]
svc = self.module.services.UserLoginService(
username=username,
password=password
)
result = svc.check_credentials()
token = svc.create_token()
if result:
if self.module.config.token_in_session:
svc.authenticate_session(ctx, token)
_next = None
if 'next' in q:
_next = q['next'][0]
if _next is None and 'next' in ctx.session:
_next = ctx.session.next
if _next is None:
_next = '/'
ctx.response.set_redirect(_next)
else:
ctx.response.set_redirect('/auth/login')
@url(pattern='/logout')
def logout_do(self, ctx):
q = ctx.request.query
svc = self.module.services.UserLogoutService()
token = ctx.auth.get('token')
svc.expire_token(token)
_next = q.get('next', ['/'])[0]
ctx.response.set_redirect(_next)
| mit | -2,225,894,349,991,006,000 | 24.469697 | 55 | 0.545509 | false | 3.760626 | false | false | false |
malaonline/Server | server/app/migrations/0064_auto_20160201_1843.py | 1 | 1150 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-01 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0063_auto_20160201_1830'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='time_slot',
),
migrations.AddField(
model_name='timeslot',
name='comment',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Comment'),
),
migrations.AlterField(
model_name='timeslot',
name='attendance',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TimeSlotAttendance'),
),
migrations.AlterField(
model_name='timeslot',
name='complaint',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TimeSlotComplaint'),
),
]
| mit | -8,703,896,778,187,276,000 | 31.857143 | 136 | 0.610435 | false | 3.95189 | false | false | false |
ankitrgadiya/cs50 | project/miki/miki/edit.py | 1 | 2137 | import os
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for,
current_app, Markup
)
from markdown import markdown
from werkzeug.exceptions import abort
from miki.auth import login_required
from miki.db import connect
bp = Blueprint('edit', __name__)
@bp.app_template_filter('markdown')
def markdown_filter(content):
return Markup(markdown(content))
@bp.route('/edit', methods=('GET', 'POST'))
@login_required
def edit():
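    """Show the markdown editor on GET; on POST write the markdown source and rendered HTML, then redirect."""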
if request.method == 'POST':
source = request.form.get('source', None)
content = request.form.get('content', None)
if not source or not content:
abort(406)
# Extract filename
file_name = os.path.splitext(os.path.basename(source))[0]
# Write content to markdown
md = open(os.path.join(
current_app.config.get('SOURCE'),
file_name + '.md'),
'w'
)
md.write(content)
md.close()
# Write content to html
html = open(os.path.join(
current_app.config.get('OUTPUT'),
file_name + '.html'),
'w'
)
html.write(render_template(
'page.html',
content=content,
name=file_name)
)
html.close()
# Redirect to generated html
return redirect('/' + file_name + '.html')
else:
# Check for args in request
if not request.args.get("file"):
raise RuntimeError("No file parameter passed!")
# Markdown file
md = os.path.join(
current_app.config.get('SOURCE'),
request.args.get('file')
)
# Try opening markdown
try:
# Save contents
md_file = open(md, 'r')
content = md_file.read()
md_file.close()
        # If the file does not exist
except FileNotFoundError:
content = ''
            flash('Page does not exist yet.')
return render_template(
'edit.html',
content=content,
source=request.args.get("file")
)
| gpl-3.0 | -4,613,021,155,570,770,000 | 23.848837 | 69 | 0.542349 | false | 4.133462 | false | false | false |
erh3cq/hyperspy | hyperspy/tests/io/test_io.py | 1 | 6593 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import os
import logging
import tempfile
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.signals import Signal1D
FULLFILENAME = Path(__file__).resolve().parent.joinpath("test_io_overwriting.hspy")
class TestIOOverwriting:
def setup_method(self, method):
self.s = Signal1D(np.arange(10))
self.new_s = Signal1D(np.ones(5))
# make sure we start from a clean state
self._clean_file()
self.s.save(FULLFILENAME)
self.s_file_hashed = self._hash_file(FULLFILENAME)
def _hash_file(self, filename):
with open(filename, "rb") as file:
md5_hash = hashlib.md5(file.read())
file_hashed = md5_hash.hexdigest()
return file_hashed
def _clean_file(self):
if os.path.exists(FULLFILENAME):
os.remove(FULLFILENAME)
def _check_file_is_written(self, filename):
        # Check that we have a different hash; if the file content differs
        # from the original, the hash will be different.
        return self.s_file_hashed != self._hash_file(filename)
def test_io_overwriting_True(self):
# Overwrite is True, when file exists we overwrite
self.new_s.save(FULLFILENAME, overwrite=True)
assert self._check_file_is_written(FULLFILENAME)
def test_io_overwriting_False(self):
        # Overwrite is False: when the file exists we don't overwrite it
self.new_s.save(FULLFILENAME, overwrite=False)
assert not self._check_file_is_written(FULLFILENAME)
@pytest.mark.parametrize("overwrite", [None, True, False])
def test_io_overwriting_no_existing_file(self, overwrite):
self._clean_file() # remove the file
self.new_s.save(FULLFILENAME, overwrite=overwrite)
assert self._check_file_is_written(FULLFILENAME)
def test_io_overwriting_None_existing_file_y(self):
# Overwrite is None, when file exists we ask, mock `y` here
with patch("builtins.input", return_value="y"):
self.new_s.save(FULLFILENAME)
assert self._check_file_is_written(FULLFILENAME)
def test_io_overwriting_None_existing_file_n(self):
# Overwrite is None, when file exists we ask, mock `n` here
with patch("builtins.input", return_value="n"):
self.new_s.save(FULLFILENAME)
assert not self._check_file_is_written(FULLFILENAME)
def teardown_method(self, method):
self._clean_file()
def test_glob_wildcards():
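    # Saving files whose names contain square brackets exercises the
    # escape_square_brackets handling in hs.load().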
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
fnames = [os.path.join(dirpath, f"temp[1x{x}].hspy") for x in range(2)]
for f in fnames:
s.save(f)
with pytest.raises(ValueError, match="No filename matches this pattern"):
_ = hs.load(fnames[0])
t = hs.load([fnames[0]])
assert len(t) == 1
t = hs.load(fnames)
assert len(t) == 2
t = hs.load(os.path.join(dirpath, "temp*.hspy"))
assert len(t) == 2
t = hs.load(os.path.join(dirpath, "temp[*].hspy"), escape_square_brackets=True,)
assert len(t) == 2
with pytest.raises(ValueError, match="No filename matches this pattern"):
_ = hs.load(os.path.join(dirpath, "temp[*].hspy"))
# Test pathlib.Path
t = hs.load(Path(dirpath, "temp[1x0].hspy"))
assert len(t) == 1
t = hs.load([Path(dirpath, "temp[1x0].hspy"), Path(dirpath, "temp[1x1].hspy")])
assert len(t) == 2
t = hs.load(list(Path(dirpath).glob("temp*.hspy")))
assert len(t) == 2
t = hs.load(Path(dirpath).glob("temp*.hspy"))
assert len(t) == 2
def test_file_not_found_error():
with tempfile.TemporaryDirectory() as dirpath:
temp_fname = os.path.join(dirpath, "temp.hspy")
if os.path.exists(temp_fname):
os.remove(temp_fname)
with pytest.raises(ValueError, match="No filename matches this pattern"):
_ = hs.load(temp_fname)
with pytest.raises(FileNotFoundError):
_ = hs.load([temp_fname])
def test_file_reader_error():
# Only None, str or objects with attr "file_reader" are supported
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp.hspy")
s.save(f)
with pytest.raises(ValueError, match="reader"):
_ = hs.load(f, reader=123)
def test_file_reader_warning(caplog):
# Test fallback to Pillow imaging library
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp.hspy")
s.save(f)
with pytest.raises(ValueError, match="Could not load"):
with caplog.at_level(logging.WARNING):
_ = hs.load(f, reader="some_unknown_file_extension")
assert "Unable to infer file type from extension" in caplog.text
def test_file_reader_options():
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp.hspy")
s.save(f)
# Test string reader
t = hs.load(Path(dirpath, "temp.hspy"), reader="hspy")
assert len(t) == 1
np.testing.assert_allclose(t.data, np.arange(10))
# Test object reader
from hyperspy.io_plugins import hspy
t = hs.load(Path(dirpath, "temp.hspy"), reader=hspy)
assert len(t) == 1
np.testing.assert_allclose(t.data, np.arange(10))
def test_save_default_format():
s = Signal1D(np.arange(10))
with tempfile.TemporaryDirectory() as dirpath:
f = os.path.join(dirpath, "temp")
s.save(f)
t = hs.load(Path(dirpath, "temp.hspy"))
assert len(t) == 1
| gpl-3.0 | -7,554,021,842,762,585,000 | 31.638614 | 88 | 0.635219 | false | 3.542719 | true | false | false |
william-richard/moto | moto/dynamodb2/exceptions.py | 1 | 5931 | class InvalidIndexNameError(ValueError):
pass
class MockValidationException(ValueError):
def __init__(self, message):
self.exception_msg = message
class InvalidUpdateExpressionInvalidDocumentPath(MockValidationException):
invalid_update_expression_msg = (
"The document path provided in the update expression is invalid for update"
)
def __init__(self):
super(InvalidUpdateExpressionInvalidDocumentPath, self).__init__(
self.invalid_update_expression_msg
)
class InvalidUpdateExpression(MockValidationException):
invalid_update_expr_msg = "Invalid UpdateExpression: {update_expression_error}"
def __init__(self, update_expression_error):
self.update_expression_error = update_expression_error
super(InvalidUpdateExpression, self).__init__(
self.invalid_update_expr_msg.format(
update_expression_error=update_expression_error
)
)
class AttributeDoesNotExist(MockValidationException):
attr_does_not_exist_msg = (
"The provided expression refers to an attribute that does not exist in the item"
)
def __init__(self):
super(AttributeDoesNotExist, self).__init__(self.attr_does_not_exist_msg)
class ProvidedKeyDoesNotExist(MockValidationException):
provided_key_does_not_exist_msg = (
"The provided key element does not match the schema"
)
def __init__(self):
super(ProvidedKeyDoesNotExist, self).__init__(
self.provided_key_does_not_exist_msg
)
class ExpressionAttributeNameNotDefined(InvalidUpdateExpression):
name_not_defined_msg = "An expression attribute name used in the document path is not defined; attribute name: {n}"
def __init__(self, attribute_name):
self.not_defined_attribute_name = attribute_name
super(ExpressionAttributeNameNotDefined, self).__init__(
self.name_not_defined_msg.format(n=attribute_name)
)
class AttributeIsReservedKeyword(InvalidUpdateExpression):
attribute_is_keyword_msg = (
"Attribute name is a reserved keyword; reserved keyword: {keyword}"
)
def __init__(self, keyword):
self.keyword = keyword
super(AttributeIsReservedKeyword, self).__init__(
self.attribute_is_keyword_msg.format(keyword=keyword)
)
class ExpressionAttributeValueNotDefined(InvalidUpdateExpression):
attr_value_not_defined_msg = "An expression attribute value used in expression is not defined; attribute value: {attribute_value}"
def __init__(self, attribute_value):
self.attribute_value = attribute_value
super(ExpressionAttributeValueNotDefined, self).__init__(
self.attr_value_not_defined_msg.format(attribute_value=attribute_value)
)
class UpdateExprSyntaxError(InvalidUpdateExpression):
update_expr_syntax_error_msg = "Syntax error; {error_detail}"
def __init__(self, error_detail):
self.error_detail = error_detail
super(UpdateExprSyntaxError, self).__init__(
self.update_expr_syntax_error_msg.format(error_detail=error_detail)
)
class InvalidTokenException(UpdateExprSyntaxError):
token_detail_msg = 'token: "{token}", near: "{near}"'
def __init__(self, token, near):
self.token = token
self.near = near
super(InvalidTokenException, self).__init__(
self.token_detail_msg.format(token=token, near=near)
)
class InvalidExpressionAttributeNameKey(MockValidationException):
invalid_expr_attr_name_msg = (
'ExpressionAttributeNames contains invalid key: Syntax error; key: "{key}"'
)
def __init__(self, key):
self.key = key
super(InvalidExpressionAttributeNameKey, self).__init__(
self.invalid_expr_attr_name_msg.format(key=key)
)
class ItemSizeTooLarge(MockValidationException):
item_size_too_large_msg = "Item size has exceeded the maximum allowed size"
def __init__(self):
super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg)
class ItemSizeToUpdateTooLarge(MockValidationException):
item_size_to_update_too_large_msg = (
"Item size to update has exceeded the maximum allowed size"
)
def __init__(self):
super(ItemSizeToUpdateTooLarge, self).__init__(
self.item_size_to_update_too_large_msg
)
class IncorrectOperandType(InvalidUpdateExpression):
inv_operand_msg = "Incorrect operand type for operator or function; operator or function: {f}, operand type: {t}"
def __init__(self, operator_or_function, operand_type):
self.operator_or_function = operator_or_function
self.operand_type = operand_type
super(IncorrectOperandType, self).__init__(
self.inv_operand_msg.format(f=operator_or_function, t=operand_type)
)
class IncorrectDataType(MockValidationException):
inc_data_type_msg = "An operand in the update expression has an incorrect data type"
def __init__(self):
super(IncorrectDataType, self).__init__(self.inc_data_type_msg)
class ConditionalCheckFailed(ValueError):
msg = "The conditional request failed"
def __init__(self):
super(ConditionalCheckFailed, self).__init__(self.msg)
class TransactionCanceledException(ValueError):
cancel_reason_msg = "Transaction cancelled, please refer cancellation reasons for specific reasons [{}]"
def __init__(self, errors):
msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors]))
super(TransactionCanceledException, self).__init__(msg)
class EmptyKeyAttributeException(MockValidationException):
empty_str_msg = "One or more parameter values were invalid: An AttributeValue may not contain an empty string"
def __init__(self):
super(EmptyKeyAttributeException, self).__init__(self.empty_str_msg)
| apache-2.0 | -7,290,487,194,947,429,000 | 33.283237 | 134 | 0.685888 | false | 4.170886 | false | false | false |
tisnik/fabric8-analytics-common | vscode-visual-tests/features/src/gui.py | 1 | 4392 | # vim: set fileencoding=utf-8
"""Common functions for GUI-related tests."""
from PIL import Image
from os import path
from src.litava import locate_on_screen_using_litava
TYPING_INTERVAL = 0.25
DIRECTORY_WITH_REGIONS = "regions"
OUTPUT_DIRECTORY = "."
def perform_move_mouse_cursor(context, x=0, y=0):
"""Move mouse cursor to specifief coordinates."""
assert context is not None, "Context must be provided by Behave"
context.pyautogui.moveTo(x, y)
def check_location_existence(location):
"""Check if location exist and can be found on the screen."""
assert location is not None, "Region can not be found"
def perform_click_on_the_region(context):
"""Click on region found by previous test step."""
assert context is not None, "Context must be provided by Behave"
# get the already found location
location = context.location
check_location_existence(location)
# click on the center of location
x, y = context.pyautogui.center(location)
context.pyautogui.click(x, y)
def perform_right_click_on_the_region(context):
"""Click on region found by previous test step by the right mouse button."""
assert context is not None, "Context must be provided by Behave"
# get the already found location
location = context.location
check_location_existence(location)
# click on the center of location
x, y = context.pyautogui.center(location)
context.pyautogui.click(x, y, button="right")
def perform_type(context, what_to_type):
"""Type anything onto the screen."""
context.pyautogui.typewrite(what_to_type, interval=TYPING_INTERVAL)
def region_filename_in_directory(directory, version, region):
"""Generate filename for region residing in specified directory."""
# construct proper filename
region = region.replace(" ", "_")
filename = path.join(directory + "/" + version, region + ".png")
return filename
def entry_region_check(context, region):
"""Check if context and region are set."""
assert context is not None, "Context is not set (FATAL)"
assert region is not None, "Name of region is required parameter"
def filename_for_region(context, region):
"""Proper filename for file containing pattern for region."""
assert context is not None, "Context is not set (FATAL)"
assert region is not None, "Name of region is required parameter"
version = context.vs_code_version
return region_filename_in_directory(DIRECTORY_WITH_REGIONS, version, region)
def save_screenshot(context, region):
"""Save screenshot with the filename the same as the region."""
assert context is not None, "Context is not set (FATAL)"
assert region is not None, "Name of region is required parameter"
version = context.vs_code_version
filename = region_filename_in_directory(OUTPUT_DIRECTORY, version, region)
context.pyautogui.screenshot(filename)
def find_the_pattern(context, filename):
"""Try to find the pattern in a screenshot."""
SCREENSHOT_FILENAME = "screenshot.bmp"
PATTERN_FILENAME = "pattern.bmp"
# fuzzy search
if context.use_litava:
context.pyautogui.screenshot(SCREENSHOT_FILENAME)
img = Image.open(filename)
img.save(PATTERN_FILENAME)
return locate_on_screen_using_litava(SCREENSHOT_FILENAME, PATTERN_FILENAME)
else:
return None
def perform_find_the_region(context, region, alternate_region=None):
"""Try to find region on screen based on specified pattern."""
entry_region_check(context, region)
location = context.location = None
try:
# first step - try to localize primary region
filename = filename_for_region(context, region)
location = context.pyautogui.locateOnScreen(filename)
check_location_existence(location)
except Exception:
# the primary region can't be found: try the alternate region, if any
if alternate_region is not None:
perform_find_the_region(context, alternate_region)
# first region can't be found and alternate region is not specified -> a problem
else:
location = find_the_pattern(context, filename)
            if location is None:
save_screenshot(context, region)
raise Exception("Region '{r}' can not be found on the screen".format(r=region))
context.location = location
| apache-2.0 | -6,515,741,662,497,905,000 | 32.784615 | 95 | 0.698315 | false | 4.003646 | false | false | false |
jeremiedecock/snippets | python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_spinbox_widget.py | 1 | 4158 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QSpinBox
class MyData:
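    """Simple in-memory 3x2 integer grid backing the table model."""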
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [[0 for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
print("read ({},{}): {}".format(row_index, column_index, value))
return value
def set_data(self, row_index, column_index, value):
print("write ({},{}): {}".format(row_index, column_index, value))
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data # DON'T CALL THIS ATTRIBUTE "data", A METHOD ALREADY HAVE THIS NAME (model.data(index, role)) !!!
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
# See https://stackoverflow.com/a/8480223
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
# The following line are necessary e.g. to dynamically update the QSortFilterProxyModel
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
###############################################################################
class MyDelegate(QStyledItemDelegate):
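    """Delegate that edits table cells with a frameless QSpinBox limited to the range 0-3."""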
def createEditor(self, parent, option, index):
editor = QSpinBox(parent=parent)
# setFrame(): tell whether the line edit draws itself with a frame.
# If enabled (the default) the line edit draws itself inside a frame, otherwise the line edit draws itself without any frame.
editor.setFrame(False)
editor.setRange(0, 3)
return editor
def setEditorData(self, editor, index):
value = int(index.data(Qt.EditRole)) # equivalent of value = index.model().data(index, Qt.EditRole)
editor.setValue(value)
def setModelData(self, editor, model, index):
editor.interpretText()
value = editor.value()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
table_view.setModel(my_model)
delegate = MyDelegate()
table_view.setItemDelegate(delegate)
table_view.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| mit | -5,714,131,013,226,474,000 | 33.081967 | 133 | 0.616402 | false | 3.732496 | false | false | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/patch_route_filter.py | 1 | 2686 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class PatchRouteFilter(SubResource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list[~azure.mgmt.network.v2017_08_01.models.RouteFilterRule]
:param peerings: A collection of references to express route circuit
peerings.
:type peerings:
list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'provisioning_state': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, id=None, rules=None, peerings=None, tags=None):
super(PatchRouteFilter, self).__init__(id=id)
self.rules = rules
self.peerings = peerings
self.provisioning_state = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
| mit | -656,485,033,284,707,300 | 36.830986 | 91 | 0.599777 | false | 4.094512 | false | false | false |
googleads/google-ads-python | google/ads/googleads/v6/services/services/carrier_constant_service/client.py | 1 | 17815 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.resources.types import carrier_constant
from google.ads.googleads.v6.services.types import carrier_constant_service
from .transports.base import (
CarrierConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CarrierConstantServiceGrpcTransport
class CarrierConstantServiceClientMeta(type):
"""Metaclass for the CarrierConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CarrierConstantServiceTransport]]
_transport_registry["grpc"] = CarrierConstantServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[CarrierConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CarrierConstantServiceClient(metaclass=CarrierConstantServiceClientMeta):
"""Service to fetch carrier constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CarrierConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CarrierConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CarrierConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
CarrierConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def carrier_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified carrier_constant string."""
return "carrierConstants/{criterion_id}".format(
criterion_id=criterion_id,
)
@staticmethod
def parse_carrier_constant_path(path: str) -> Dict[str, str]:
"""Parse a carrier_constant path into its component segments."""
m = re.match(r"^carrierConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, CarrierConstantServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the carrier constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CarrierConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CarrierConstantServiceTransport):
# transport is a CarrierConstantServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CarrierConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_carrier_constant(
self,
request: carrier_constant_service.GetCarrierConstantRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> carrier_constant.CarrierConstant:
r"""Returns the requested carrier constant in full
detail.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetCarrierConstantRequest`):
The request object. Request message for
[CarrierConstantService.GetCarrierConstant][google.ads.googleads.v6.services.CarrierConstantService.GetCarrierConstant].
resource_name (:class:`str`):
Required. Resource name of the
carrier constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.CarrierConstant:
A carrier criterion that can be used
in campaign targeting.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a carrier_constant_service.GetCarrierConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, carrier_constant_service.GetCarrierConstantRequest
):
request = carrier_constant_service.GetCarrierConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_carrier_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("CarrierConstantServiceClient",)
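# A minimal usage sketch (illustrative only, not part of the generated
# client): it assumes Application Default Credentials are available and the
# carrier criterion id below is made up. Endpoint and certificate selection
# follow the GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
# environment variables documented in ``__init__`` above.
if __name__ == "__main__":
    client = CarrierConstantServiceClient()
    constant = client.get_carrier_constant(
        resource_name="carrierConstants/123456"  # hypothetical criterion id
    )
    print(constant)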
| apache-2.0 | 2,523,804,490,509,615,000 | 39.488636 | 136 | 0.623463 | false | 4.597419 | false | false | false |
Aalto-LeTech/a-plus | shibboleth_login/auth_backend.py | 1 | 6416 | import logging
import urllib.parse
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.shortcuts import redirect
from .apps import app_settings, env_settings
from .parser import Parser
logger = logging.getLogger('aplus.shibboleth')
class ShibbolethAuthBackend(ModelBackend):
"""
Authenticates the trusted user from the Shibboleth middleware headers.
Creates a new user or updates changed fields on an existing user.
"""
def authenticate(self, request, shibd_meta=None):
if not shibd_meta:
return None
user_save_flag = False
UserModel = get_user_model()
username_field = getattr(UserModel, 'USERNAME_FIELD', 'username')
email_field = getattr(UserModel, 'EMAIL_FIELD', 'email')
username_len = UserModel._meta.get_field(username_field).max_length
email_len = UserModel._meta.get_field(email_field).max_length
first_name_len = UserModel._meta.get_field('first_name').max_length
last_name_len = UserModel._meta.get_field('last_name').max_length
parser = Parser(env=shibd_meta,
urldecode=env_settings.URL_DECODE)
# resolve username
username = self._get_scoped_limited(parser, env_settings.USER_ID, username_len)
if not username:
return None
username = username.lower()
# resolve email
email = self._get_scoped_limited(parser, env_settings.EMAIL, email_len)
if email:
email = UserModel.objects.normalize_email(email)
# find user
try:
user = UserModel.objects.filter(**{username_field: username}).get()
except UserModel.DoesNotExist:
user = None
# fallback, find user with email
if not user and app_settings.ALLOW_SEARCH_WITH_EMAIL:
qs = UserModel.objects.filter(**{email_field: email})
if qs.count() == 1:
user = qs.first()
# create missing users
if not user and app_settings.ALLOW_CREATE_NEW_USERS:
logger.info('Creating a new Shibboleth authenticated user: %s <%s>',
username, email)
user = UserModel(**{
username_field: username,
email_field: email or '',
})
if not email:
user.save()
# TODO: use real domain with support for this and pseudonymized users
user.email = '{:d}@localhost'.format(user.id)
user.set_unusable_password()
user_save_flag = True
if not user:
return None
# update email
if email and email != user.email:
user.email = email
user_save_flag = True
# update first_name
first_name = ' '.join(parser.get_values(env_settings.FIRST_NAME, ''))[:first_name_len]
if first_name and first_name != user.first_name:
user.first_name = first_name
user_save_flag = True
# update last_name
last_name = ' '.join(parser.get_values(env_settings.LAST_NAME, ''))[:last_name_len]
if last_name and last_name != user.last_name:
user.last_name = last_name
user_save_flag = True
# if not first_name or last_name, fallback to cn and displayName
if not user.first_name or not user.last_name:
            # Best effort to find the best possible name.
full_name = max((
' '.join(parser.get_values(env_settings.FULL_NAME, '')),
' '.join(parser.get_values(env_settings.COMMON_NAME, '')),
), key=len)
first_, __, last_ = full_name.partition(' ')
if not user.first_name:
user.first_name = first_[:first_name_len]
if not user.last_name:
user.last_name = last_[:last_name_len]
user_save_flag = True
if user_save_flag:
# TODO: write better error reporting, when there is a validator to raise something
user.full_clean()
user.save()
# TODO: support multiple domains
profile = user.userprofile
sid_filters = env_settings.STUDENT_FILTERS.copy()
        # the following filter drops everything except the configured domain
sid_filters[1] = env_settings.STUDENT_DOMAIN.lower()
try:
student_ids = parser.get_urn_values(
env_settings.STUDENT_URN,
env_settings.STUDENT_IDS,
filters=sid_filters)
except KeyError as error:
logger.warning("Did not find a student id for user '%s', missing field '%s'",
username, error)
student_ids = ()
except ValueError as error:
logger.warning("Did not find a student id for user '%s', invalid data: %s",
username, error)
student_ids = ()
# example: ('123456', 'aalto.fi', 'studentID', 'int', 'mace:terena.org')
# -> (value (student number), the domain, id type, int|local, schema namespace)
student_id = next(iter(student_ids), (None,))[0]
if student_id and student_id != profile.student_id:
profile.student_id = student_id
profile.save()
return user
def _get_scoped_limited(self, parser, name, max_len):
try:
value = parser.get_single_value(name)
except KeyError:
logger.warning("Shibboleth login attempt without %s%s.",
env_settings.PREFIX, name)
return None
except ValueError as error:
logger.warning("Shibboleth login attempt with multiple values for %s%s: %s",
env_settings.PREFIX, name, str(error)[:512])
return None
if not value:
logger.warning("Shibboleth login attempt with empty %s%s.",
env_settings.PREFIX, name)
return None
if len(value) > max_len:
logger.warning("Shibboleth login attempt with too long %s%s (%d > %d).",
env_settings.PREFIX, name, len(value), max_len)
return None
if '@' not in value:
logger.warning("Shibboleth login attempt without domain in %s%s (%s).",
env_settings.PREFIX, name, value)
return None
return value
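# A hypothetical illustration of how the backend is driven (the header names
# depend on env_settings.PREFIX / USER_ID / EMAIL / FIRST_NAME / LAST_NAME and
# are made up here; in production the Shibboleth middleware supplies them):
#
#     backend = ShibbolethAuthBackend()
#     user = backend.authenticate(request, shibd_meta={
#         'SHIB_eppn': '[email protected]',
#         'SHIB_mail': '[email protected]',
#         'SHIB_givenName': 'Alice',
#         'SHIB_sn': 'Example',
#     })
#     # -> creates or updates the matching Django user and returns it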
| gpl-3.0 | 8,581,214,611,712,775,000 | 37.884848 | 94 | 0.580268 | false | 4.091837 | false | false | false |
Komodo/macros | Reflow_tower.py | 1 | 1901 | # This macro reorders the selected lines so that the shortest is pushed to
# the top and the longest goes to the bottom (so the result looks like a tower).
#
# The macro is useful for reordering Python's "import" lines.
#
# Author: Nguyễn Hồng Quân ([email protected])
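#
# A hypothetical before/after illustration with three selected import lines
# (after running the macro, the shortest line sits on top and the longest at
# the bottom):
#
#   before:                     after:
#     import collections          import os
#     import os                   import operator
#     import operator             import collections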
import eollib
from xpcom import components
viewSvc = components.classes["@activestate.com/koViewService;1"]\
.getService(components.interfaces.koIViewService)
view = viewSvc.currentView
view = view.queryInterface(components.interfaces.koIScintillaView)
sm = view.scimoz
# Make `start` the beginning position of the first selected line,
# and `end` the ending position of the last selected line.
start = sm.positionFromLine(sm.lineFromPosition(sm.selectionStart))
end = sm.getLineEndPosition(sm.lineFromPosition(sm.selectionEnd))
# Get list of selected lines. Also strip trailing spaces
lines = [l.rstrip() for l in sm.getTextRange(start, end).splitlines()]
# Sometimes one line of code is split into several sublines with a trailing backslash.
# We group these sublines into one logical line and count the length of the longest subline.
groupedlines = [] # Each element is a list of sublines
concat = False
for l in lines:
if l.endswith('\\'): # This line will be concatenated with following lines
if not concat:
groupedlines.append([l])
concat = True
else:
groupedlines[-1].append(l) # Append to the latest list of sublines
else:
if concat: # Last subline to concat
groupedlines[-1].append(l)
concat = False
else:
groupedlines.append([l])
# Reorder by length
groupedlines.sort(key=lambda group: max(len(l) for l in group))
# Flatten groupedlines
lines = []
for g in groupedlines:
lines.extend(g)
# Select part of document
sm.setSel(start, end)
# Replace selection content
eol = eollib.eol2eolStr[sm.eOLMode]
sm.replaceSel(eol.join(lines))
| mpl-2.0 | 1,604,087,599,337,519,600 | 33.472727 | 79 | 0.712025 | false | 3.530726 | false | false | false |
alisheykhi/SocialPDA | graph_util.py | 1 | 8934 | import re, collections, operator
import networkx as nx
from privacy_level import privacy_level_generator
from numpy.random import zipf
from math import ceil
class ReadGraph():
extension = []
G = nx.Graph()
properties = {}
nodes = []
edges = []
privacy_level = []
sorted_degree_sequence = []
def __init__(self, file_name,level):
print "-----------------------------------------------------------"
print "___________________Developed for___________________________"
print "-----------------------------------------------------------"
print "title: SocialPDA: A Structure-Aware Approach for Personalized Degree Anonymity in Social Network Graphs"
print "Author: Ali Sheykhi and Mahdi Abadi"
print "Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
print "{ali.sheykhi, abadi}@modares.ac.ir"
print "-----------------------------------------------------------"
print "___________________Initial Setup___________________________"
print "-----------------------------------------------------------"
self.file_name = file_name
print "file name : ",self.file_name
ReadGraph.extension = ["csv", "txt", "gml", "net"]
self.converter(level)
def converter(self,level):
'''
        Choose the correct converter based on the file extension.
:return:
'''
file_type = self.file_type()
if file_type == "gml":
print "Convert gml file ... "
self.gml_to_graph(level)
if file_type == "txt":
print "Convert txt file ... "
self.txt_to_graph(level)
def file_type(self):
'''
        Return the dataset file type (extension).
        :return: file extension
'''
if self.is_valid():
return self.file_name.split(".")[-1]
def is_valid(self):
'''
        Check whether the file has a supported graph file extension.
:return:
'''
file_extension = self.file_name.split(".")[-1]
if (file_extension):
if (file_extension.lower() in ReadGraph.extension):
return True
else:
print "Unknown file extension \"",file_extension,"\", use:",ReadGraph.extension
return False
else:
print "file does not have an extension!"
return False
def gml_to_graph(self,level):
'''
        Convert a GML graph file into the networkx graph.
:return:
'''
# try:
# file_path = "DataSet/"+self.file_name
# except:
# print "can't open "+self.file_name
# else:
# print "reading gml file ... "
# M = nx.MultiGraph(nx.read_gml('DataSet/polblogs.gml'))
# for u,v,data in M.edges_iter(data=True):
# if ReadGraph.G.has_edge(u,v):
# pass
# else:
# ReadGraph.G.add_edge(u, v)
# ReadGraph.properties ['edge_count'] = len(ReadGraph.edges)
# print len(ReadGraph.G.node)
# self.degree_sequence()
try:
file_path = "DataSet/"+self.file_name
ifile = open(file_path,'r')
except:
print "can't open "+self.file_name
else:
text = ifile.read()
ifile.close()
if text:
print "reading gml file ... "
pattern_meas = re.compile(r"source\s(\d+)\s+target\s(\d+)", re.VERBOSE | re.MULTILINE)
pattern_id = re.compile(r"id\s(\d+)", re.VERBOSE | re.MULTILINE)
for match in pattern_meas.finditer(text):
ReadGraph.edges.append("%s,%s" % (match.group(1), match.group(2)))
for match in pattern_id.finditer(text):
ReadGraph.nodes.append("%s" % match.group(1))
node_count = 0
for node in ReadGraph.nodes:
ReadGraph.G.add_node(int(node))
node_count += 1
for edge in ReadGraph.edges:
ReadGraph.G.add_edge(int(edge.split(",")[0]) ,int( edge.split(",")[1]))
sum = 0
count = 0
for NI in ReadGraph.G.degree().values():
#print "node: %d, out-degree %d, in-degree %d" % ( NI.GetId(), NI.GetOutDeg(), NI.GetInDeg())
sum += NI
count+=1
ReadGraph.properties ['edge_count'] = sum/2
self.degree_sequence(level)
def txt_to_graph(self,level):
"""
        Convert a txt edge-list graph file into the networkx graph.
:return:
"""
try:
file_path = "DataSet/"+self.file_name
ifile = open(file_path ,'r')
except:
print "can't open "+self.file_name
else:
text = ifile.read()
ifile.close()
if text:
print "reading txt file ... "
nodes_list = []
if self.file_name.split(".")[0] == 'caida':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)\s+([-]?\d+)$", re.VERBOSE | re.MULTILINE)
if self.file_name.split(".")[0] == 'caida_test':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)\s+([-]?\d+)$", re.VERBOSE | re.MULTILINE)
if self.file_name.split(".")[0] == 'amazon':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)", re.VERBOSE | re.MULTILINE)
for match in pattern_meas.finditer(text):
# nodes_list.append("%s" % int(match.group(1)))
# nodes_list.append("%s" % int(match.group(2)))
ReadGraph.G.add_edge(int(match.group(1)),int( match.group(2)))
# ReadGraph.nodes = list(set(nodes_list))
# for node in ReadGraph.nodes:
# ReadGraph.G.add_node(int(node))
# for edge in ReadGraph.edges:
# ReadGraph.G.add_edge(int(edge.split(",")[0]) ,int( edge.split(",")[1]))
sum = 0
count = 0
for NI in ReadGraph.G.degree().values():
#print "node: %d, out-degree %d, in-degree %d" % ( NI.GetId(), NI.GetOutDeg(), NI.GetInDeg())
sum += NI
count+=1
ReadGraph.properties ['edge_count'] = sum/2
self.degree_sequence(level)
def degree_sequence(self,level):
print nx.info(ReadGraph.G)
result_in_degree = ReadGraph.G.degree().values()
privacy_file_name = self.file_name.split(".")[0]+"_privacy.txt"
privacy_level = privacy_level_generator(file_name=privacy_file_name,lvl =level)
# departed = []
for node in ReadGraph.G.nodes():
if ReadGraph.G.degree(node):
current_node = dict(degree = ReadGraph.G.degree(node), id=node)
ReadGraph.sorted_degree_sequence.append(current_node)
# if ReadGraph.G.degree(node) == 1:
# departed.append(list(ReadGraph.G.edges_iter(node))[0])
# for item in departed:
# for item2 in departed:
# if item[1] == item2[0]:
# print item, item2
ReadGraph.sorted_degree_sequence.sort(key=lambda x:(x['degree']), reverse=True)
# for i in range (0,5):
# print ReadGraph.sorted_degree_sequence[i]
for i in range(0, len(ReadGraph.sorted_degree_sequence)):
if ReadGraph.sorted_degree_sequence[i]:
ReadGraph.sorted_degree_sequence[i]['privacy_level'] = int(privacy_level[i])
#ReadGraph.sorted_degree_sequence.sort(key=lambda x:(x['privacy_level'],x['degree']), reverse=True)
ReadGraph.properties['node_count'] = len(ReadGraph.sorted_degree_sequence)
max_degree = None
max_degree_id = None
for node in ReadGraph.sorted_degree_sequence:
if node['degree'] > max_degree:
max_degree = node['degree']
max_degree_id = node['id']
ReadGraph.properties ['max_degree_id'] = max_degree_id
ReadGraph.properties ['max_privacy'] = ReadGraph.sorted_degree_sequence[0]['privacy_level']
ReadGraph.properties ['max_privacy_id'] = ReadGraph.sorted_degree_sequence[0]['id']
ReadGraph.properties ['max_degree_size'] = max_degree
ReadGraph.properties ['avg_degree'] = 2 * (float (ReadGraph.properties ['edge_count'])/float (ReadGraph.properties ['node_count']))
node_occur = collections.Counter (result_in_degree)
sorted_node_oc = sorted(node_occur.items(), key=operator.itemgetter(1))
ReadGraph.properties ['k'] = sorted_node_oc[0][1]
print ReadGraph.properties
print "for example, the first node in sorted degree sequence is :" + str(ReadGraph.sorted_degree_sequence[0])
| apache-2.0 | -4,987,266,179,027,643,000 | 39.243243 | 143 | 0.504589 | false | 3.875922 | false | false | false |
aringh/odl | odl/solvers/nonsmooth/forward_backward.py | 1 | 6886 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Optimization methods based on a forward-backward splitting scheme."""
from __future__ import print_function, division, absolute_import
from odl.operator import Operator
__all__ = ('forward_backward_pd',)
def forward_backward_pd(x, f, g, L, h, tau, sigma, niter,
callback=None, **kwargs):
"""The forward-backward primal-dual splitting algorithm.
The algorithm minimizes the sum of several convex functionals composed with
linear operators,::
min_x f(x) + sum_i g_i(L_i x) + h(x)
where ``f``, ``g_i`` are convex functionals, ``L_i`` are linear
    operators, and ``h`` is a convex and differentiable functional.
The method can also be used to solve the more general problem::
min_x f(x) + sum_i (g_i @ l_i)(L_i x) + h(x)
where ``l_i`` are strongly convex functionals and @ is the infimal
convolution::
(g @ l)(x) = inf_y { g(y) + l(x-y) }
Note that the strong convexity of ``l_i`` makes the convex conjugate
    ``l_i^*`` differentiable; see the Notes section for more information on
this.
Parameters
----------
x : `LinearSpaceElement`
Initial point, updated in-place.
f : `Functional`
The functional ``f``. Needs to have ``f.proximal``.
g : sequence of `Functional`'s
The functionals ``g_i``. Needs to have ``g_i.convex_conj.proximal``.
    L : sequence of `Operator`'s
Sequence of linear operators ``L_i``, with as many elements as
``g``.
h : `Functional`
The functional ``h``. Needs to have ``h.gradient``.
tau : float
Step size-like parameter for ``f``.
sigma : sequence of floats
Sequence of step size-like parameters for the sequence ``g``.
niter : int
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Other Parameters
----------------
l : sequence of `Functional`'s, optional
The functionals ``l_i``. Needs to have ``g_i.convex_conj.gradient``.
If omitted, the simpler problem without ``l_i`` will be considered.
Notes
-----
The mathematical problem to solve is
.. math::
\min_x f(x) + \sum_{i=0}^n (g_i \Box l_i)(L_i x) + h(x),
where :math:`f`, :math:`g_i`, :math:`l_i` and :math:`h` are functionals and
:math:`L_i` are linear operators. The infimal convolution :math:`g \Box l`
is defined by
.. math::
(g \Box l)(x) = \inf_y g(y) + l(x - y).
The exact conditions on the involved functionals are as follows: :math:`f`
and :math:`g_i` are proper, convex and lower semicontinuous, and :math:`h`
is convex and differentiable with :math:`\\eta^{-1}`-Lipschitz continuous
gradient, :math:`\\eta > 0`.
The optional operators :math:`\\nabla l_i^*` need to be
:math:`\\nu_i`-Lipschitz continuous. Note that in the paper, the condition
is formulated as :math:`l_i` being proper, lower
semicontinuous, and :math:`\\nu_i^{-1}`-strongly convex, which implies that
:math:`l_i^*` have :math:`\\nu_i`-Lipschitz continuous gradients.
If the optional operators :math:`\\nabla l_i^*` are omitted, the simpler
problem without :math:`l_i` will be considered. Mathematically, this is
done by taking :math:`l_i` to be the functionals that are zero only in the
zero element and :math:`\\infty` otherwise. This gives that :math:`l_i^*`
are the zero functionals, and hence the corresponding gradients are the
zero operators.
To guarantee convergence, the parameters :math:`\\tau`, :math:`\\sigma` and
:math:`L_i` need to satisfy
.. math::
2 \min \{ \\frac{1}{\\tau}, \\frac{1}{\sigma_1}, \\ldots,
\\frac{1}{\sigma_m} \} \cdot \min\{ \\eta, \\nu_1, \\ldots, \\nu_m \}
\cdot \\sqrt{1 - \\tau \\sum_{i=1}^n \\sigma_i ||L_i||^2} > 1,
where, if the simpler problem is considered, all :math:`\\nu_i` can be
considered to be :math:`\\infty`.
For reference on the forward-backward primal-dual algorithm, see [BC2015].
For more on proximal operators and algorithms see [PB2014].
See Also
--------
odl.solvers.nonsmooth.primal_dual_hybrid_gradient.pdhg :
Solver for similar problems without differentiability in any
of the terms.
odl.solvers.nonsmooth.douglas_rachford.douglas_rachford_pd :
Solver for similar problems without differentiability in any
of the terms.
References
----------
[BC2015] Bot, R I, and Csetnek, E R. *On the convergence rate of
a forward-backward type primal-dual splitting algorithm for convex
optimization problems*. Optimization, 64.1 (2015), pp 5--23.
[PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*.
Foundations and Trends in Optimization, 1 (2014), pp 127-239.
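    Examples
    --------
    A minimal usage sketch (illustrative only, not a doctest; the space, data
    and step sizes are arbitrary choices that satisfy the convergence
    condition above), solving ``min_x ||x||_1 + ||x - b||_2^2``::

        import odl
        from odl.solvers import forward_backward_pd

        space = odl.rn(5)
        b = space.element([1.0, -2.0, 0.5, 3.0, -1.0])

        f = odl.solvers.ZeroFunctional(space)       # proximable term f
        g = [odl.solvers.L1Norm(space)]             # non-smooth terms g_i
        L = [odl.IdentityOperator(space)]           # operators L_i
        h = odl.solvers.L2NormSquared(space).translated(b)  # smooth term h

        x = space.zero()
        forward_backward_pd(x, f, g, L, h, tau=0.5, sigma=[0.5], niter=50)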
"""
# Problem size
m = len(L)
# Validate input
if not all(isinstance(op, Operator) for op in L):
raise ValueError('`L` not a sequence of operators')
if not all(op.is_linear for op in L):
raise ValueError('not all operators in `L` are linear')
if not all(x in op.domain for op in L):
raise ValueError('`x` not in the domain of all operators in `L`')
if len(sigma) != m:
raise ValueError('len(sigma) != len(L)')
if len(g) != m:
        raise ValueError('len(g) != len(L)')
# Extract operators
prox_cc_g = [gi.convex_conj.proximal for gi in g]
grad_h = h.gradient
prox_f = f.proximal
l = kwargs.pop('l', None)
if l is not None:
if len(l) != m:
            raise ValueError('`l` not same length as `L`')
grad_cc_l = [li.convex_conj.gradient for li in l]
if kwargs:
raise TypeError('unexpected keyword argument: {}'.format(kwargs))
# Pre-allocate values
v = [Li.range.zero() for Li in L]
y = x.space.zero()
for k in range(niter):
x_old = x
tmp_1 = grad_h(x) + sum(Li.adjoint(vi) for Li, vi in zip(L, v))
prox_f(tau)(x - tau * tmp_1, out=x)
y.lincomb(2.0, x, -1, x_old)
for i in range(m):
if l is not None:
# In this case gradients were given.
tmp_2 = sigma[i] * (L[i](y) - grad_cc_l[i](v[i]))
else:
# In this case gradients were not given. Therefore the gradient
# step is omitted. For more details, see the documentation.
tmp_2 = sigma[i] * L[i](y)
prox_cc_g[i](sigma[i])(v[i] + tmp_2, out=v[i])
if callback is not None:
callback(x)
| mpl-2.0 | -3,281,394,788,236,555,000 | 35.242105 | 79 | 0.609352 | false | 3.403856 | false | false | false |
happy5214/pywikibot-core | pywikibot/comms/threadedhttp.py | 1 | 5773 | # -*- coding: utf-8 -*-
"""Http backend layer, formerly providing a httplib2 wrapper."""
from __future__ import absolute_import, unicode_literals
# (C) Pywikibot team, 2007-2015
__version__ = '$Id$'
__docformat__ = 'epytext'
# standard python libraries
import codecs
import sys
if sys.version_info[0] > 2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
import pywikibot
from pywikibot.tools import UnicodeMixin
_logger = "comm.threadedhttp"
class HttpRequest(UnicodeMixin):
"""Object wrapper for HTTP requests that need to block origin thread.
self.data will be either:
    * a requests.Response object if the request was successful
* an exception
"""
def __init__(self, uri, method="GET", params=None, body=None, headers=None,
callbacks=None, charset=None, **kwargs):
"""
Constructor.
See C{Http.request} for parameters.
"""
self.uri = uri
self.method = method
self.params = params
self.body = body
self.headers = headers
if isinstance(charset, codecs.CodecInfo):
self.charset = charset.name
elif charset:
self.charset = charset
elif headers and 'accept-charset' in headers:
self.charset = headers['accept-charset']
else:
self.charset = None
self.callbacks = callbacks
self.args = [uri, method, body, headers]
self.kwargs = kwargs
self._parsed_uri = None
self._data = None
@property
def data(self):
"""Return the requests response tuple."""
assert(self._data is not None)
return self._data
@data.setter
def data(self, value):
"""Set the requests response and invoke each callback."""
self._data = value
if self.callbacks:
for callback in self.callbacks:
callback(self)
@property
def exception(self):
"""Get the exception, if any."""
if isinstance(self.data, Exception):
return self.data
@property
def response_headers(self):
"""Return the response headers."""
if not self.exception:
return self.data.headers
@property
def raw(self):
"""Return the raw response body."""
if not self.exception:
return self.data.content
@property
def parsed_uri(self):
"""Return the parsed requested uri."""
if not self._parsed_uri:
self._parsed_uri = urlparse(self.uri)
return self._parsed_uri
@property
def hostname(self):
"""Return the host of the request."""
return self.parsed_uri.netloc
@property
def status(self):
"""Return the HTTP response status.
@rtype: int
"""
if not self.exception:
return self.data.status_code
@property
def header_encoding(self):
"""Return charset given by the response header."""
if not hasattr(self, '_header_encoding'):
content_type = self.response_headers.get('content-type', '')
pos = content_type.find('charset=')
if pos >= 0:
pos += len('charset=')
encoding = self.response_headers['content-type'][pos:]
self._header_encoding = encoding
elif 'json' in content_type:
# application/json | application/sparql-results+json
self._header_encoding = 'utf-8'
else:
self._header_encoding = None
return self._header_encoding
@property
def encoding(self):
"""Detect the response encoding."""
if not hasattr(self, '_encoding'):
if not self.charset and not self.header_encoding:
pywikibot.log(u"Http response doesn't contain a charset.")
charset = 'latin1'
else:
charset = self.charset
if (self.header_encoding and codecs.lookup(self.header_encoding) !=
(codecs.lookup(charset) if charset else None)):
if charset:
pywikibot.warning(
'Encoding "{0}" requested but "{1}" '
'received in the header.'.format(
charset, self.header_encoding))
try:
# TODO: Buffer decoded content, weakref does remove it too
# early (directly after this method)
self.raw.decode(self.header_encoding)
except UnicodeError as e:
self._encoding = e
else:
self._encoding = self.header_encoding
else:
self._encoding = None
if charset and (isinstance(self._encoding, Exception) or
not self._encoding):
try:
self.raw.decode(charset)
except UnicodeError as e:
self._encoding = e
else:
self._encoding = charset
if isinstance(self._encoding, Exception):
raise self._encoding
return self._encoding
def decode(self, encoding, errors='strict'):
"""Return the decoded response."""
return self.raw.decode(encoding, errors)
@property
def content(self):
"""Return the response decoded by the detected encoding."""
return self.decode(self.encoding)
def __unicode__(self):
"""Return the response decoded by the detected encoding."""
return self.content
def __bytes__(self):
"""Return the undecoded response."""
return self.raw
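# A small illustrative sketch (the URI is made up); in normal use the
# pywikibot.comms.http layer constructs these objects and a worker thread
# assigns the fetched response (a requests.Response) to ``data``:
#
#     req = HttpRequest('https://example.org/w/api.php?action=query',
#                       method='GET', charset='utf-8')
#     ...  # the fetch happens elsewhere and sets req.data
#     if not req.exception:
#         text = req.content  # body decoded with the detected encoding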
| mit | -4,610,961,845,625,407,500 | 29.544974 | 79 | 0.556556 | false | 4.786899 | false | false | false |
shaunokeefe/hoponit | hoponit/harvest/management/commands/api.py | 1 | 1352 | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from harvest import untappd
class Command(BaseCommand):
    args = '<command [args ...]>'
    help = 'Query the Untappd API (venue, fs, checkins, fscheckins, limit, suburb)'
def handle(self, *args, **options):
cmd = args[0]
u = untappd.UntappdApi(settings.HARVEST_UNTAPPD_CLIENT_ENDPOINT)
if cmd == "venue":
venue_id = args[1]
venue = u.foursquare_id_to_untappd(venue_id)
if cmd == "fs":
venue_id = args[1]
venue = u.foursquare_id_to_untappd(venue_id)
print venue
if cmd == "checkins":
venue_id = args[1]
checkins = u.get_venue_checkins(venue_id=venue_id)
for checkin in checkins:
print "%s" % (checkin)
if cmd == "fscheckins":
venue_id = args[1]
checkins = u.get_venue_checkins(foursquare_venue_id=venue_id)
for checkin in checkins:
print "%s" % (checkin)
if cmd == "limit":
            # assumes the UntappdApi client exposes a check_limit() helper
            print u.check_limit()
if cmd == "suburb":
suburb = args[1]
venues = u.get_venues_for_suburb(suburb)
for venue in venues:
print "%s: %s" % (venue['id'], venue['name'])
        self.stdout.write('Success')
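# Example invocations (venue ids and the suburb name are illustrative):
#
#   python manage.py api suburb Fitzroy
#   python manage.py api checkins <untappd_venue_id>
#   python manage.py api fscheckins <foursquare_venue_id>
#   python manage.py api limit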
| mit | -7,560,068,598,439,669,000 | 29.044444 | 73 | 0.542899 | false | 3.346535 | false | false | false |
andela-ooshodi/codango-debug | codango/resources/tests/test_routes.py | 1 | 3625 | from django.test import Client, TestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from resources.models import Resource
from django.test.utils import setup_test_environment
setup_test_environment()
class CommunityViewTest(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='Abiodun', password='shuaib')
self.user.set_password('shuaib')
self.user.save()
self.login = self.client.login(username='Abiodun', password='shuaib')
def create_resources(
self,
text='some more words',
resource_file='resource_file'):
return Resource.objects.create(
id=100,
text=text,
author=self.user,
resource_file=resource_file
)
def test_can_reach_ajax_community_page(self):
self.assertTrue(self.login)
response = self.client.get(
reverse('community', args=('all',)),
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTrue(response.status_code == 200)
self.assertContains(response, "There are currently no posts")
def test_can_post_new_ajax_content(self):
self.assertTrue(self.login)
response = self.client.post(
'/resource/create',
{'text': '1', },
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "success")
def test_add_an_empty_resource(self):
self.assertTrue(self.login)
response = self.client.post(
'/resource/newresource',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 404)
def test_user_can_upvote(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/like', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.upvotes()), 1)
def test_user_can_downvote(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.downvotes()), 1)
    def test_user_can_get_persistent_vote(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.client.post(
'/resource/100/like', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.upvotes()), 1)
def test_user_cannot_vote_more_than_once(self):
self.assertTrue(self.login)
resource = self.create_resources()
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = self.client.post(
'/resource/100/unlike', {'resource_id': 100},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(resource.upvotes()), 0)
| mit | 8,126,015,275,894,616,000 | 37.157895 | 78 | 0.622897 | false | 3.961749 | true | false | false |
MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/coordinates/test_xdr.py | 1 | 29773 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
from unittest.mock import patch
import errno
import numpy as np
import os
import shutil
import subprocess
from numpy.testing import (assert_equal, assert_almost_equal)
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import (
PDB_sub_dry, PDB_sub_sol, TRR_sub_sol, TRR, XTC, GRO, PDB, CRD, PRMncdf,
NCDF, XTC_sub_sol, COORDINATES_XTC, COORDINATES_TOPOLOGY, COORDINATES_TRR)
from MDAnalysisTests.coordinates.base import (MultiframeReaderTest,
BaseReference, BaseWriterTest,
assert_timestep_almost_equal)
import MDAnalysis as mda
from MDAnalysis.coordinates.base import Timestep
from MDAnalysis.coordinates import XDR
class _XDRReader_Sub(object):
@pytest.fixture()
def atoms(self):
usol = mda.Universe(PDB_sub_sol, self.XDR_SUB_SOL)
return usol.select_atoms("not resname SOL")
def test_load_new_raises_ValueError(self):
# should fail if we load universe with a trajectory with different
# number of atoms when NOT using sub, same as before.
udry = mda.Universe(PDB_sub_dry)
with pytest.raises(ValueError):
udry.load_new(self.XDR_SUB_SOL)
def test_sub_coordinates(self, atoms):
"""
load solvated trajectory into universe with unsolvated protein.
"""
udry = mda.Universe(PDB_sub_dry)
udry.load_new(self.XDR_SUB_SOL, sub=atoms.indices)
ts = udry.atoms.ts
assert_timestep_almost_equal(ts, atoms.ts)
class TestTRRReader_Sub(_XDRReader_Sub):
XDR_SUB_SOL = TRR_sub_sol
class TestXTCReader_Sub(_XDRReader_Sub):
XDR_SUB_SOL = XTC_sub_sol
class _GromacsReader(object):
# This base class assumes same lengths and dt for XTC and TRR test cases!
filename = None
ref_unitcell = np.array(
[80.017, 80.017, 80.017, 60., 60., 90.], dtype=np.float32)
# computed with Gromacs: 362.26999999999998 nm**3 * 1000 A**3/nm**3
ref_volume = 362270.0
prec = 3
@pytest.fixture(scope='class')
def universe(self):
return mda.Universe(GRO, self.filename, convert_units=True)
def test_rewind_xdrtrj(self, universe):
universe.trajectory.rewind()
assert_equal(universe.coord.frame, 0, "rewinding to frame 1")
assert universe.trajectory._xdr._has_offsets == 1
def test_next_xdrtrj(self, universe):
universe.trajectory.rewind()
universe.trajectory.next()
assert_equal(universe.coord.frame, 1, "loading frame 1")
def test_jump_xdrtrj(self, universe):
universe.trajectory[4] # index is 0-based and frames are 0-based
assert_equal(universe.coord.frame, 4, "jumping to frame 4")
def test_jump_lastframe_xdrtrj(self, universe):
universe.trajectory[-1]
assert_equal(universe.coord.frame, 9,
"indexing last frame with trajectory[-1]")
def test_slice_xdrtrj(self, universe):
frames = [ts.frame for ts in universe.trajectory[2:9:3]]
assert_equal(frames, [2, 5, 8], "slicing xdrtrj [2:9:3]")
def test_reverse_xdrtrj(self, universe):
frames = [ts.frame for ts in universe.trajectory[::-1]]
assert_equal(frames, list(range(9, -1, -1)), "slicing xdrtrj [::-1]")
def test_coordinates(self, universe):
ca_nm = np.array(
[[6.043369675, 7.385184479, 1.381425762]], dtype=np.float32)
        # coordinates in the base unit (needed for convert_units=True)
ca_Angstrom = ca_nm * 10.0
universe.trajectory.rewind()
universe.trajectory.next()
universe.trajectory.next()
assert_equal(universe.coord.frame, 2, "failed to step to frame 3")
ca = universe.select_atoms('name CA and resid 122')
# low precision match (2 decimals in A, 3 in nm) because the above are
# the trr coords
assert_almost_equal(
ca.positions,
ca_Angstrom,
2,
err_msg="coords of Ca of resid 122 do not "
"match for frame 3")
def test_unitcell(self, universe):
"""Test that xtc/trr unitcell is read correctly (Issue 34)"""
universe.trajectory.rewind()
uc = universe.coord.dimensions
assert_almost_equal(
uc,
self.ref_unitcell,
self.prec,
err_msg="unit cell dimensions (rhombic dodecahedron)")
def test_volume(self, universe):
# need to reduce precision for test (nm**3 <--> A**3)
universe.trajectory.rewind()
vol = universe.coord.volume
assert_almost_equal(
vol,
self.ref_volume,
0,
err_msg="unit cell volume (rhombic dodecahedron)")
def test_dt(self, universe):
assert_almost_equal(
universe.trajectory.dt, 100.0, 4, err_msg="wrong timestep dt")
def test_totaltime(self, universe):
# test_totaltime(): need to reduce precision because dt is only precise
# to ~4 decimals and accumulating the inaccuracy leads to even lower
# precision in the totaltime (consequence of fixing Issue 64)
assert_almost_equal(
universe.trajectory.totaltime,
900.0,
3,
err_msg="wrong total length of trajectory")
def test_frame(self, universe):
universe.trajectory[4] # index is 0-based and frames are 0-based
assert_equal(universe.trajectory.frame, 4, "wrong frame number")
def test_time(self, universe):
universe.trajectory[4]
assert_almost_equal(
universe.trajectory.time, 400.0, 3, err_msg="wrong time of frame")
def test_get_Writer(self, universe, tmpdir):
ext = os.path.splitext(self.filename)[1]
outfile = str(tmpdir.join('xdr-reader-test' + ext))
with universe.trajectory.Writer(outfile) as W:
assert_equal(universe.trajectory.format, W.format)
assert_equal(universe.atoms.n_atoms, W.n_atoms)
def test_Writer(self, tmpdir):
universe = mda.Universe(GRO, self.filename, convert_units=True)
ext = os.path.splitext(self.filename)[1]
outfile = str(tmpdir.join('/xdr-reader-test' + ext))
with universe.trajectory.Writer(outfile) as W:
W.write(universe.atoms)
universe.trajectory.next()
W.write(universe.atoms)
universe.trajectory.rewind()
u = mda.Universe(GRO, outfile)
assert_equal(u.trajectory.n_frames, 2)
# prec = 6: TRR test fails; here I am generous and take self.prec =
# 3...
assert_almost_equal(u.atoms.positions, universe.atoms.positions,
self.prec)
def test_EOFraisesStopIteration(self, universe):
def go_beyond_EOF():
universe.trajectory[-1]
universe.trajectory.next()
with pytest.raises(StopIteration):
go_beyond_EOF()
class TestXTCReader(_GromacsReader):
filename = XTC
class TestXTCReaderClass(object):
def test_with_statement(self):
from MDAnalysis.coordinates.XTC import XTCReader
try:
with XTCReader(XTC) as trj:
N = trj.n_frames
frames = [ts.frame for ts in trj]
except:
raise AssertionError("with_statement not working for XTCReader")
assert_equal(
N,
10,
err_msg="with_statement: XTCReader reads wrong number of frames")
assert_equal(
frames,
np.arange(0, N),
err_msg="with_statement: XTCReader does not read all frames")
class TestTRRReader(_GromacsReader):
filename = TRR
def test_velocities(self, universe):
# frame 0, v in nm/ps
# from gmxdump -f MDAnalysisTests/data/adk_oplsaa.trr
# v[47675]={-7.86469e-01, 1.57479e+00, 2.79722e-01}
# v[47676]={ 2.70593e-08, 1.08052e-06, 6.97028e-07}
v_native = np.array(
[[-7.86469e-01, 1.57479e+00, 2.79722e-01],
[2.70593e-08, 1.08052e-06, 6.97028e-07]],
dtype=np.float32)
        # velocities in the MDA base unit A/ps (needed for convert_units=True)
v_base = v_native * 10.0
universe.trajectory.rewind()
assert_equal(universe.coord.frame, 0, "failed to read frame 1")
assert_almost_equal(
universe.trajectory.ts._velocities[[47675, 47676]],
v_base,
self.prec,
err_msg="ts._velocities for indices 47675,47676 do not "
"match known values")
assert_almost_equal(
universe.atoms.velocities[[47675, 47676]],
v_base,
self.prec,
err_msg="velocities for indices 47675,47676 do not "
"match known values")
for index, v_known in zip([47675, 47676], v_base):
assert_almost_equal(
universe.atoms[index].velocity,
v_known,
self.prec,
err_msg="atom[{0:d}].velocity does not match known values".
format(index))
class _XDRNoConversion(object):
filename = None
@pytest.fixture()
def universe(self):
return mda.Universe(PDB, self.filename, convert_units=False)
def test_coordinates(self, universe):
# note: these are the native coordinates in nm
ca_nm = np.array(
[[6.043369675, 7.385184479, 1.381425762]], dtype=np.float32)
universe.trajectory.rewind()
universe.trajectory.next()
universe.trajectory.next()
assert_equal(universe.trajectory.ts.frame, 2,
"failed to step to frame 3")
ca = universe.select_atoms('name CA and resid 122')
# low precision match because we also look at the trr: only 3 decimals
# in nm in xtc!
assert_almost_equal(
ca.positions,
ca_nm,
3,
err_msg="native coords of Ca of resid 122 "
"do not match for frame 3 with "
"convert_units=False")
class TestXTCNoConversion(_XDRNoConversion):
filename = XTC
class TestTRRNoConversion(_XDRNoConversion):
filename = TRR
class _GromacsWriter(object):
infilename = None # XTC or TRR
Writers = {
'.trr': mda.coordinates.TRR.TRRWriter,
'.xtc': mda.coordinates.XTC.XTCWriter,
}
@pytest.fixture(scope='class')
def universe(self):
return mda.Universe(GRO, self.infilename)
@pytest.fixture()
def Writer(self):
ext = os.path.splitext(self.infilename)[1]
return self.Writers[ext]
@pytest.fixture()
def outfile(self, tmpdir):
ext = os.path.splitext(self.infilename)[1]
return str(tmpdir.join('xdr-writer-test' + ext))
def test_write_trajectory(self, universe, Writer, outfile):
"""Test writing Gromacs trajectories (Issue 38)"""
with Writer(outfile, universe.atoms.n_atoms, dt=universe.trajectory.dt) as W:
for ts in universe.trajectory:
W.write(universe)
uw = mda.Universe(GRO, outfile)
# check that the coordinates are identical for each time step
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
assert_almost_equal(
written_ts._pos,
orig_ts._pos,
3,
err_msg="coordinate mismatch between "
"original and written trajectory at "
"frame %d (orig) vs %d (written)" % (orig_ts.frame,
written_ts.frame))
def test_timestep_not_modified_by_writer(self, universe, Writer, outfile):
trj = universe.trajectory
ts = trj.ts
trj[-1] # last timestep (so that time != 0)
x = ts._pos.copy()
time = ts.time
with Writer(outfile, trj.n_atoms, dt=trj.dt) as W:
# last timestep (so that time != 0) (say it again, just in case...)
trj[-1]
W.write(universe)
assert_equal(
ts._pos,
x,
err_msg="Positions in Timestep were modified by writer.")
assert_equal(
ts.time, time, err_msg="Time in Timestep was modified by writer.")
class TestXTCWriter(_GromacsWriter):
__test__ = True
infilename = XTC
class TestTRRWriter(_GromacsWriter):
__test__ = True
infilename = TRR
def test_velocities(self, universe, Writer, outfile):
with Writer(outfile, universe.atoms.n_atoms, dt=universe.trajectory.dt) as W:
for ts in universe.trajectory:
W.write(universe)
uw = mda.Universe(GRO, outfile)
# check that the velocities are identical for each time step
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
assert_almost_equal(
written_ts._velocities,
orig_ts._velocities,
3,
err_msg="velocities mismatch between "
"original and written trajectory at "
"frame %d (orig) vs %d (written)" % (orig_ts.frame,
written_ts.frame))
def test_gaps(self, universe, Writer, outfile):
"""Tests the writing and reading back of TRRs with gaps in any of
the coordinates/velocities properties."""
with Writer(outfile, universe.atoms.n_atoms, dt=universe.trajectory.dt) as W:
for ts in universe.trajectory:
                # Insert some gaps in the properties: coords every 4 steps, vels
# every 2.
if ts.frame % 4 == 0:
ts.has_positions = False
if ts.frame % 2 == 0:
ts.has_velocities = False
W.write(universe)
uw = mda.Universe(GRO, outfile)
# check that the velocities are identical for each time step, except
# for the gaps (that we must make sure to raise exceptions on).
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
if ts.frame % 4 != 0:
assert_almost_equal(
written_ts.positions,
orig_ts.positions,
3,
err_msg="coordinates mismatch "
"between original and written "
"trajectory at frame {} (orig) "
"vs {} (written)".format(orig_ts.frame, written_ts.frame))
else:
with pytest.raises(mda.NoDataError):
getattr(written_ts, 'positions')
if ts.frame % 2 != 0:
assert_almost_equal(
written_ts.velocities,
orig_ts.velocities,
3,
err_msg="velocities mismatch "
"between original and written "
"trajectory at frame {} (orig) "
"vs {} (written)".format(orig_ts.frame, written_ts.frame))
else:
with pytest.raises(mda.NoDataError):
getattr(written_ts, 'velocities')
class _GromacsWriterIssue101(object):
Writers = {
'.trr': mda.coordinates.TRR.TRRWriter,
'.xtc': mda.coordinates.XTC.XTCWriter,
}
ext = None # set to '.xtc' or '.trr'
prec = 3
@pytest.fixture()
def Writer(self):
return self.Writers[self.ext]
@pytest.fixture()
def outfile(self, tmpdir):
return str(tmpdir.join('/xdr-writer-issue101' + self.ext))
def test_single_frame_GRO(self, Writer, outfile):
self._single_frame(GRO, Writer, outfile)
def test_single_frame_PDB(self, Writer, outfile):
self._single_frame(PDB, Writer, outfile)
def test_single_frame_CRD(self, Writer, outfile):
self._single_frame(CRD, Writer, outfile)
def _single_frame(self, filename, Writer, outfile):
u = mda.Universe(filename)
with Writer(outfile, u.atoms.n_atoms) as W:
W.write(u.atoms)
w = mda.Universe(filename, outfile)
assert_equal(w.trajectory.n_frames, 1,
"single frame trajectory has wrong number of frames")
assert_almost_equal(
w.atoms.positions,
u.atoms.positions,
self.prec,
err_msg="coordinates do not match for {0!r}".format(filename))
class TestXTCWriterSingleFrame(_GromacsWriterIssue101):
ext = ".xtc"
prec = 2
class TestTRRWriterSingleFrame(_GromacsWriterIssue101):
ext = ".trr"
class _GromacsWriterIssue117(object):
"""Issue 117: Cannot write XTC or TRR from AMBER NCDF"""
ext = None
prec = 5
@pytest.fixture()
def universe(self):
return mda.Universe(PRMncdf, NCDF)
def test_write_trajectory(self, universe, tmpdir):
"""Test writing Gromacs trajectories from AMBER NCDF (Issue 117)"""
outfile = str(tmpdir.join('xdr-writer-issue117' + self.ext))
with mda.Writer(outfile, n_atoms=universe.atoms.n_atoms) as W:
for ts in universe.trajectory:
W.write(universe)
uw = mda.Universe(PRMncdf, outfile)
# check that the coordinates are identical for each time step
for orig_ts, written_ts in zip(universe.trajectory, uw.trajectory):
assert_almost_equal(
written_ts._pos,
orig_ts._pos,
self.prec,
err_msg="coordinate mismatch "
"between original and written "
"trajectory at frame %d (orig) vs %d "
"(written)" % (orig_ts.frame, written_ts.frame))
class TestXTCWriterIssue117(_GromacsWriterIssue117):
__test__ = True
ext = ".xtc"
prec = 2
class TestTRRWriterIssue117(_GromacsWriterIssue117):
__test__ = True
ext = ".trr"
def test_triclinic_box():
"""Test coordinates.core.triclinic_box() (Issue 61)"""
unitcell = np.array([80.017, 55, 100.11, 60.00, 30.50, 90.00])
box = mda.coordinates.core.triclinic_vectors(unitcell)
new_unitcell = mda.coordinates.core.triclinic_box(box[0], box[1], box[2])
assert_almost_equal(
new_unitcell,
unitcell,
3,
        err_msg="unitcell round-trip conversion failed (Issue 61)")
class XTCReference(BaseReference):
def __init__(self):
super(XTCReference, self).__init__()
self.trajectory = COORDINATES_XTC
self.topology = COORDINATES_TOPOLOGY
self.reader = mda.coordinates.XTC.XTCReader
self.writer = mda.coordinates.XTC.XTCWriter
self.ext = 'xtc'
self.prec = 3
self.changing_dimensions = True
class TestXTCReader_2(MultiframeReaderTest):
@staticmethod
@pytest.fixture()
def ref():
return XTCReference()
class TestXTCWriter_2(BaseWriterTest):
@staticmethod
@pytest.fixture()
def ref():
return XTCReference()
def test_different_precision(self, ref, tmpdir):
out = 'precision-test' + ref.ext
        # store more than 9 atoms to enable compression
n_atoms = 40
with tmpdir.as_cwd():
with ref.writer(out, n_atoms, precision=5) as w:
u = make_Universe(size=(n_atoms, 1, 1), trajectory=True)
u.trajectory.ts.positions = np.random.random(size=(n_atoms, 3))
w.write(u)
xtc = mda.lib.formats.libmdaxdr.XTCFile(out)
frame = xtc.read()
assert_equal(len(xtc), 1)
assert_equal(xtc.n_atoms, n_atoms)
assert_equal(frame.prec, 10.0**5)
class TRRReference(BaseReference):
def __init__(self):
super(TRRReference, self).__init__()
self.trajectory = COORDINATES_TRR
self.topology = COORDINATES_TOPOLOGY
self.changing_dimensions = True
self.reader = mda.coordinates.TRR.TRRReader
self.writer = mda.coordinates.TRR.TRRWriter
self.ext = 'trr'
self.prec = 3
self.first_frame.velocities = self.first_frame.positions / 10
self.first_frame.forces = self.first_frame.positions / 100
self.second_frame.velocities = self.second_frame.positions / 10
self.second_frame.forces = self.second_frame.positions / 100
self.last_frame.velocities = self.last_frame.positions / 10
self.last_frame.forces = self.last_frame.positions / 100
self.jump_to_frame.velocities = self.jump_to_frame.positions / 10
self.jump_to_frame.forces = self.jump_to_frame.positions / 100
def iter_ts(self, i):
ts = self.first_frame.copy()
ts.positions = 2**i * self.first_frame.positions
ts.velocities = ts.positions / 10
ts.forces = ts.positions / 100
ts.time = i
ts.frame = i
return ts
class TestTRRReader_2(MultiframeReaderTest):
@staticmethod
@pytest.fixture()
def ref():
return TRRReference()
class TestTRRWriter_2(BaseWriterTest):
@staticmethod
@pytest.fixture()
def ref():
return TRRReference()
# tests writing and reading in one!
def test_lambda(self, ref, universe, tmpdir):
outfile = 'write-lambda-test' + ref.ext
with tmpdir.as_cwd():
with ref.writer(outfile, universe.trajectory.n_atoms) as W:
for i, ts in enumerate(universe.trajectory):
ts.data['lambda'] = i / float(universe.trajectory.n_frames)
W.write(universe)
reader = ref.reader(outfile)
for i, ts in enumerate(reader):
assert_almost_equal(ts.data['lambda'], i / float(reader.n_frames))
class _GromacsReader_offsets(object):
# This base class assumes same lengths and dt for XTC and TRR test cases!
filename = None
ref_unitcell = np.array(
[80.017, 80.017, 80.017, 60., 60., 90.], dtype=np.float32)
# computed with Gromacs: 362.26999999999998 nm**3 * 1000 A**3/nm**3
ref_volume = 362270.0
ref_offsets = None
_reader = None
prec = 3
@pytest.fixture(scope='class')
def traj(self, tmpdir_factory):
# copy of original test trajectory in a temporary folder. This is
# needed since offsets are automatically generated in the same
# directory. Here we also clean up nicely all files we generate
tmpdir = tmpdir_factory.mktemp('xtc')
shutil.copy(self.filename, str(tmpdir))
traj = str(tmpdir.join(os.path.basename(self.filename)))
# ensure initialization of offsets
self._reader(traj)
return traj
@pytest.fixture()
def trajectory(self, traj):
return self._reader(traj)
def test_offsets(self, trajectory, traj):
trajectory._read_offsets(store=True)
assert_almost_equal(
trajectory._xdr.offsets,
self.ref_offsets,
err_msg="wrong frame offsets")
outfile_offsets = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(outfile_offsets)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
assert_almost_equal(
trajectory._xdr.offsets,
saved_offsets['offsets'],
err_msg="error saving frame offsets")
assert_almost_equal(
self.ref_offsets,
saved_offsets['offsets'],
err_msg="saved frame offsets don't match "
"the known ones")
trajectory._load_offsets()
assert_almost_equal(
trajectory._xdr.offsets,
self.ref_offsets,
err_msg="error loading frame offsets")
assert_equal(saved_offsets['ctime'], os.path.getctime(traj))
assert_equal(saved_offsets['size'], os.path.getsize(traj))
def test_reload_offsets(self, traj):
self._reader(traj, refresh_offsets=True)
    def test_nonexistent_offsets_file(self, traj):
        # assert that a nonexistent file returns False during read-in
outfile_offsets = XDR.offsets_filename(traj)
with patch.object(np, "load") as np_load_mock:
np_load_mock.side_effect = IOError
saved_offsets = XDR.read_numpy_offsets(outfile_offsets)
assert_equal(saved_offsets, False)
def test_reload_offsets_if_offsets_readin_fails(self, trajectory):
# force the np.load call that is called in read_numpy_offsets
# during _load_offsets to give an IOError
# ensure that offsets are then read-in from the trajectory
with patch.object(np, "load") as np_load_mock:
np_load_mock.side_effect = IOError
trajectory._load_offsets()
assert_almost_equal(
trajectory._xdr.offsets,
self.ref_offsets,
err_msg="error loading frame offsets")
def test_persistent_offsets_size_mismatch(self, traj):
# check that stored offsets are not loaded when trajectory
# size differs from stored size
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
saved_offsets['size'] += 1
with open(fname, 'wb') as f:
np.savez(f, **saved_offsets)
with pytest.warns(UserWarning, match="Reload offsets"):
self._reader(traj)
def test_persistent_offsets_ctime_mismatch(self, traj):
# check that stored offsets are not loaded when trajectory
# ctime differs from stored ctime
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
saved_offsets['ctime'] += 1
with open(fname, 'wb') as f:
np.savez(f, **saved_offsets)
with pytest.warns(UserWarning, match="Reload offsets"):
self._reader(traj)
def test_persistent_offsets_natoms_mismatch(self, traj):
        # check that stored offsets are not loaded when the trajectory's
        # number of atoms differs from the stored value
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
saved_offsets['n_atoms'] += 1
np.savez(fname, **saved_offsets)
with pytest.warns(UserWarning, match="Reload offsets"):
self._reader(traj)
def test_persistent_offsets_last_frame_wrong(self, traj):
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
idx_frame = 3
saved_offsets['offsets'][idx_frame] += 42
np.savez(fname, **saved_offsets)
with pytest.warns(UserWarning, match="seek failed"):
reader = self._reader(traj)
reader[idx_frame]
def test_unsupported_format(self, traj):
fname = XDR.offsets_filename(traj)
saved_offsets = XDR.read_numpy_offsets(fname)
assert isinstance(saved_offsets, dict), \
"read_numpy_offsets did not return a dict"
idx_frame = 3
saved_offsets.pop('n_atoms')
np.savez(fname, **saved_offsets)
# ok as long as this doesn't throw
reader = self._reader(traj)
reader[idx_frame]
def test_persistent_offsets_readonly(self, tmpdir):
shutil.copy(self.filename, str(tmpdir))
if os.name == 'nt':
# Windows platform has a unique way to deny write access
subprocess.call("icacls {fname} /deny Users:W".format(fname=tmpdir),
shell=True)
else:
os.chmod(str(tmpdir), 0o555)
filename = str(tmpdir.join(os.path.basename(self.filename)))
# try to write a offsets file
self._reader(filename)
assert_equal(os.path.exists(XDR.offsets_filename(filename)), False)
class TestXTCReader_offsets(_GromacsReader_offsets):
__test__ = True
filename = XTC
ref_offsets = np.array([
0, 165188, 330364, 495520, 660708, 825872, 991044, 1156212, 1321384,
1486544
])
_reader = mda.coordinates.XTC.XTCReader
class TestTRRReader_offsets(_GromacsReader_offsets):
__test__ = True
filename = TRR
ref_offsets = np.array([
0, 1144464, 2288928, 3433392, 4577856, 5722320, 6866784, 8011248,
9155712, 10300176
])
_reader = mda.coordinates.TRR.TRRReader
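# A small illustrative sketch of the offset-file helpers exercised above (the
# trajectory path is made up); MDAnalysis keeps frame offsets in a hidden
# ``.npz`` file next to the trajectory:
#
#     from MDAnalysis.coordinates import XDR
#     fname = XDR.offsets_filename('/data/adk.xtc')
#     saved = XDR.read_numpy_offsets(fname)  # dict of offsets, or False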
| gpl-2.0 | 1,926,006,717,862,227,500 | 34.444048 | 85 | 0.604004 | false | 3.663918 | true | false | false |
freedomtan/tensorflow | tensorflow/python/tpu/tpu_outside_compilation_test.py | 2 | 22688 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU outside compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorboard.plugins.histogram import summary_v2 as histogram_summary_v2
from tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2
from tensorflow.core.util import event_pb2
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu.ops import tpu_ops
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
zone=FLAGS.zone,
project=FLAGS.project,
)
return resolver
def get_tpu_strategy():
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
return tpu_lib.TPUStrategyV2(resolver)
def computation_with_string_ops(x):
output = string_ops.string_format("1{}", x)
return string_ops.string_to_number(output)
def _events_from_logdir(test_case, logdir):
"""Reads summary events from log directory."""
test_case.assertTrue(gfile.Exists(logdir))
files = gfile.ListDirectory(logdir)
test_case.assertLen(files, 1)
records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def _rewrite_func_wrapper(tf_func):
def tpu_fn(*args, **kwargs):
# tpu.rewrite only accepts list of tensors as input. We need to flatten
# keyword arguments to meet this requirement.
concrete = tf_func.get_concrete_function(*(list(args) +
list(kwargs.values())))
return tpu.rewrite(concrete.__call__, list(args) + list(kwargs.values()))
return def_function.function(tpu_fn)
def _tpu_partitioned_call_wrapper(tf_func):
"""Wrap a tensorflow Function with TPUPartitionedCall."""
def inner_func(*args, **kwargs):
concrete = tf_func.get_concrete_function(*args, **kwargs)
# TPUPartitionedCall only accepts list of tensors as input args.
# Flatten keyword arguments and do some basic ordering:
# Positional args + Flattened keyword args + Captured args.
op_args = list(args) + list(kwargs.values()) + concrete.captured_inputs
return tpu_functional.TPUPartitionedCall(
args=op_args,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in concrete.function_def.signature.output_arg],
f=concrete)
return def_function.function(inner_func)
class TpuOutsideCompilationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(TpuOutsideCompilationTest, self).setUp()
config.set_soft_device_placement(False)
def testHostNoInput(self):
strategy = get_tpu_strategy()
def outside_fn():
logging_ops.print_v2("Outside compiled")
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOnly(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn, x2)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOutput(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output = tpu.outside_compilation(outside_fn, x2)
return output
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))
def testHostMultipleInputs(self):
strategy = get_tpu_strategy()
val0 = np.arange(6).reshape((2, 3)).astype(np.float32)
val1 = np.arange(6).reshape((3, 2)).astype(np.float32)
def outside_fn(arg0, arg1):
tmp = array_ops.reshape(arg1, array_ops.shape(arg0))
ret0 = arg0 + tmp
ret1 = math_ops.matmul(arg0, arg1)
ret2 = array_ops.concat([arg0, tmp], 0)
return ret0, ret1, ret2
@def_function.function
def train_step():
def tpu_fn(x, y):
a = x + 7.0
b = y * 2.0
c, d, e = tpu.outside_compilation(outside_fn, a, b)
return (math_ops.reduce_max(c) + math_ops.reduce_min(d) +
math_ops.reduce_sum(e))
return strategy.run(tpu_fn, args=(val0, val1))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(213., shape=(strategy.num_replicas_in_sync)))
def testMultipleClusters(self):
strategy = get_tpu_strategy()
def outside_fn1(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
def outside_fn2(x):
logging_ops.print_v2("Outside compiled", x)
return x - 18.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output1 = tpu.outside_compilation(outside_fn1, x2)
x3 = output1 + 3.0
output2 = tpu.outside_compilation(outside_fn2, x3)
return output2
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(21., shape=(strategy.num_replicas_in_sync)))
@parameterized.parameters((True), (False))
def testOutsideCompilationControlFlowIf(self, take_true_branch):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
input_value = 51.0 if take_true_branch else 25.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
if x < 50.0:
return tpu.outside_compilation(outside_fn, x2)
else:
return x2
return strategy.run(tpu_fn, args=(input_value,))
output_value = 36.0
if take_true_branch:
output_value = 56.0
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationControlFlowWhile(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
while x2 < 50.0:
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationHostControlFlow(self):
"""Tests that control flow on host for outside_compilation works."""
strategy = get_tpu_strategy()
def outside_fn(x):
n = 0
while n < 4:
x = x + 6.0
n = n + 1
return x
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
def testSummary(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
@parameterized.parameters((True), (False))
def testSummaryInCond(self, take_true_branch):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step(take_true_branch):
def computation(x):
x = x + 1.0
if x < 5.0:
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
x = y
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step(take_true_branch)),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
def testSummaryInWhile(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
n = 0
while n < 3:
x = x + 1.0
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
x = y
n = n + 1
return y + 1.0
return strategy.run(computation, args=(2.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(31., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationAtHeadAndTail(self):
"""Tests that outside_compilation at head/tail of TPU computation works."""
strategy = get_tpu_strategy()
def host_computation(x):
return x * 2.0
@def_function.function
def train_step():
def computation(x):
w = tpu.outside_compilation(host_computation, x)
y = w + 1.0
z = tpu.outside_compilation(host_computation, y)
return z + 5.0
return strategy.run(computation, args=(2.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(15., shape=(strategy.num_replicas_in_sync)))
def testGradientAcrossOutsideCompilation(self):
"""Tests compiled gradients can contain host computations."""
strategy = get_tpu_strategy()
def host_computation(a):
b = a * a
c = b * b
return c
@def_function.function
def train_step():
def computation(x, y):
a = x + 7.0
b = tpu.outside_compilation(host_computation, a)
c = b * y
d = gradients_impl.gradients(
[c], [x], colocate_gradients_with_ops=True)[0]
return d
return strategy.run(computation, args=(2.0, 3.0))
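    # c = y * (x + 7)^4, so dc/dx = 4 * y * (x + 7)^3 = 4 * 3 * 9^3 = 8748 at (x, y) = (2, 3).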
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(8748., shape=(strategy.num_replicas_in_sync)))
def testGradientOfGradientAcrossOutsideCompilation(self):
"""Tests compiled gradients of gradients can contain host computations."""
strategy = get_tpu_strategy()
def host_computation(a):
b = a * a
c = b * b
return c
@def_function.function
def train_step():
def computation(x, y):
a = x + 7.0
b = tpu.outside_compilation(host_computation, a)
c = b * y
d = gradients_impl.gradients(
[c], [x], colocate_gradients_with_ops=True)[0]
e = gradients_impl.gradients(
[d], [x], colocate_gradients_with_ops=True)[0]
return e
return strategy.run(computation, args=(2.0, 3.0))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(2916., shape=(strategy.num_replicas_in_sync)))
def testColocateGradientWithOutsideCompiledOp(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
@def_function.function
def tpu_fn(x):
x1 = tpu.outside_compilation(math_ops.sqrt, x)
grad = gradients_impl.gradients([x1], [x],
colocate_gradients_with_ops=True)[0]
sqrt = [
op for op in ops.get_default_graph().get_operations()
if op.type == "Sqrt"
][0]
sqrt_grad = [
op for op in ops.get_default_graph().get_operations()
if op.type == "SqrtGrad"
][0]
assert sqrt.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) == b"0"
assert (sqrt_grad.get_attr(
tpu._OUTSIDE_COMPILATION_ATTR) == b"0.gradients/uid")
return grad
return strategy.run(tpu_fn, args=(25.0,))
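    # d/dx sqrt(x) at x = 25 is 1 / (2 * sqrt(25)) = 0.1, which is the expected gradient below.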
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(.1, shape=(strategy.num_replicas_in_sync)))
class OutsideCompilationOnUnsupportedOpTest(test.TestCase,
parameterized.TestCase):
def setUp(self):
super(OutsideCompilationOnUnsupportedOpTest, self).setUp()
config.set_soft_device_placement(True)
def testStringOpWithManualOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return tpu.outside_compilation(computation_with_string_ops, x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testStringOpWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return computation_with_string_ops(x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testHistogramSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
histogram_summary_v2.histogram("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
@parameterized.parameters((True), (False))
def testSummaryControlFlowIfWithAutoOutsideCompilation(
self, take_true_branch):
strategy = get_tpu_strategy()
@def_function.function
def step():
def computation(x):
x = x + 1.0
if x < 5:
scalar_summary_v2.scalar("x", x, step=0)
x = x * 2.0
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
if take_true_branch:
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
#
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "cond/x")
@test_util.disable_mlir_bridge(
"TODO(b/168493455): Reenable this test once deadlock resolved."
)
def testAutoOutsideCompilationWithFunctionalNodes(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(a, b):
def fn(a, b):
fn1 = lambda: computation_with_string_ops(a * 100)
fn2 = lambda: computation_with_string_ops(a)
pred = math_ops.greater_equal(a, b)
result = array_ops.identity(
control_flow_ops.cond(pred, fn1, fn2),
name="uncompilable_control_flow")
return result
return strategy.run(fn, args=(a, b))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0.0, -1.0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testRandomOpsWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
def computation():
return random_ops.random_normal(shape=[1, 2, 3])
return strategy.run(computation, args=())
self.assertAllEqual(
strategy.experimental_local_results(train_step())[0].shape, [1, 2, 3])
@test_util.disable_mlir_bridge(
"TODO(b/167235391): Reenable this test once function calls are handled "
"by MLIR bridge."
)
def testOutsideCompilationWithTPUPartitionedCallOp(self):
"""Tests that control flow with TPUPartitionedCall including outside_compilation works."""
get_tpu_strategy()
def host_computation(x):
return x + 1
@def_function.function()
def train_step(x):
x2 = x + 5.0
logging_ops.print_v2(x2)
x2 = tpu.outside_compilation(host_computation, x2)
return x2 + 4.0
tpu_fn = _rewrite_func_wrapper(train_step)
partitioned_tpu_fn = _tpu_partitioned_call_wrapper(tpu_fn)
concrete = partitioned_tpu_fn.get_concrete_function(
x=tensor_spec.TensorSpec(
shape=(1), dtype=dtypes.float32, name="input_tensor"))
self.assertIsInstance(
concrete(array_ops.ones((1), dtype=dtypes.float32))[0], ops.Tensor)
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,471,741,532,479,555,000 | 30.511111 | 94 | 0.650432 | false | 3.608716 | true | false | false |
atomictom/WiseGuy | old_sequential_nn.py | 1 | 3808 | # Neural Network
# -- nn.py
#
# @package NeuralNetwork
import Queue
import random
import math
import time
import threading
NUM_THREADS = 1
NUM_TESTS = 100
NUM_INPUTS = 50
NUM_HIDDEN = 30
NUM_OUTPUTS = 50
USE_BACKPROPOGATE = True
OUTPUTS = []
# test_input = [ .0, .3, .6, .2, .8 ]
class Node:
def __init__(self):
self.connected_edges = []
def sigmoid(self, num):
return math.tanh(num)
class InputNode(Node):
def __init__(self):
self.input = Queue.Queue()
class HiddenNode(Node):
def __init__(self):
self.values = []
self.final = 0
self.last_input = None
def activate(self):
sum = 0
        for value in self.values:
            sum += value
return self.sigmoid(sum)
class OutputNode(Node):
def __init__(self):
self.values = []
def checkThreshold(self):
sum = 0
for value in self.values:
sum += value
fin = self.sigmoid(sum)
if fin < 0.5:
return 0
else:
return 1
def initEdgeWeights(nodes, num_edges):
random.seed()
for node in nodes:
node.connected_edges = [ random.uniform(-1.0, 1.0) for x in range(num_edges) ]
def recvInputVector(input, input_nodes):
for i in range(NUM_INPUTS):
input_nodes[i].input.put(input[i])
def derivSig(num):
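    # derivative of tanh written in terms of its output: d/dx tanh(x) = 1 - tanh(x)**2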
return 1 - num**2
def run(inputs, hidden, outputs):
for input in inputs:
val = input.input.get()
for i in range(NUM_HIDDEN):
hidden[i].values.append(input.connected_edges[i] * val)
hidden[i].last_input = val
for node in hidden:
node.final = node.activate()
for i in range(NUM_OUTPUTS):
outputs[i].values.append(node.connected_edges[i] * node.final)
for out in outputs:
OUTPUTS.append(out.checkThreshold())
def backPropagate(targets, inputs, hidden):
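    # Textbook delta-rule backpropagation for a single hidden layer:
    #   output delta_k = (target_k - output_k) * f'(output_k)
    #   hidden delta_j = f'(hidden_j) * sum_k delta_k * w_jk
    # then both weight layers take a gradient step with learning rate 0.5,
    # and the summed squared error is returned.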
out_deltas = []
for i in range(NUM_OUTPUTS):
error = targets[i] - OUTPUTS[i]
out_deltas.append(error * derivSig(OUTPUTS[i]))
for i in range(NUM_HIDDEN):
for j in range(NUM_OUTPUTS):
delta = out_deltas[j] * hidden[i].final
hidden[i].connected_edges[j] += .5 * delta
hidden_deltas = []
for i in range(NUM_HIDDEN):
error = 0
for j in range(NUM_OUTPUTS):
error += out_deltas[j] * hidden[i].connected_edges[j]
hidden_deltas.append(error * derivSig(hidden[i].final))
for i in range(NUM_INPUTS):
for j in range(NUM_HIDDEN):
delta = hidden_deltas[j] * hidden[j].last_input
inputs[i].connected_edges[j] += .5 * delta
error = 0
for i in range(len(targets)):
error += .5 * (targets[i] - OUTPUTS[i])**2
return error
def main():
global OUTPUTS
# initialize all node objects
input_nodes = [ InputNode() for x in range(NUM_INPUTS) ]
hidden_nodes = [ HiddenNode() for x in range(NUM_HIDDEN) ]
output_nodes = [ OutputNode() for x in range(NUM_OUTPUTS) ]
# create the weights
initEdgeWeights(input_nodes, len(hidden_nodes))
initEdgeWeights(hidden_nodes, len(output_nodes))
new_inputs = [ random.random() for i in range(NUM_INPUTS) ]
desired_outputs = [ random.choice([0, 1]) for i in range(NUM_OUTPUTS) ]
print new_inputs
print desired_outputs
for i in range(NUM_TESTS):
# initialize input nodes with random data
recvInputVector(new_inputs, input_nodes)
run(input_nodes, hidden_nodes, output_nodes)
if USE_BACKPROPOGATE:
print backPropagate(desired_outputs, input_nodes, hidden_nodes)
print OUTPUTS
OUTPUTS = []
if __name__ == "__main__":
main()
# vim:ts=4:sw=4:sta:et:
| gpl-3.0 | 5,303,224,215,373,582,000 | 23.254777 | 86 | 0.591649 | false | 3.439928 | false | false | false |
openstack/monasca-persister | monasca_persister/repositories/influxdb/abstract_repository.py | 1 | 3517 | # (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import influxdb
from oslo_config import cfg
from monasca_persister.repositories import abstract_repository
from monasca_persister.repositories import data_points
DATABASE_NOT_FOUND_MSG = "database not found"
class AbstractInfluxdbRepository(abstract_repository.AbstractRepository, metaclass=abc.ABCMeta):
def __init__(self):
super(AbstractInfluxdbRepository, self).__init__()
self.conf = cfg.CONF
self._influxdb_client = influxdb.InfluxDBClient(
self.conf.influxdb.ip_address,
self.conf.influxdb.port,
self.conf.influxdb.user,
self.conf.influxdb.password)
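        # DataPointsAsDict groups measurements per tenant so each tenant gets
        # its own InfluxDB database; DataPointsAsList keeps one flat list for
        # the single shared database (see write_batch below).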
if self.conf.influxdb.db_per_tenant:
self.data_points_class = data_points.DataPointsAsDict
else:
self.data_points_class = data_points.DataPointsAsList
def write_batch(self, data_points):
if self.conf.influxdb.db_per_tenant:
for tenant_id, tenant_data_points in data_points.items():
database = '%s_%s' % (self.conf.influxdb.database_name,
tenant_id)
self._write_batch(tenant_data_points, database)
else:
self._write_batch(data_points, self.conf.influxdb.database_name)
def _write_batch(self, data_points, database):
# NOTE (brtknr): Loop twice to ensure database is created if missing.
for retry in range(2):
try:
batch_size = self.conf.influxdb.batch_size
self._influxdb_client.write_points(data_points, 'ms',
protocol='line',
database=database,
batch_size=batch_size)
break
except influxdb.exceptions.InfluxDBClientError as ex:
                # When a database is not found, the returned exception resolves
# to: {"error":"database not found: \"test\""}
if DATABASE_NOT_FOUND_MSG in str(ex):
self._influxdb_client.create_database(database)
# NOTE (brtknr): Only apply default retention policy at
# database creation time so that existing policies are
# not overridden since administrators may want different
# retention policy per tenant.
hours = self.conf.influxdb.default_retention_hours
if hours > 0:
rp = '{}h'.format(hours)
default_rp = dict(database=database, default=True,
name=rp, duration=rp,
replication='1')
self._influxdb_client.create_retention_policy(**default_rp)
else:
raise
| apache-2.0 | 7,030,092,911,305,735,000 | 45.276316 | 96 | 0.589423 | false | 4.423899 | false | false | false |
hsmade/expenses | expenses/migrations/0001_initial.py | 1 | 3009 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Deposit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255)),
('amount', models.DecimalField(max_digits=6, decimal_places=2)),
('date', models.DateField(verbose_name=b'date deposited')),
('account_number', models.CharField(max_length=34, verbose_name=b'opposite account number, if any', blank=True)),
],
),
migrations.CreateModel(
name='Month',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('year', models.IntegerField()),
('month', models.IntegerField()),
('balance', models.DecimalField(max_digits=6, decimal_places=2)),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255)),
('amount', models.DecimalField(max_digits=6, decimal_places=2)),
('due_date', models.DateField(blank=True)),
('automatic_increase', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Withdrawal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255)),
('amount', models.DecimalField(max_digits=6, decimal_places=2)),
('date', models.DateField(verbose_name=b'date deposited')),
('account_number', models.CharField(max_length=34, verbose_name=b'opposite account number, if any', blank=True)),
('month', models.ForeignKey(to='expenses.Month')),
],
),
migrations.CreateModel(
name='WithdrawalType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
],
),
migrations.AddField(
model_name='withdrawal',
name='type',
field=models.ForeignKey(to='expenses.WithdrawalType'),
),
migrations.AddField(
model_name='deposit',
name='month',
field=models.ForeignKey(to='expenses.Month'),
),
]
| gpl-2.0 | 1,071,978,329,737,851,000 | 41.380282 | 129 | 0.549684 | false | 4.451183 | false | false | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/oscurart_tools/oscurart_render.py | 1 | 16832 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import (
Operator,
Panel,
)
import os
import stat
# -------------------------------- RENDER ALL SCENES ---------------------
def defRenderAll(frametype, scenes):
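    # Renders every scene / render-layer combination to its own folder.
    # The material slots are saved first, the group -> material overrides from
    # scene.oscurart.overrides are applied, each render layer is rendered in
    # isolation, and finally the original materials and frame range are restored.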
activescene = bpy.context.scene
FC = bpy.context.scene.frame_current
FS = bpy.context.scene.frame_start
FE = bpy.context.scene.frame_end
print("---------------------")
types = {'MESH', 'META', 'CURVE'}
for ob in bpy.data.objects:
if ob.type in types:
if not len(ob.material_slots):
ob.data.materials.append(None)
slotlist = {ob: [sl.material for sl in ob.material_slots]
for ob in bpy.data.objects if ob.type in types if len(ob.material_slots)}
for scene in scenes:
proptolist = list(eval(scene.oscurart.overrides))
renpath = scene.render.filepath
if frametype:
scene.frame_start = FC
scene.frame_end = FC
scene.frame_end = FC
scene.frame_start = FC
for group, material in proptolist:
for object in bpy.data.groups[group].objects:
lenslots = len(object.material_slots)
if object.type in types:
if len(object.data.materials):
object.data.materials.clear()
for newslot in range(lenslots):
object.data.materials.append(
bpy.data.materials[material])
filename = os.path.basename(bpy.data.filepath.rpartition(".")[0])
uselayers = {layer: layer.use for layer in scene.render.layers}
for layer, usado in uselayers.items():
if usado:
for i in scene.render.layers:
i.use = False
layer.use = 1
print("SCENE: %s" % scene.name)
print("LAYER: %s" % layer.name)
print("OVERRIDE: %s" % str(proptolist))
scene.render.filepath = os.path.join(
os.path.dirname(renpath), filename, scene.name, layer.name, "%s_%s_%s" %
(filename, scene.name, layer.name))
bpy.context.window.screen.scene = scene
bpy.ops.render.render(
animation=True,
write_still=True,
layer=layer.name,
scene=scene.name)
print("DONE")
print("---------------------")
for layer, usado in uselayers.items():
layer.use = usado
scene.render.filepath = renpath
for ob, slots in slotlist.items():
ob.data.materials.clear()
for slot in slots:
ob.data.materials.append(slot)
if frametype:
scene.frame_start = FS
scene.frame_end = FE
scene.frame_end = FE
scene.frame_start = FS
bpy.context.window.screen.scene = activescene
class renderAll (Operator):
"""Renders all scenes executing the Oscurart overrides if those are set up. Saves the renders in their respective folders using the scenes and render layers names."""
bl_idname = "render.render_all_scenes_osc"
bl_label = "Render All Scenes"
frametype = bpy.props.BoolProperty(default=False)
def execute(self, context):
defRenderAll(self.frametype, [scene for scene in bpy.data.scenes])
return {'FINISHED'}
# --------------------------------RENDER SELECTED SCENES------------------
bpy.types.Scene.use_render_scene = bpy.props.BoolProperty()
class renderSelected (Operator):
"""Renders the seleccted scenes on the checkboxes, executing the Oscurart overrides if it was set up. Saves the renders in their respective folders using the scenes and render layers names."""
bl_idname = "render.render_selected_scenes_osc"
bl_label = "Render Selected Scenes"
frametype = bpy.props.BoolProperty(default=False)
def execute(self, context):
defRenderAll(
self.frametype,
[sc for sc in bpy.data.scenes if sc.use_render_scene])
return {'FINISHED'}
# --------------------------------RENDER CURRENT SCENE--------------------
class renderCurrent (Operator):
"""Renders the active scene executing the Oscurart overrides if it was set up. Saves the renders in their respective folders using the scenes and render layers names."""
bl_idname = "render.render_current_scene_osc"
bl_label = "Render Current Scene"
frametype = bpy.props.BoolProperty(default=False)
def execute(self, context):
defRenderAll(self.frametype, [bpy.context.scene])
return {'FINISHED'}
# --------------------------RENDER CROP----------------------
bpy.types.Scene.rcPARTS = bpy.props.IntProperty(
default=0, min=2, max=50, step=1)
def OscRenderCropFunc():
SCENENAME = os.path.split(bpy.data.filepath)[-1].partition(".")[0]
PARTS = bpy.context.scene.rcPARTS
CHUNKYSIZE = 1 / PARTS
FILEPATH = bpy.context.scene.render.filepath
bpy.context.scene.render.use_border = True
bpy.context.scene.render.use_crop_to_border = True
for PART in range(PARTS):
bpy.context.scene.render.border_min_y = PART * CHUNKYSIZE
bpy.context.scene.render.border_max_y = (
PART * CHUNKYSIZE) + CHUNKYSIZE
bpy.context.scene.render.filepath = "%s_part%s" % (
os.path.join(FILEPATH,
SCENENAME,
bpy.context.scene.name,
SCENENAME),
PART)
bpy.ops.render.render(animation=False, write_still=True)
bpy.context.scene.render.filepath = FILEPATH
class renderCrop (Operator):
"""It renders croping the image in to a X number of pieces. Usefull for rendering really big images."""
bl_idname = "render.render_crop_osc"
bl_label = "Render Crop: Render!"
def execute(self, context):
OscRenderCropFunc()
return {'FINISHED'}
# ---------------------------BATCH MAKER------------------
def defoscBatchMaker(TYPE, BIN):
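    # Writes a <blendfile>.bat (Windows) or <blendfile>.sh (Unix) next to the
    # .blend that launches Blender in background mode and runs the osRlat.py /
    # osRSlat.py helper over the scene's frame range. The generated command looks
    # roughly like (paths are illustrative):
    #   blender -b /path/scene.blend -x 1 -o /renders/ -P /path/osRlat.py -s 1 -e 250 -a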
if os.name == "nt":
print("PLATFORM: WINDOWS")
SYSBAR = os.sep
EXTSYS = ".bat"
QUOTES = '"'
else:
print("PLATFORM:LINUX")
SYSBAR = os.sep
EXTSYS = ".sh"
QUOTES = ''
FILENAME = bpy.data.filepath.rpartition(SYSBAR)[-1].rpartition(".")[0]
BINDIR = bpy.app[4]
SHFILE = os.path.join(
bpy.data.filepath.rpartition(SYSBAR)[0],
FILENAME + EXTSYS)
with open(SHFILE, "w") as FILE:
# assign permission in linux
if EXTSYS == ".sh":
try:
os.chmod(SHFILE, stat.S_IRWXU)
except:
print(
"** Oscurart Batch maker can not modify the permissions.")
if not BIN:
FILE.writelines("%s%s%s -b %s -x 1 -o %s -P %s%s.py -s %s -e %s -a" %
(QUOTES, BINDIR, QUOTES, bpy.data.filepath, bpy.context.scene.render.filepath,
bpy.data.filepath.rpartition(SYSBAR)[0] + SYSBAR, TYPE,
str(bpy.context.scene.frame_start), str(bpy.context.scene.frame_end)))
else:
FILE.writelines("%s -b %s -x 1 -o %s -P %s%s.py -s %s -e %s -a" %
("blender", bpy.data.filepath, bpy.context.scene.render.filepath,
bpy.data.filepath.rpartition(SYSBAR)[0] + SYSBAR, TYPE,
str(bpy.context.scene.frame_start), str(bpy.context.scene.frame_end)))
RLATFILE = "%s%sosRlat.py" % (
bpy.data.filepath.rpartition(SYSBAR)[0],
SYSBAR)
if not os.path.isfile(RLATFILE):
with open(RLATFILE, "w") as file:
if EXTSYS == ".sh":
try:
os.chmod(RLATFILE, stat.S_IRWXU)
except:
print(
"** Oscurart Batch maker can not modify the permissions.")
file.writelines(
"import bpy \nbpy.ops.render.render_all_scenes_osc()\nbpy.ops.wm.quit_blender()")
else:
print("The All Python files Skips: Already exist!")
RSLATFILE = "%s%sosRSlat.py" % (
bpy.data.filepath.rpartition(SYSBAR)[0],
SYSBAR)
if not os.path.isfile(RSLATFILE):
with open(RSLATFILE, "w") as file:
if EXTSYS == ".sh":
try:
os.chmod(RSLATFILE, stat.S_IRWXU)
except:
print(
"** Oscurart Batch maker can not modify the permissions.")
file.writelines(
"import bpy \nbpy.ops.render.render_selected_scenes_osc()\nbpy.ops.wm.quit_blender()")
else:
print("The Selected Python files Skips: Already exist!")
class oscBatchMaker (Operator):
"""It creates .bat(win) or .sh(unix) file, to execute and render from Console/Terminal."""
bl_idname = "file.create_batch_maker_osc"
bl_label = "Make render batch"
bl_options = {'REGISTER', 'UNDO'}
type = bpy.props.EnumProperty(
name="Render Mode",
description="Select Render Mode",
items=(('osRlat', "All Scenes", "Render All Layers At Time"),
('osRSlat', "Selected Scenes", "Render Only The Selected Scenes")),
default='osRlat',
)
bin = bpy.props.BoolProperty(
default=False,
name="Use Environment Variable")
def execute(self, context):
defoscBatchMaker(self.type, self.bin)
return {'FINISHED'}
# --------------------------------------PYTHON BATCH----------------------
def defoscPythonBatchMaker(BATCHTYPE, SIZE):
# REVISO SISTEMA
if os.name == "nt":
print("PLATFORM: WINDOWS")
SYSBAR = "\\"
EXTSYS = ".bat"
QUOTES = '"'
else:
print("PLATFORM:LINUX")
SYSBAR = "/"
EXTSYS = ".sh"
QUOTES = ''
# CREO VARIABLES
FILENAME = bpy.data.filepath.rpartition(SYSBAR)[-1].rpartition(".")[0]
SHFILE = "%s%s%s_PythonSecureBatch.py" % (
bpy.data.filepath.rpartition(SYSBAR)[0],
SYSBAR,
FILENAME)
BATCHLOCATION = "%s%s%s%s" % (
bpy.data.filepath.rpartition(SYSBAR)[0],
SYSBAR,
FILENAME,
EXTSYS)
with open(SHFILE, "w") as FILEBATCH:
if EXTSYS == ".bat":
BATCHLOCATION = BATCHLOCATION.replace("\\", "/")
# SI EL OUTPUT TIENE DOBLE BARRA LA REEMPLAZO
FRO = bpy.context.scene.render.filepath
if bpy.context.scene.render.filepath.count("//"):
FRO = bpy.context.scene.render.filepath.replace(
"//",
bpy.data.filepath.rpartition(SYSBAR)[0] + SYSBAR)
if EXTSYS == ".bat":
FRO = FRO.replace("\\", "/")
# CREO BATCH
bpy.ops.file.create_batch_maker_osc(type=BATCHTYPE)
SCRIPT = ('''
import os
REPITE= True
BAT= '%s'
SCENENAME ='%s'
DIR='%s%s'
def RENDER():
os.system(BAT)
def CLEAN():
global REPITE
FILES = [root+'/'+FILE for root, dirs, files in os.walk(os.getcwd()) if
len(files) > 0 for FILE in files if FILE.count('~') == False]
RESPUESTA=False
for FILE in FILES:
if os.path.getsize(FILE) < %s:
os.remove(FILE)
RESPUESTA= True
if RESPUESTA:
REPITE=True
else:
REPITE=False
REPITE=True
while REPITE:
REPITE=False
RENDER()
os.chdir(DIR)
CLEAN()
''' % (BATCHLOCATION, FILENAME, FRO, FILENAME, SIZE))
# DEFINO ARCHIVO DE BATCH
FILEBATCH.writelines(SCRIPT)
# ARCHIVO CALL
CALLFILENAME = bpy.data.filepath.rpartition(SYSBAR)[-1].rpartition(".")[0]
CALLFILE = "%s%s%s_CallPythonSecureBatch%s" % (
bpy.data.filepath.rpartition(SYSBAR)[0],
SYSBAR,
CALLFILENAME,
EXTSYS)
with open(CALLFILE, "w") as CALLFILEBATCH:
SCRIPT = "python %s" % (SHFILE)
CALLFILEBATCH.writelines(SCRIPT)
if EXTSYS == ".sh":
try:
os.chmod(CALLFILE, stat.S_IRWXU)
os.chmod(SHFILE, stat.S_IRWXU)
except:
print("** Oscurart Batch maker can not modify the permissions.")
class oscPythonBatchMaker (Operator):
"""It creates a file as “Make Render Batch” but it requires Phyton installed and the respective environment variables set up. If the render crahses, the batch automatically erase the broken frame and writes it again. Its not recommended if there is more than one machine rendering."""
bl_idname = "file.create_batch_python"
bl_label = "Make Batch Python"
bl_options = {'REGISTER', 'UNDO'}
size = bpy.props.IntProperty(name="Size in Bytes", default=10, min=0)
type = bpy.props.EnumProperty(
name="Render Mode",
description="Select Render Mode",
items=(('osRlat', "All Scenes", "Render All Layers At Time"),
('osRSlat', "Selected Scenes", "Render Only The Selected Scenes")),
default='osRlat',
)
def execute(self, context):
defoscPythonBatchMaker(self.type, self.size)
return {'FINISHED'}
# ---------------------------------- BROKEN FRAMES ---------------------
class VarColArchivos (bpy.types.PropertyGroup):
filename = bpy.props.StringProperty(name="", default="")
value = bpy.props.IntProperty(name="", default=10)
fullpath = bpy.props.StringProperty(name="", default="")
checkbox = bpy.props.BoolProperty(name="", default=True)
bpy.utils.register_class(VarColArchivos)
class SumaFile(Operator):
"""Look for broken rendered files and shows it."""
bl_idname = "object.add_broken_file"
bl_label = "Add Broken Files"
def execute(self, context):
os.chdir(os.path.dirname(bpy.data.filepath))
absdir = os.path.join(
os.path.dirname(bpy.data.filepath),
bpy.context.scene.render.filepath.replace(r"//",
""))
for root, folder, files in os.walk(absdir):
for f in files:
if os.path.getsize(os.path.join(root, f)) < 10:
print(f)
i = bpy.context.scene.broken_files.add()
i.filename = f
i.fullpath = os.path.join(root, f)
i.value = os.path.getsize(os.path.join(root, f))
i.checkbox = True
return {'FINISHED'}
class ClearFile(Operator):
"""Erase the list of broken frames."""
bl_idname = "object.clear_broken_file"
bl_label = "Clear Broken Files"
def execute(self, context):
bpy.context.scene.broken_files.clear()
return {'FINISHED'}
class DeleteFiles(Operator):
"""Erase the broken frames files from Disk."""
bl_idname = "object.delete_broken_file"
bl_label = "Delete Broken Files"
def execute(self, context):
for file in bpy.context.scene.broken_files:
if file.checkbox:
os.remove(file.fullpath)
bpy.context.scene.broken_files.clear()
return {'FINISHED'}
bpy.types.Scene.broken_files = bpy.props.CollectionProperty(
type=VarColArchivos)
class BrokenFramesPanel (Panel):
bl_label = "Oscurart Broken Render Files"
bl_idname = "OBJECT_PT_osc_broken_files"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
def draw(self, context):
layout = self.layout
col = layout.column(align=1)
for i in bpy.context.scene.broken_files:
colrow = col.row(align=1)
colrow.prop(i, "filename")
colrow.prop(i, "value")
colrow.prop(i, "checkbox")
col = layout.column(align=1)
colrow = col.row(align=1)
colrow.operator("object.add_broken_file")
colrow.operator("object.clear_broken_file")
colrow = col.row(align=1)
colrow.operator("object.delete_broken_file")
| gpl-3.0 | -1,883,779,148,697,409,800 | 33.483607 | 288 | 0.573508 | false | 3.732919 | false | false | false |
fajran/tempel | src/tempel/settings.py | 1 | 3006 | DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'tempel.db'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
TIME_ZONE = 'Asia/Jakarta'
LANGUAGE_CODE = 'en'
SITE_ID = 1
USE_I18N = False
import os
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'dbb4413767843d993a472619e74bc9e3657815c21694f043f80d9a0857e428b5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.i18n",
"django.core.context_processors.request",
"django.core.context_processors.media",
)
ROOT_URLCONF = 'tempel.urls'
TEMPLATE_DIRS = (
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'tempel',
)
def t(name, label, ext='txt', mime='text/plain'):
return {'name': name, 'label': label, 'mime': mime, 'ext': ext}
TEMPEL_LANGUAGES = [
t('bash', 'Bash', 'sh', 'text/x-sh'),
t('c', 'C', 'c', 'text/x-csrc'),
t('cpp', 'C++', 'cpp', 'text/x-c++src'),
t('css', 'CSS', 'css', 'text/css'),
t('diff', 'Diff', 'diff', 'text/x-diff'),
t('html', 'HTML', 'html', 'text/html'),
t('html+django', 'HTML+Django', 'html', 'text/html'),
t('ini', 'INI', 'ini', 'text/plain'),
t('java', 'Java', 'java', 'text/x-java'),
t('lua', 'Lua', 'lua', 'text/plain'),
t('make', 'Makefile', 'makefile', 'text/plain'),
t('perl', 'Perl', 'pl', 'text/x-perl'),
t('php', 'PHP', 'php', 'application/x-httpd-php'),
t('python', 'Python', 'py', 'text/x-python'),
t('rst', 'reStructuredText', 'rst', 'text/plain'),
t('ruby', 'Ruby', 'rb', 'application/x-ruby'),
t('sql', 'SQL', 'sql', 'text/plain'),
t('text', 'Plain text', 'txt', 'text/plain'),
t('xml', 'XML', 'xml', 'application/xml'),
t('yaml', 'YAML', 'yaml', 'text/plain'),
]
TEMPEL_EDIT_AGE = 5 # minutes
| agpl-3.0 | 2,420,023,313,409,658,000 | 31.673913 | 80 | 0.531936 | false | 3.204691 | false | false | false |
fabsx00/chucky-old | tools/functionAnomaly/Ranker.py | 1 | 1907 | import pickle
from scipy.spatial.distance import squareform
import os
from mlutils.anomalyDetection.anomalyCalculator import AnomalyCalculator
from RankingEntry import RankingEntry
class Ranker:
def __init__(self, projectRoot):
self.projectRoot = projectRoot
def loadTermDocMatrix(self):
termDocMatrixFilename = self.projectRoot + 'termDocMatrix.pickl'
self.termDocMatrix = pickle.load(file(termDocMatrixFilename))
self.labels = self.termDocMatrix.index2Doc
def loadDistanceMatrix(self):
DFilename = self.projectRoot + 'D_euclidean.pickl'
self.D = pickle.load(file(DFilename))
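        # A condensed (pdist-style) distance matrix is a 1-D vector; expand it
        # with squareform so it can be indexed as a full N x N matrix later.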
if self._isCompressedDistanceMatrix(self.D):
self.D = squareform(self.D)
def _isCompressedDistanceMatrix(self, D):
return len(D.shape) == 1
def loadH(self):
HFilename = self.projectRoot + 'H.pickl'
self.H = pickle.load(file(HFilename))
def rank(self):
print 'Implement "rank()"'
pass
def outputRanking(self):
for r in self.ranking: print r
def determineAnomaliesFromDistanceMatrix(self, anomalyScore, k):
anomalyCalculator = AnomalyCalculator()
scores = anomalyCalculator.analyzeDistanceMatrix(self.D, anomalyScore, k)
return scores
def rankingFromScores(self, scores):
self.ranking = []
for i in xrange(len(self.labels)):
score = scores[i]
label = self.labels[i]
self.ranking.append(RankingEntry(score, label))
self.ranking.sort(reverse=True)
def saveRanking(self, filename):
rankingsDir = self.projectRoot + 'rankings'
if not os.path.exists(rankingsDir):
os.mkdir(rankingsDir)
outFilename = rankingsDir + '/' + filename
pickle.dump(self.ranking, file(outFilename, 'w'))
| gpl-3.0 | -3,632,520,410,175,390,700 | 30.278689 | 81 | 0.643419 | false | 4.014737 | false | false | false |
CzechHackathon2014/juice-my-device | jmd/web/views.py | 1 | 2699 | from django.shortcuts import render
from places.forms import LocationForm
# Create your views here.
def home(request, tmpl='web/home.html'):
data = {}
loc_form = LocationForm(request.POST or None)
data['locaction_form'] = loc_form
return render(request, tmpl, data)
# from django.conf import settings
#
# import foursquare
# from places.models import Place
# def get_venues(lat, lng, rad=500):
# pass
#
# FOOD_CATEGORY = '4d4b7105d754a06374d81259'
# def venues(request, tmpl='web/frsq.html'):
# data = {}
# loc_form = LocationForm(request.GET or None)
# # print loc_form.is_valid()
#
# if loc_form.is_valid():
# frm = loc_form.cleaned_data
#
# lat = frm.get('lat')
# lng = frm.get('lng')
# rad = frm.get('radius')
#
# client = foursquare.Foursquare(client_id=settings.YOUR_CLIENT_ID, client_secret=settings.YOUR_CLIENT_SECRET)
#
# # list_venues = client.venues.search(params={'ll': '%s,%s' % (lat, lng), 'radius':1000})
# list_venues = client.venues.search(params={'ll': '%s,%s' % (lat, lng), 'radius': rad, 'categoryId':FOOD_CATEGORY})
# # list_venues = client.venues.search(params={'ll': '%s,%s' % (lat, lng), 'categoryId':'4d4b7105d754a06374d81259'})
#
# place_list = []
#
# for v in list_venues['venues']:
# lct = v['location']
# # print lct['lng']
# try:
# pl = Place.objects.get(venue_uid=v['id'])
# place_list.append(pl)
# except Place.DoesNotExist:
# pl = Place.objects.create(venue_uid=v['id'], name=v['name'], lat=lct['lat'], lng=lct['lng'])
#
# pl.distance = lct['distance']
# # print (list_venues)
# # for venue in list_venues:
# # print venue.name
# place_list.append(pl)
#
#
# # data['venue'] = list_venues
# data['place_list'] = place_list
# data['places_count'] = len(place_list)
# return render(request, tmpl, data)
#
#
# def frsq(request, tmpl='web/frsq.html'):
# data = {}
#
# # client = foursquare.Foursquare(client_id=settings.YOUR_CLIENT_ID, client_secret=settings.YOUR_CLIENT_SECRET, redirect_uri='http://localhost:8000/frsq/done/')
# # auth_uri = client.oauth.auth_url()
#
# client = foursquare.Foursquare(client_id=settings.YOUR_CLIENT_ID, client_secret=settings.YOUR_CLIENT_SECRET)
#
#
# # venues = client.venues.search(params={'query': 'coffee'})
# venues = client.venues('40a55d80f964a52020f31ee3')
#
# print venues
# # print auth_uri
# data['venue'] = venues
# return render(request, tmpl, data) | mit | 2,596,081,016,116,069,000 | 32.75 | 165 | 0.587996 | false | 2.92416 | false | false | false |
tschaume/pymatgen | pymatgen/core/periodic_table.py | 1 | 49560 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Module contains classes presenting Element and Specie (Element + oxidation state) and PeriodicTable."""
import re
import json
import warnings
from io import open
from pathlib import Path
from enum import Enum
from typing import Optional, Callable
from itertools import product, combinations
from collections import Counter
import numpy as np
from pymatgen.core.units import Mass, Length, FloatWithUnit, Unit, \
SUPPORTED_UNIT_NAMES
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
# Loads element data from json file
with open(str(Path(__file__).absolute().parent / "periodic_table.json"), "rt") as f:
_pt_data = json.load(f)
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)
class Element(Enum):
"""Enum representing an element in the periodic table."""
# This name = value convention is redundant and dumb, but unfortunately is
# necessary to preserve backwards compatibility with a time when Element is
# a regular object that is constructed with Element(symbol).
H = "H"
He = "He"
Li = "Li"
Be = "Be"
B = "B"
C = "C"
N = "N"
O = "O"
F = "F"
Ne = "Ne"
Na = "Na"
Mg = "Mg"
Al = "Al"
Si = "Si"
P = "P"
S = "S"
Cl = "Cl"
Ar = "Ar"
K = "K"
Ca = "Ca"
Sc = "Sc"
Ti = "Ti"
V = "V"
Cr = "Cr"
Mn = "Mn"
Fe = "Fe"
Co = "Co"
Ni = "Ni"
Cu = "Cu"
Zn = "Zn"
Ga = "Ga"
Ge = "Ge"
As = "As"
Se = "Se"
Br = "Br"
Kr = "Kr"
Rb = "Rb"
Sr = "Sr"
Y = "Y"
Zr = "Zr"
Nb = "Nb"
Mo = "Mo"
Tc = "Tc"
Ru = "Ru"
Rh = "Rh"
Pd = "Pd"
Ag = "Ag"
Cd = "Cd"
In = "In"
Sn = "Sn"
Sb = "Sb"
Te = "Te"
I = "I"
Xe = "Xe"
Cs = "Cs"
Ba = "Ba"
La = "La"
Ce = "Ce"
Pr = "Pr"
Nd = "Nd"
Pm = "Pm"
Sm = "Sm"
Eu = "Eu"
Gd = "Gd"
Tb = "Tb"
Dy = "Dy"
Ho = "Ho"
Er = "Er"
Tm = "Tm"
Yb = "Yb"
Lu = "Lu"
Hf = "Hf"
Ta = "Ta"
W = "W"
Re = "Re"
Os = "Os"
Ir = "Ir"
Pt = "Pt"
Au = "Au"
Hg = "Hg"
Tl = "Tl"
Pb = "Pb"
Bi = "Bi"
Po = "Po"
At = "At"
Rn = "Rn"
Fr = "Fr"
Ra = "Ra"
Ac = "Ac"
Th = "Th"
Pa = "Pa"
U = "U"
Np = "Np"
Pu = "Pu"
Am = "Am"
Cm = "Cm"
Bk = "Bk"
Cf = "Cf"
Es = "Es"
Fm = "Fm"
Md = "Md"
No = "No"
Lr = "Lr"
def __init__(self, symbol: str):
"""
Basic immutable element object with all relevant properties.
Only one instance of Element for each symbol is stored after creation,
ensuring that a particular element behaves like a singleton. For all
attributes, missing data (i.e., data for which is not available) is
represented by a None unless otherwise stated.
Args:
symbol (str): Element symbol, e.g., "H", "Fe"
.. attribute:: Z
Atomic number
.. attribute:: symbol
Element symbol
.. attribute:: X
            Pauling electronegativity. Elements without a tabulated
            electronegativity are assigned NaN.
.. attribute:: number
Alternative attribute for atomic number
.. attribute:: max_oxidation_state
Maximum oxidation state for element
.. attribute:: min_oxidation_state
Minimum oxidation state for element
.. attribute:: oxidation_states
Tuple of all known oxidation states
.. attribute:: common_oxidation_states
Tuple of all common oxidation states
.. attribute:: full_electronic_structure
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
.. attribute:: row
Returns the periodic table row of the element.
.. attribute:: group
Returns the periodic table group of the element.
.. attribute:: block
Return the block character "s,p,d,f"
.. attribute:: is_noble_gas
True if element is noble gas.
.. attribute:: is_transition_metal
True if element is a transition metal.
.. attribute:: is_post_transition_metal
True if element is a post transition metal.
.. attribute:: is_rare_earth_metal
True if element is a rare earth metal.
.. attribute:: is_metalloid
True if element is a metalloid.
.. attribute:: is_alkali
True if element is an alkali metal.
.. attribute:: is_alkaline
True if element is an alkaline earth metal (group II).
.. attribute:: is_halogen
True if element is a halogen.
.. attribute:: is_lanthanoid
True if element is a lanthanoid.
.. attribute:: is_actinoid
True if element is a actinoid.
.. attribute:: iupac_ordering
Ordering according to Table VI of "Nomenclature of Inorganic Chemistry
(IUPAC Recommendations 2005)". This ordering effectively follows the
            groups and rows of the periodic table, except the Lanthanides, Actinides
and hydrogen.
.. attribute:: long_name
Long name for element. E.g., "Hydrogen".
.. attribute:: atomic_mass
Atomic mass for the element.
.. attribute:: atomic_radius
Atomic radius for the element. This is the empirical value. Data is
obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: atomic_radius_calculated
            Calculated atomic radius for the element (a theoretical value, as
            opposed to the empirical atomic_radius above).
Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: van_der_waals_radius
Van der Waals radius for the element. This is the empirical
value. Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: mendeleev_no
Mendeleev number
.. attribute:: electrical_resistivity
Electrical resistivity
.. attribute:: velocity_of_sound
Velocity of sound
.. attribute:: reflectivity
Reflectivity
.. attribute:: refractive_index
            Refractive index
.. attribute:: poissons_ratio
Poisson's ratio
.. attribute:: molar_volume
Molar volume
.. attribute:: electronic_structure
Electronic structure. Simplified form with HTML formatting.
E.g., The electronic structure for Fe is represented as
[Ar].3d<sup>6</sup>.4s<sup>2</sup>
.. attribute:: atomic_orbitals
Atomic Orbitals. Energy of the atomic orbitals as a dict.
E.g., The orbitals energies in eV are represented as
{'1s': -1.0, '2s': -0.1}
Data is obtained from
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
The LDA values for neutral atoms are used
.. attribute:: thermal_conductivity
Thermal conductivity
.. attribute:: boiling_point
Boiling point
.. attribute:: melting_point
Melting point
.. attribute:: critical_temperature
Critical temperature
.. attribute:: superconduction_temperature
Superconduction temperature
.. attribute:: liquid_range
Liquid range
.. attribute:: bulk_modulus
Bulk modulus
.. attribute:: youngs_modulus
Young's modulus
.. attribute:: brinell_hardness
Brinell hardness
.. attribute:: rigidity_modulus
Rigidity modulus
.. attribute:: mineral_hardness
Mineral hardness
.. attribute:: vickers_hardness
            Vickers hardness
.. attribute:: density_of_solid
Density of solid phase
.. attribute:: coefficient_of_linear_thermal_expansion
Coefficient of linear thermal expansion
.. attribute:: average_ionic_radius
Average ionic radius for element in ang. The average is taken over all
oxidation states of the element for which data is present.
.. attribute:: average_cationic_radius
Average cationic radius for element in ang. The average is taken over all
positive oxidation states of the element for which data is present.
.. attribute:: average_anionic_radius
Average ionic radius for element in ang. The average is taken over all
negative oxidation states of the element for which data is present.
.. attribute:: ionic_radii
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
self.symbol = "%s" % symbol
d = _pt_data[symbol]
# Store key variables for quick access
self.Z = d["Atomic no"]
at_r = d.get("Atomic radius", "no data")
if str(at_r).startswith("no data"):
self._atomic_radius = None
else:
self._atomic_radius = Length(at_r, "ang")
self._atomic_mass = Mass(d["Atomic mass"], "amu")
self.long_name = d["Name"]
self._data = d
@property
def X(self):
"""
:return: Electronegativity of element. Note that if an element does not
have an electronegativity, a NaN float is returned.
"""
if "X" in self._data:
return self._data["X"]
warnings.warn("No electronegativity for %s. Setting to NaN. "
"This has no physical meaning, and is mainly done to "
"avoid errors caused by the code expecting a float."
% self.symbol)
return float("NaN")
@property
def atomic_radius(self):
"""
Returns: The atomic radius of the element in Ångstroms.
"""
return self._atomic_radius
@property
def atomic_mass(self):
"""
Returns: The atomic mass of the element in amu.
"""
return self._atomic_mass
def __getattr__(self, item):
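        # Lazily expose tabulated properties from periodic_table.json: "no data"
        # entries become None, plain numbers become floats, and values carrying a
        # recognised unit suffix are wrapped in FloatWithUnit; anything else is
        # returned as the raw string.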
if item in ["mendeleev_no", "electrical_resistivity",
"velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"electronic_structure", "thermal_conductivity",
"boiling_point", "melting_point",
"critical_temperature", "superconduction_temperature",
"liquid_range", "bulk_modulus", "youngs_modulus",
"brinell_hardness", "rigidity_modulus",
"mineral_hardness", "vickers_hardness",
"density_of_solid", "atomic_radius_calculated",
"van_der_waals_radius", "atomic_orbitals",
"coefficient_of_linear_thermal_expansion",
"ground_state_term_symbol", "valence"]:
kstr = item.capitalize().replace("_", " ")
val = self._data.get(kstr, None)
if str(val).startswith("no data"):
val = None
elif isinstance(val, dict):
pass
else:
try:
val = float(val)
except ValueError:
nobracket = re.sub(r'\(.*\)', "", val)
toks = nobracket.replace("about", "").strip().split(" ", 1)
if len(toks) == 2:
try:
if "10<sup>" in toks[1]:
base_power = re.findall(r'([+-]?\d+)', toks[1])
factor = "e" + base_power[1]
if toks[0] in [">", "high"]:
toks[0] = "1" # return the border value
toks[0] += factor
if item == "electrical_resistivity":
unit = "ohm m"
elif item == "coefficient_of_linear_thermal_expansion":
unit = "K^-1"
else:
unit = toks[1]
val = FloatWithUnit(toks[0], unit)
else:
unit = toks[1].replace("<sup>", "^").replace(
"</sup>", "").replace("Ω",
"ohm")
units = Unit(unit)
if set(units.keys()).issubset(
SUPPORTED_UNIT_NAMES):
val = FloatWithUnit(toks[0], unit)
except ValueError:
# Ignore error. val will just remain a string.
pass
return val
raise AttributeError("Element has no attribute %s!" % item)
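# Illustrative sketch of what the lookup above yields in practice: numeric
# entries come back as plain floats or FloatWithUnit objects, depending on
# whether a unit could be parsed from the data string. Values are
# approximate and depend on the bundled periodic table data file, e.g.
#     Element("Cu").thermal_conductivity   # ~401, carrying "W m^-1 K^-1"
#     Element("Fe").velocity_of_sound      # a FloatWithUnit in "m s^-1"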
@property
def data(self):
"""
Returns dict of data for element.
"""
return self._data.copy()
@property
def average_ionic_radius(self):
"""
Average ionic radius for element (with units). The average is taken
over all oxidation states of the element for which data is present.
"""
if "Ionic radii" in self._data:
radii = self._data["Ionic radii"]
radius = sum(radii.values()) / len(radii)
else:
radius = 0.0
return FloatWithUnit(radius, "ang")
@property
def average_cationic_radius(self):
"""
Average cationic radius for element (with units). The average is
taken over all positive oxidation states of the element for which
data is present.
"""
if "Ionic radii" in self._data:
radii = [v for k, v in self._data["Ionic radii"].items()
if int(k) > 0]
if radii:
return FloatWithUnit(sum(radii) / len(radii), "ang")
return FloatWithUnit(0.0, "ang")
@property
def average_anionic_radius(self):
"""
Average anionic radius for element (with units). The average is
taken over all negative oxidation states of the element for which
data is present.
"""
if "Ionic radii" in self._data:
radii = [v for k, v in self._data["Ionic radii"].items()
if int(k) < 0]
if radii:
return FloatWithUnit(sum(radii) / len(radii), "ang")
return FloatWithUnit(0.0, "ang")
@property
def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if "Ionic radii" in self._data:
return {int(k): FloatWithUnit(v, "ang") for k, v in self._data["Ionic radii"].items()}
return {}
@property
def number(self):
"""Alternative attribute for atomic number"""
return self.Z
@property
def max_oxidation_state(self):
"""Maximum oxidation state for element"""
if "Oxidation states" in self._data:
return max(self._data["Oxidation states"])
return 0
@property
def min_oxidation_state(self):
"""Minimum oxidation state for element"""
if "Oxidation states" in self._data:
return min(self._data["Oxidation states"])
return 0
@property
def oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Oxidation states", list()))
@property
def common_oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Common oxidation states", list()))
@property
def icsd_oxidation_states(self):
"""Tuple of all oxidation states with at least 10 instances in
ICSD database AND at least 1% of entries for that element"""
return tuple(self._data.get("ICSD oxidation states", list()))
@property
def metallic_radius(self):
"""
Metallic radius of the element. Radius is given in ang.
"""
return FloatWithUnit(self._data["Metallic radius"], "ang")
@property
def full_electronic_structure(self):
"""
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
"""
estr = self._data["Electronic structure"]
def parse_orbital(orbstr):
m = re.match(r"(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
if m:
return int(m.group(1)), m.group(2), int(m.group(3))
return orbstr
data = [parse_orbital(s) for s in estr.split(".")]
if data[0][0] == "[":
sym = data[0].replace("[", "").replace("]", "")
data = Element(sym).full_electronic_structure + data[1:]
return data
@property
def valence(self):
"""
From the full electron configuration, obtain the valence subshell
angular momentum (L) and the number of valence electrons (v_e).
"""
# the number of valence of noble gas is 0
if self.group == 18:
return (np.nan, 0)
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
valence = []
full_electron_config = self.full_electronic_structure
for _, l_symbol, ne in full_electron_config[::-1]:
l = L_symbols.lower().index(l_symbol)
if ne < (2 * l + 1) * 2:
valence.append((l, ne))
if len(valence) > 1:
raise ValueError("Ambiguous valence")
return valence[0]
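# Worked example of the loop above: for Fe the configuration ends in
# ...(3, "d", 6), (4, "s", 2). Scanning in reverse, the filled 4s subshell
# (2 == 2 * (2*0 + 1)) is skipped and the partially filled 3d subshell
# (6 < 10) is kept, so Element("Fe").valence gives (2, 6): an l = 2 (d)
# valence subshell holding six electrons.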
@property
def term_symbols(self):
"""
All possible Russell-Saunders term symbols of the Element.
e.g., L = 1, v_e = 2 (a p2 configuration)
returns
[['1D2'], ['3P0', '3P1', '3P2'], ['1S0']]
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
L, v_e = self.valence
# for one electron in subshell L
ml = list(range(-L, L + 1))
ms = [1 / 2, -1 / 2]
# all possible configurations of ml,ms for one e in subshell L
ml_ms = list(product(ml, ms))
# Number of possible configurations for r electrons in subshell L.
n = (2 * L + 1) * 2
# the combination of n_e electrons configurations
# C^{n}_{n_e}
e_config_combs = list(combinations(range(n), v_e))
# Total ML = sum(ml1, ml2), Total MS = sum(ms1, ms2)
TL = [sum([ml_ms[comb[e]][0] for e in range(v_e)])
for comb in e_config_combs]
TS = [sum([ml_ms[comb[e]][1] for e in range(v_e)])
for comb in e_config_combs]
comb_counter = Counter([r for r in zip(TL, TS)])
term_symbols = []
while sum(comb_counter.values()) > 0:
# Start from the lowest freq combination,
# which corresponds to largest abs(L) and smallest abs(S)
L, S = min(comb_counter)
J = list(np.arange(abs(L - S), abs(L) + abs(S) + 1))
term_symbols.append([str(int(2 * (abs(S)) + 1)) + L_symbols[abs(L)] + str(j) for j in J])
# Without J
# term_symbols.append(str(int(2 * (abs(S)) + 1)) \
# + L_symbols[abs(L)])
# Delete all configurations included in this term
for ML in range(-L, L - 1, -1):
for MS in np.arange(S, -S + 1, 1):
if (ML, MS) in comb_counter:
comb_counter[(ML, MS)] -= 1
if comb_counter[(ML, MS)] == 0:
del comb_counter[(ML, MS)]
return term_symbols
@property
def ground_state_term_symbol(self):
"""
Ground state term symbol
Selected based on Hund's Rule
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
term_symbols = self.term_symbols
term_symbol_flat = {term: {"multiplicity": int(term[0]),
"L": L_symbols.index(term[1]),
"J": float(term[2:])}
for term in sum(term_symbols, [])}
multi = [int(item['multiplicity'])
for terms, item in term_symbol_flat.items()]
max_multi_terms = {symbol: item
for symbol, item in term_symbol_flat.items()
if item['multiplicity'] == max(multi)}
Ls = [item['L'] for terms, item in max_multi_terms.items()]
max_L_terms = {symbol: item
for symbol, item in term_symbol_flat.items()
if item['L'] == max(Ls)}
J_sorted_terms = sorted(max_L_terms.items(),
key=lambda k: k[1]['J'])
L, v_e = self.valence
if v_e <= (2 * L + 1):
return J_sorted_terms[0][0]
return J_sorted_terms[-1][0]
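# In other words: for a valence subshell that is at most half filled
# (v_e <= 2L + 1), the lowest-J member of the maximum-multiplicity,
# maximum-L terms is returned (e.g. carbon, p2, a 3P0-type ground term);
# for a more than half-filled subshell (e.g. oxygen, p4) the highest-J
# member (3P2-type) is returned, matching Hund's third rule.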
def __eq__(self, other):
return isinstance(other, Element) and self.Z == other.Z
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.Z
def __repr__(self):
return "Element " + self.symbol
def __str__(self):
return self.symbol
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity. Very
useful for getting correct formulas. For example, FeO4PLi is
automatically sorted into LiFePO4.
"""
x1 = float("inf") if self.X != self.X else self.X
x2 = float("inf") if other.X != other.X else other.X
if x1 != x2:
return x1 < x2
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
@staticmethod
def from_Z(z: int):
"""
Get an element from an atomic number.
Args:
z (int): Atomic number
Returns:
Element with atomic number z.
"""
for sym, data in _pt_data.items():
if data["Atomic no"] == z:
return Element(sym)
raise ValueError("No element with this atomic number %s" % z)
@staticmethod
def from_row_and_group(row: int, group: int):
"""
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
"""
for sym in _pt_data.keys():
el = Element(sym)
if el.row == row and el.group == group:
return el
raise ValueError("No element with this row and group!")
@staticmethod
def is_valid_symbol(symbol: str):
"""
Returns true if symbol is a valid element symbol.
Args:
symbol (str): Element symbol
Returns:
True if symbol is a valid element (e.g., "H"). False otherwise
(e.g., "Zebra").
"""
return symbol in Element.__members__
@property
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
if 89 <= z <= 103:
return 9
for i, size in enumerate(_pt_row_sizes):
total += size
if total >= z:
return i + 1
return 8
@property
def group(self):
"""
Returns the periodic table group of the element.
"""
z = self.Z
if z == 1:
return 1
if z == 2:
return 18
if 3 <= z <= 18:
if (z - 2) % 8 == 0:
return 18
if (z - 2) % 8 <= 2:
return (z - 2) % 8
return 10 + (z - 2) % 8
if 19 <= z <= 54:
if (z - 18) % 18 == 0:
return 18
return (z - 18) % 18
if (z - 54) % 32 == 0:
return 18
if (z - 54) % 32 >= 18:
return (z - 54) % 32 - 14
return (z - 54) % 32
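# Quick sanity checks of the arithmetic above:
#     Fe, Z=26: 19 <= 26 <= 54, (26 - 18) % 18 == 8   -> group 8
#     Br, Z=35: (35 - 18) % 18 == 17                  -> group 17
#     Xe, Z=54: (54 - 18) % 18 == 0                   -> group 18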
@property
def block(self):
"""
Return the block character "s,p,d,f"
"""
if (self.is_actinoid or self.is_lanthanoid) and self.Z not in [71, 103]:
return "f"
if self.is_actinoid or self.is_lanthanoid:
return "d"
if self.group in [1, 2]:
return "s"
if self.group in range(13, 19):
return "p"
if self.group in range(3, 13):
return "d"
raise ValueError("unable to determine block")
@property
def is_noble_gas(self):
"""
True if element is noble gas.
"""
return self.Z in (2, 10, 18, 36, 54, 86, 118)
@property
def is_transition_metal(self):
"""
True if element is a transition metal.
"""
ns = list(range(21, 31))
ns.extend(list(range(39, 49)))
ns.append(57)
ns.extend(list(range(72, 81)))
ns.append(89)
ns.extend(list(range(104, 113)))
return self.Z in ns
@property
def is_post_transition_metal(self):
"""
True if element is a post-transition or poor metal.
"""
return self.symbol in ("Al", "Ga", "In", "Tl", "Sn", "Pb", "Bi")
@property
def is_rare_earth_metal(self) -> bool:
"""
True if element is a rare earth metal.
"""
return self.is_lanthanoid or self.is_actinoid
@property
def is_metal(self) -> bool:
"""
:return: True if is a metal.
"""
return (self.is_alkali or self.is_alkaline or
self.is_post_transition_metal or self.is_transition_metal or
self.is_lanthanoid or self.is_actinoid)
@property
def is_metalloid(self) -> bool:
"""
True if element is a metalloid.
"""
return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")
@property
def is_alkali(self) -> bool:
"""
True if element is an alkali metal.
"""
return self.Z in (3, 11, 19, 37, 55, 87)
@property
def is_alkaline(self) -> bool:
"""
True if element is an alkaline earth metal (group II).
"""
return self.Z in (4, 12, 20, 38, 56, 88)
@property
def is_halogen(self):
"""
True if element is a halogen.
"""
return self.Z in (9, 17, 35, 53, 85)
@property
def is_chalcogen(self):
"""
True if element is a chalcogen.
"""
return self.Z in (8, 16, 34, 52, 84)
@property
def is_lanthanoid(self):
"""
True if element is a lanthanoid.
"""
return 56 < self.Z < 72
@property
def is_actinoid(self):
"""
True if element is a actinoid.
"""
return 88 < self.Z < 104
@property
def is_quadrupolar(self):
"""
Checks if this element can be quadrupolar
"""
return len(self.data.get("NMR Quadrupole Moment", {})) > 0
@property
def nmr_quadrupole_moment(self):
"""
Get a dictionary of the nuclear electric quadrupole moment in units of
e*millibarns for various isotopes
"""
return {k: FloatWithUnit(v, "mbarn")
for k, v in self.data.get("NMR Quadrupole Moment", {}).items()}
@property
def iupac_ordering(self):
"""
Ordering according to Table VI of "Nomenclature of Inorganic Chemistry
(IUPAC Recommendations 2005)". This ordering effectively follows the
groups and rows of the periodic table, except the Lanthanides, Actinides
and hydrogen.
"""
return self._data["IUPAC ordering"]
def __deepcopy__(self, memo):
return Element(self.symbol)
@staticmethod
def from_dict(d):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return Element(d["element"])
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol}
@staticmethod
def print_periodic_table(filter_function: Optional[Callable] = None):
"""
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
"""
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
class Specie(MSONable):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
"""
supported_properties = ("spin",)
def __init__(self, symbol: str,
oxidation_state: float = 0.0,
properties: dict = None):
"""
Initializes a Specie.
Args:
symbol (str): Element symbol, e.g., Fe
oxidation_state (float): Oxidation state of element, e.g., 2 or -2
properties: Properties associated with the Specie, e.g.,
{"spin": 5}. Defaults to None. Properties must be one of the
Specie supported_properties.
.. attribute:: oxi_state
Oxidation state associated with Specie
.. attribute:: ionic_radius
Ionic radius of Specie (with specific oxidation state).
.. versionchanged:: 2.6.7
Properties are now checked when comparing two Species for equality.
"""
self._el = Element(symbol)
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
return getattr(self._el, a)
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
return (isinstance(other, Specie) and self.symbol == other.symbol
and self.oxi_state == other.oxi_state
and self._properties == other._properties)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Equal Specie should have the same str representation, hence
should hash equally. Unequal Specie will have different str
representations.
"""
return self.__str__().__hash__()
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state, followed by spin.
"""
x1 = float("inf") if self.X != self.X else self.X
x2 = float("inf") if other.X != other.X else other.X
if x1 != x2:
return x1 < x2
if self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
if self.oxi_state:
other_oxi = 0 if (isinstance(other, Element)
or other.oxi_state is None) else other.oxi_state
return self.oxi_state < other_oxi
if getattr(self, "spin", False):
other_spin = getattr(other, "spin", 0)
return self.spin < other_spin
return False
@property
def element(self):
"""
Underlying element object
"""
return self._el
@property
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
if oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None
@property
def oxi_state(self):
"""
Oxidation state of Specie.
"""
return self._oxi_state
@staticmethod
def from_string(species_string: str):
"""
Returns a Specie from a string representation.
Args:
species_string (str): A typical string representation of a
species, e.g., "Mn2+", "Fe3+", "O2-".
Returns:
A Specie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string)
if m:
sym = m.group(1)
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).replace(",", "").split("=")
properties = {toks[0]: float(toks[1])}
return Specie(sym, oxi, properties)
raise ValueError("Invalid Species String")
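# Usage sketch: the regex accepts an optional trailing ",prop=value" block,
# so for example
#     Specie.from_string("Fe3+")         -> Specie("Fe", 3.0)
#     Specie.from_string("O2-")          -> Specie("O", -2.0)
#     Specie.from_string("Fe2+,spin=5")  -> Specie("Fe", 2.0, {"spin": 5.0})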
def __repr__(self):
return "Specie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_nmr_quadrupole_moment(self, isotope=None):
"""
Gets the nuclear electric quadrupole moment in units of
e*millibarns
Args:
isotope (str): the isotope to get the quadrupole moment for
default is None, which gets the lowest mass isotope
"""
quad_mom = self._el.nmr_quadrupole_moment
if not quad_mom:
return 0.0
if isotope is None:
isotopes = list(quad_mom.keys())
isotopes.sort(key=lambda x: int(x.split("-")[1]), reverse=False)
return quad_mom.get(isotopes[0], 0.0)
if isotope not in quad_mom:
raise ValueError("No quadrupole moment for isotope {}".format(
isotope))
return quad_mom.get(isotope, 0.0)
def get_shannon_radius(self, cn: str, spin: str = "",
radius_type: str = "ionic"):
"""
Get the local environment specific ionic radius for species.
Args:
cn (str): Coordination using roman letters. Supported values are
I-IX, as well as IIIPY, IVPY and IVSQ.
spin (str): Some species have different radii for different
spins. You can get specific values using "High Spin" or
"Low Spin". Leave it as "" if not available. If only one spin
data is available, it is returned and this spin parameter is
ignored.
radius_type (str): Either "crystal" or "ionic" (default).
Returns:
Shannon radius for specie in the specified environment.
"""
radii = self._el.data["Shannon radii"]
radii = radii[str(int(self._oxi_state))][cn] # type: ignore
if len(radii) == 1: # type: ignore
k, data = list(radii.items())[0] # type: ignore
if k != spin:
warnings.warn(
"Specified spin state of %s not consistent with database "
"spin of %s. Only one spin data available, and "
"that value is returned." % (spin, k)
)
else:
data = radii[spin]
return data["%s_radius" % radius_type]
def get_crystal_field_spin(self, coordination: str = "oct",
spin_config: str = "high"):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ("oct", "tet") or spin_config not in ("high", "low"):
raise ValueError("Invalid coordination or spin config.")
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(self.symbol))
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}".format(self.oxi_state, self.symbol))
if spin_config == "high":
if nelectrons <= 5:
return nelectrons
return 10 - nelectrons
if spin_config == "low":
if coordination == "oct":
if nelectrons <= 3:
return nelectrons
if nelectrons <= 6:
return 6 - nelectrons
if nelectrons <= 8:
return nelectrons - 6
return 10 - nelectrons
if coordination == "tet":
if nelectrons <= 2:
return nelectrons
if nelectrons <= 4:
return 4 - nelectrons
if nelectrons <= 7:
return nelectrons - 4
return 10 - nelectrons
raise RuntimeError()
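# Example for a d6 ion such as Fe2+ (nelectrons = 2 + 6 - 2 = 6):
#     Specie("Fe", 2).get_crystal_field_spin("oct", "high")  -> 4
#     Specie("Fe", 2).get_crystal_field_spin("oct", "low")   -> 0
# i.e. four unpaired electrons in the high-spin octahedral case and none
# in the low-spin case.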
def __deepcopy__(self, memo):
return Specie(self.symbol, self.oxi_state, self._properties)
def as_dict(self):
"""
:return: Json-able dictionary representation.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Specie.
"""
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
class DummySpecie(Specie):
"""
A special specie for representing non-traditional elements or species. For
example, representation of vacancies (charged or otherwise), or special
sites, etc.
.. attribute:: oxi_state
Oxidation state associated with Specie.
.. attribute:: Z
DummySpecie is always assigned an atomic number equal to the hash
number of the symbol. Obviously, it makes no sense whatsoever to use
the atomic number of a Dummy specie for anything scientific. The purpose
of this is to ensure that for most use cases, a DummySpecie behaves no
differently from an Element or Specie.
.. attribute:: X
DummySpecie is always assigned an electronegativity of 0.
"""
def __init__(self,
symbol: str = "X",
oxidation_state: float = 0,
properties: dict = None):
"""
Args:
symbol (str): An assigned symbol for the dummy specie. Strict
rules are applied to the choice of the symbol. Neither the first
letter nor the first two letters of the dummy symbol may form a
valid Element symbol. Otherwise, a composition may
be parsed wrongly. E.g., "X" is fine, but "Vac" is not
because Vac contains V, a valid Element.
oxidation_state (float): Oxidation state for dummy specie.
Defaults to zero.
"""
for i in range(1, min(2, len(symbol)) + 1):
if Element.is_valid_symbol(symbol[:i]):
raise ValueError("{} contains {}, which is a valid element "
"symbol.".format(symbol, symbol[:i]))
# Set required attributes for DummySpecie to function like a Specie in
# most instances.
self._symbol = symbol
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
raise AttributeError(a)
def __hash__(self):
return self.symbol.__hash__()
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
if not isinstance(other, DummySpecie):
return False
return (isinstance(other, Specie) and
self.symbol == other.symbol and
self.oxi_state == other.oxi_state and
self._properties == other._properties)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
if self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def Z(self) -> int:
"""
DummySpecie is always assigned an atomic number equal to the hash of
the symbol. The expectation is that someone would be an actual dummy
to use atomic numbers for a Dummy specie.
"""
return self.symbol.__hash__()
@property
def oxi_state(self) -> float:
"""
Oxidation state associated with DummySpecie
"""
return self._oxi_state
@property
def X(self) -> float:
"""
DummySpecie is always assigned an electronegativity of 0. The effect of
this is that DummySpecie are always sorted in front of actual Specie.
"""
return 0.0
@property
def symbol(self) -> str:
"""
:return: Symbol for DummySpecie.
"""
return self._symbol
def __deepcopy__(self, memo):
return DummySpecie(self.symbol, self._oxi_state)
@staticmethod
def from_string(species_string: str):
"""
Returns a Dummy from a string representation.
Args:
species_string (str): A string representation of a dummy
species, e.g., "X2+", "X3+".
Returns:
A DummySpecie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string)
if m:
sym = m.group(1)
if m.group(2) == "" and m.group(3) == "":
oxi = 0.0
else:
oxi = 1.0 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).split("=")
properties = {toks[0]: float(toks[1])}
return DummySpecie(sym, oxi, properties)
raise ValueError("Invalid DummySpecies String")
def as_dict(self):
"""
:return: MSONAble dict representation.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: DummySpecie
"""
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
def __repr__(self):
return "DummySpecie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummySpecie parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except Exception:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj))
| mit | 5,697,098,511,376,204,000 | 30.466032 | 106 | 0.528804 | false | 3.989936 | false | false | false |
NewEvolution/python-webapp | pythonapp/settings.py | 1 | 3002 | """
Django settings for pythonapp project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(#58epjjm5q2%qa$1t)%-m7sse%vzt0f+%gr6i)1ts+9)=b0#$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'webapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pythonapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': False,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
# PyJade part: ##############################
('pyjade.ext.django.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
))
],
},
},
]
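# Note on the loaders block above: pyjade's Django loader wraps the two
# standard loaders, so .jade templates are compiled on the fly while plain
# Django templates still resolve through the wrapped filesystem and
# app_directories loaders. APP_DIRS must remain False whenever 'loaders'
# is set explicitly, as it is here.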
WSGI_APPLICATION = 'pythonapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit | 2,576,626,741,817,498,600 | 26.290909 | 71 | 0.659227 | false | 3.586619 | false | false | false |
indrajitr/ansible | test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py | 1 | 106596 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <[email protected]>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import argparse
import ast
import datetime
import json
import errno
import os
import re
import subprocess
import sys
import tempfile
import traceback
from collections import OrderedDict
from contextlib import contextmanager
from distutils.version import StrictVersion, LooseVersion
from fnmatch import fnmatch
import yaml
from ansible import __version__ as ansible_version
from ansible.executor.module_common import REPLACER_WINDOWS
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils._text import to_native
from ansible.plugins.loader import fragment_loader
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
from ansible.utils.plugin_docs import BLACKLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring
from ansible.utils.version import SemanticVersion
from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec
from .schema import ansible_module_kwargs_schema, doc_schema, return_schema
from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import PY3, with_metaclass, string_types
if PY3:
# Because there is no ast.TryExcept in Python 3 ast module
TRY_EXCEPT = ast.Try
# REPLACER_WINDOWS from ansible.executor.module_common is byte
# string but we need unicode for Python 3
REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
else:
TRY_EXCEPT = ast.TryExcept
BLACKLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
INDENT_REGEX = re.compile(r'([\t]*)')
TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*')
SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
BLACKLIST_IMPORTS = {
'requests': {
'new_only': True,
'error': {
'code': 'use-module-utils-urls',
'msg': ('requests import found, should use '
'ansible.module_utils.urls instead')
}
},
r'boto(?:\.|$)': {
'new_only': True,
'error': {
'code': 'use-boto3',
'msg': 'boto import found, new modules should use boto3'
}
},
}
SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
OS_CALL_REGEX = re.compile(r'os\.call.*')
LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
def compare_dates(d1, d2):
try:
date1 = parse_isodate(d1, allow_date=True)
date2 = parse_isodate(d2, allow_date=True)
return date1 == date2
except ValueError:
# At least one of d1 and d2 cannot be parsed. Simply compare values.
return d1 == d2
class ReporterEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Exception):
return str(o)
return json.JSONEncoder.default(self, o)
class Reporter:
def __init__(self):
self.files = OrderedDict()
def _ensure_default_entry(self, path):
try:
self.files[path]
except KeyError:
self.files[path] = {
'errors': [],
'warnings': [],
'traces': [],
'warning_traces': []
}
def _log(self, path, code, msg, level='error', line=0, column=0):
self._ensure_default_entry(path)
lvl_dct = self.files[path]['%ss' % level]
lvl_dct.append({
'code': code,
'msg': msg,
'line': line,
'column': column
})
def error(self, *args, **kwargs):
self._log(*args, level='error', **kwargs)
def warning(self, *args, **kwargs):
self._log(*args, level='warning', **kwargs)
def trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['traces'].append(tracebk)
def warning_trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['warning_traces'].append(tracebk)
@staticmethod
@contextmanager
def _output_handle(output):
if output != '-':
handle = open(output, 'w+')
else:
handle = sys.stdout
yield handle
handle.flush()
handle.close()
@staticmethod
def _filter_out_ok(reports):
temp_reports = OrderedDict()
for path, report in reports.items():
if report['errors'] or report['warnings']:
temp_reports[path] = report
return temp_reports
def plain(self, warnings=False, output='-'):
"""Print out the test results in plain format
output is ignored here for now
"""
ret = []
for path, report in Reporter._filter_out_ok(self.files).items():
traces = report['traces'][:]
if warnings and report['warnings']:
traces.extend(report['warning_traces'])
for trace in traces:
print('TRACE:')
print('\n '.join((' %s' % trace).splitlines()))
for error in report['errors']:
error['path'] = path
print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
ret.append(1)
if warnings:
for warning in report['warnings']:
warning['path'] = path
print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
return 3 if ret else 0
def json(self, warnings=False, output='-'):
"""Print out the test results in json format
warnings is not respected in this output
"""
ret = [len(r['errors']) for r in self.files.values()]
with Reporter._output_handle(output) as handle:
print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
return 3 if sum(ret) else 0
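# Rough usage sketch: a Reporter accumulates findings per file and is
# rendered once at the end of a run, e.g.
#     reporter = Reporter()
#     reporter.error(path='lib/ansible/modules/foo.py',
#                    code='missing-return', msg='No RETURN provided')
#     exit_code = reporter.plain(warnings=True)   # or reporter.json(output='-')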
class Validator(with_metaclass(abc.ABCMeta, object)):
"""Validator instances are intended to be run on a single object. if you
are scanning multiple objects for problems, you'll want to have a separate
Validator for each one."""
def __init__(self, reporter=None):
self.reporter = reporter
@abc.abstractproperty
def object_name(self):
"""Name of the object we validated"""
pass
@abc.abstractproperty
def object_path(self):
"""Path of the object we validated"""
pass
@abc.abstractmethod
def validate(self):
"""Run this method to generate the test results"""
pass
class ModuleValidator(Validator):
BLACKLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
BLACKLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
'shippable.yml',
'.gitattributes', '.gitmodules', 'COPYING',
'__init__.py', 'VERSION', 'test-docs.sh'))
BLACKLIST = BLACKLIST_FILES.union(BLACKLIST['MODULE'])
PS_DOC_BLACKLIST = frozenset((
'async_status.ps1',
'slurp.ps1',
'setup.ps1'
))
PS_ARG_VALIDATE_BLACKLIST = frozenset((
'win_dsc.ps1', # win_dsc is a dynamic arg spec, the docs won't ever match
))
WHITELIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None,
base_branch=None, git_cache=None, reporter=None, routing=None):
super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(self.path)
self.name = os.path.splitext(self.basename)[0]
self.analyze_arg_spec = analyze_arg_spec
self._Version = LooseVersion
self._StrictVersion = StrictVersion
self.collection = collection
self.collection_name = 'ansible.builtin'
if self.collection:
self._Version = SemanticVersion
self._StrictVersion = SemanticVersion
collection_namespace_path, collection_name = os.path.split(self.collection)
self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
self.routing = routing
self.collection_version = None
if collection_version is not None:
self.collection_version_str = collection_version
self.collection_version = SemanticVersion(collection_version)
self.base_branch = base_branch
self.git_cache = git_cache or GitCache()
self._python_module_override = False
with open(path) as f:
self.text = f.read()
self.length = len(self.text.splitlines())
try:
self.ast = ast.parse(self.text)
except Exception:
self.ast = None
if base_branch:
self.base_module = self._get_base_file()
else:
self.base_module = None
def _create_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return LooseVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._Version(v)
def _create_strict_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return StrictVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._StrictVersion(v)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.base_module:
return
try:
os.remove(self.base_module)
except Exception:
pass
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def _get_collection_meta(self):
"""Implement if we need this for version_added comparisons
"""
pass
def _python_module(self):
if self.path.endswith('.py') or self._python_module_override:
return True
return False
def _powershell_module(self):
if self.path.endswith('.ps1'):
return True
return False
def _just_docs(self):
"""Module can contain just docs and from __future__ boilerplate
"""
try:
for child in self.ast.body:
if not isinstance(child, ast.Assign):
# allowed from __future__ imports
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
for future_import in child.names:
if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
break
else:
continue
return False
return True
except AttributeError:
return False
def _get_base_branch_module_path(self):
"""List all paths within lib/ansible/modules to try and match a moved module"""
return self.git_cache.base_module_paths.get(self.object_name)
def _has_alias(self):
"""Return true if the module has any aliases."""
return self.object_name in self.git_cache.head_aliased_modules
def _get_base_file(self):
# In case of module moves, look for the original location
base_path = self._get_base_branch_module_path()
command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if int(p.returncode) != 0:
return None
t = tempfile.NamedTemporaryFile(delete=False)
t.write(stdout)
t.close()
return t.name
def _is_new_module(self):
if self._has_alias():
return False
return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
def _check_interpreter(self, powershell=False):
if powershell:
if not self.text.startswith('#!powershell\n'):
self.reporter.error(
path=self.object_path,
code='missing-powershell-interpreter',
msg='Interpreter line is not "#!powershell"'
)
return
if not self.text.startswith('#!/usr/bin/python'):
self.reporter.error(
path=self.object_path,
code='missing-python-interpreter',
msg='Interpreter line is not "#!/usr/bin/python"',
)
def _check_type_instead_of_isinstance(self, powershell=False):
if powershell:
return
for line_no, line in enumerate(self.text.splitlines()):
typekeyword = TYPE_REGEX.match(line)
if typekeyword:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='unidiomatic-typecheck',
msg=('Type comparison using type() found. '
'Use isinstance() instead'),
line=line_no + 1
)
def _check_for_sys_exit(self):
# Optimize out the happy path
if 'sys.exit' not in self.text:
return
for line_no, line in enumerate(self.text.splitlines()):
sys_exit_usage = SYS_EXIT_REGEX.match(line)
if sys_exit_usage:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='use-fail-json-not-sys-exit',
msg='sys.exit() call found. Should be exit_json/fail_json',
line=line_no + 1
)
def _check_gpl3_header(self):
header = '\n'.join(self.text.split('\n')[:20])
if ('GNU General Public License' not in header or
('version 3' not in header and 'v3.0' not in header)):
self.reporter.error(
path=self.object_path,
code='missing-gplv3-license',
msg='GPLv3 license header not found in the first 20 lines of the module'
)
elif self._is_new_module():
if len([line for line in header
if 'GNU General Public License' in line]) > 1:
self.reporter.error(
path=self.object_path,
code='use-short-gplv3-license',
msg='Found old style GPLv3 license header: '
'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright'
)
def _check_for_subprocess(self):
for child in self.ast.body:
if isinstance(child, ast.Import):
if child.names[0].name == 'subprocess':
for line_no, line in enumerate(self.text.splitlines()):
sp_match = SUBPROCESS_REGEX.search(line)
if sp_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-popen',
msg=('subprocess.Popen call found. Should be module.run_command'),
line=(line_no + 1),
column=(sp_match.span()[0] + 1)
)
def _check_for_os_call(self):
if 'os.call' in self.text:
for line_no, line in enumerate(self.text.splitlines()):
os_call_match = OS_CALL_REGEX.search(line)
if os_call_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-os-call',
msg=('os.call() call found. Should be module.run_command'),
line=(line_no + 1),
column=(os_call_match.span()[0] + 1)
)
def _find_blacklist_imports(self):
for child in self.ast.body:
names = []
if isinstance(child, ast.Import):
names.extend(child.names)
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
names.extend(grandchild.names)
for name in names:
# TODO: Add line/col
for blacklist_import, options in BLACKLIST_IMPORTS.items():
if re.search(blacklist_import, name.name):
new_only = options['new_only']
if self._is_new_module() and new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
elif not new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
def _find_module_utils(self, main):
linenos = []
found_basic = False
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
names = []
try:
names.append(child.module)
if child.module.endswith('.basic'):
found_basic = True
except AttributeError:
pass
names.extend([n.name for n in child.names])
if [n for n in names if n.startswith('ansible.module_utils')]:
linenos.append(child.lineno)
for name in child.names:
if ('module_utils' in getattr(child, 'module', '') and
isinstance(name, ast.alias) and
name.name == '*'):
msg = (
'module-utils-specific-import',
('module_utils imports should import specific '
'components, not "*"')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
if (isinstance(name, ast.alias) and
name.name == 'basic'):
found_basic = True
if not found_basic:
self.reporter.warning(
path=self.object_path,
code='missing-module-utils-basic-import',
msg='Did not find "ansible.module_utils.basic" import'
)
return linenos
def _get_first_callable(self):
linenos = []
for child in self.ast.body:
if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
linenos.append(child.lineno)
return min(linenos)
def _find_main_call(self, look_for="main"):
""" Ensure that the module ends with:
if __name__ == '__main__':
main()
OR, in the case of modules that are in the docs-only deprecation phase
if __name__ == '__main__':
removed_module()
"""
lineno = False
if_bodies = []
for child in self.ast.body:
if isinstance(child, ast.If):
try:
if child.test.left.id == '__name__':
if_bodies.extend(child.body)
except AttributeError:
pass
bodies = self.ast.body
bodies.extend(if_bodies)
for child in bodies:
# validate that the next to last line is 'if __name__ == "__main__"'
if child.lineno == (self.length - 1):
mainchecked = False
try:
if isinstance(child, ast.If) and \
child.test.left.id == '__name__' and \
len(child.test.ops) == 1 and \
isinstance(child.test.ops[0], ast.Eq) and \
child.test.comparators[0].s == '__main__':
mainchecked = True
except Exception:
pass
if not mainchecked:
self.reporter.error(
path=self.object_path,
code='missing-if-name-main',
msg='Next to last line should be: if __name__ == "__main__":',
line=child.lineno
)
# validate that the final line is a call to main()
if isinstance(child, ast.Expr):
if isinstance(child.value, ast.Call):
if (isinstance(child.value.func, ast.Name) and
child.value.func.id == look_for):
lineno = child.lineno
if lineno < self.length - 1:
self.reporter.error(
path=self.object_path,
code='last-line-main-call',
msg=('Call to %s() not the last line' % look_for),
line=lineno
)
if not lineno:
self.reporter.error(
path=self.object_path,
code='missing-main-call',
msg=('Did not find a call to %s()' % look_for)
)
return lineno or 0
def _find_has_import(self):
for child in self.ast.body:
found_try_except_import = False
found_has = False
if isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
found_try_except_import = True
if isinstance(grandchild, ast.Assign):
for target in grandchild.targets:
if target.id.lower().startswith('has_'):
found_has = True
if found_try_except_import and not found_has:
# TODO: Add line/col
self.reporter.warning(
path=self.object_path,
code='try-except-missing-has',
msg='Found Try/Except block without HAS_ assignment'
)
def _ensure_imports_below_docs(self, doc_info, first_callable):
try:
min_doc_line = min(
[doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
)
except ValueError:
# We can't perform this validation, as there are no DOCs provided at all
return
max_doc_line = max(
[doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
)
import_lines = []
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
# allowed from __future__ imports
for future_import in child.names:
if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
self.reporter.error(
path=self.object_path,
code='illegal-future-imports',
msg=('Only the following from __future__ imports are allowed: %s'
% ', '.join(self.WHITELIST_FUTURE_IMPORTS)),
line=child.lineno
)
break
else:  # for-else. If we didn't find a problem and break out of the loop, then this is a legal import
continue
import_lines.append(child.lineno)
if child.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation variables. '
'All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
import_lines.append(grandchild.lineno)
if grandchild.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation '
'variables. All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
for import_line in import_lines:
if not (max_doc_line < import_line < first_callable):
msg = (
'import-placement',
('Imports should be directly below DOCUMENTATION/EXAMPLES/'
'RETURN.')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
def _validate_ps_replacers(self):
# loop all (for/else + error)
# get module list for each
# check "shape" of each module name
module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
found_requires = False
for req_stmt in re.finditer(module_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-utils-per-requires',
msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.psm1'):
self.reporter.error(
path=self.object_path,
code='invalid-requires-extension',
msg='Module #Requires should not end in .psm1: "%s"' % module_name
)
for req_stmt in re.finditer(csharp_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-csharp-utils-per-requires',
msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.cs'):
self.reporter.error(
path=self.object_path,
code='illegal-extension-cs',
msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
)
# also accept the legacy #POWERSHELL_COMMON replacer signal
if not found_requires and REPLACER_WINDOWS not in self.text:
self.reporter.error(
path=self.object_path,
code='missing-module-utils-import-csharp-requirements',
msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
)
def _find_ps_docs_py_file(self):
if self.object_name in self.PS_DOC_BLACKLIST:
return
py_path = self.path.replace('.ps1', '.py')
if not os.path.isfile(py_path):
self.reporter.error(
path=self.object_path,
code='missing-python-doc',
msg='Missing python documentation file'
)
return py_path
def _get_docs(self):
docs = {
'DOCUMENTATION': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'EXAMPLES': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'RETURN': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
}
for child in self.ast.body:
if isinstance(child, ast.Assign):
for grandchild in child.targets:
if not isinstance(grandchild, ast.Name):
continue
if grandchild.id == 'DOCUMENTATION':
docs['DOCUMENTATION']['value'] = child.value.s
docs['DOCUMENTATION']['lineno'] = child.lineno
docs['DOCUMENTATION']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'EXAMPLES':
docs['EXAMPLES']['value'] = child.value.s
docs['EXAMPLES']['lineno'] = child.lineno
docs['EXAMPLES']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'RETURN':
docs['RETURN']['value'] = child.value.s
docs['RETURN']['lineno'] = child.lineno
docs['RETURN']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
return docs
def _validate_docs_schema(self, doc, schema, name, error_code):
# TODO: Add line/col
errors = []
try:
schema(doc)
except Exception as e:
for error in e.errors:
error.data = doc
errors.extend(e.errors)
for error in errors:
path = [str(p) for p in error.path]
local_error_code = getattr(error, 'ansible_error_code', error_code)
if isinstance(error.data, dict):
error_message = humanize_error(error.data, error)
else:
error_message = error
if path:
combined_path = '%s.%s' % (name, '.'.join(path))
else:
combined_path = name
self.reporter.error(
path=self.object_path,
code=local_error_code,
msg='%s: %s' % (combined_path, error_message)
)
def _validate_docs(self):
doc_info = self._get_docs()
doc = None
documentation_exists = False
examples_exist = False
returns_exist = False
# We have three ways of marking deprecated/removed files. Have to check each one
# individually and then make sure they all agree
filename_deprecated_or_removed = False
deprecated = False
removed = False
doc_deprecated = None # doc legally might not exist
routing_says_deprecated = False
if self.object_name.startswith('_') and not os.path.islink(self.object_path):
filename_deprecated_or_removed = True
# We are testing a collection
if self.routing:
routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {})
if routing_deprecation:
# meta/runtime.yml says this is deprecated
routing_says_deprecated = True
deprecated = True
if not removed:
if not bool(doc_info['DOCUMENTATION']['value']):
self.reporter.error(
path=self.object_path,
code='missing-documentation',
msg='No DOCUMENTATION provided'
)
else:
documentation_exists = True
doc, errors, traces = parse_yaml(
doc_info['DOCUMENTATION']['value'],
doc_info['DOCUMENTATION']['lineno'],
self.name, 'DOCUMENTATION'
)
if doc:
add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True)
for error in errors:
self.reporter.error(
path=self.object_path,
code='documentation-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not errors and not traces:
missing_fragment = False
with CaptureStd():
try:
get_docstring(self.path, fragment_loader, verbose=True,
collection_name=self.collection_name, is_module=True)
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.error(
path=self.object_path,
code='missing-doc-fragment',
msg='DOCUMENTATION fragment missing: %s' % fragment
)
missing_fragment = True
except Exception as e:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
self.reporter.error(
path=self.object_path,
code='documentation-error',
msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
)
if not missing_fragment:
add_fragments(doc, self.object_path, fragment_loader=fragment_loader, is_module=True)
if 'options' in doc and doc['options'] is None:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-options',
msg='DOCUMENTATION.options must be a dictionary/hash when used',
)
if 'deprecated' in doc and doc.get('deprecated'):
doc_deprecated = True
doc_deprecation = doc['deprecated']
documentation_collection = doc_deprecation.get('removed_from_collection')
if documentation_collection != self.collection_name:
self.reporter.error(
path=self.object_path,
code='deprecation-wrong-collection',
msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
documentation_collection, self.collection_name)
)
else:
doc_deprecated = False
if os.path.islink(self.object_path):
# This module has an alias, which we can tell as it's a symlink
# Rather than checking for `module: $filename` we need to check against the true filename
self._validate_docs_schema(
doc,
doc_schema(
os.readlink(self.object_path).split('.')[0],
for_collection=bool(self.collection),
deprecated_module=deprecated,
),
'DOCUMENTATION',
'invalid-documentation',
)
else:
# This is the normal case
self._validate_docs_schema(
doc,
doc_schema(
self.object_name.split('.')[0],
for_collection=bool(self.collection),
deprecated_module=deprecated,
),
'DOCUMENTATION',
'invalid-documentation',
)
if not self.collection:
existing_doc = self._check_for_new_args(doc)
self._check_version_added(doc, existing_doc)
if not bool(doc_info['EXAMPLES']['value']):
self.reporter.error(
path=self.object_path,
code='missing-examples',
msg='No EXAMPLES provided'
)
else:
_doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
doc_info['EXAMPLES']['lineno'],
self.name, 'EXAMPLES', load_all=True)
for error in errors:
self.reporter.error(
path=self.object_path,
code='invalid-examples',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not bool(doc_info['RETURN']['value']):
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code='missing-return',
msg='No RETURN provided'
)
else:
self.reporter.warning(
path=self.object_path,
code='missing-return-legacy',
msg='No RETURN provided'
)
else:
data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
doc_info['RETURN']['lineno'],
self.name, 'RETURN')
if data:
add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True)
self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)),
'RETURN', 'return-syntax-error')
for error in errors:
self.reporter.error(
path=self.object_path,
code='return-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
# Check for mismatched deprecation
if not self.collection:
mismatched_deprecation = True
if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated):
mismatched_deprecation = False
else:
if (filename_deprecated_or_removed and deprecated and doc_deprecated):
mismatched_deprecation = False
if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)):
mismatched_deprecation = False
if mismatched_deprecation:
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
                    msg='Module deprecation/removed must agree in documentation, by prepending filename with'
' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
' documentation for removed'
)
else:
# We are testing a collection
if self.object_name.startswith('_'):
self.reporter.error(
path=self.object_path,
code='collections-no-underscore-on-deprecation',
msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead',
)
if not (doc_deprecated == routing_says_deprecated):
# DOCUMENTATION.deprecated and meta/runtime.yml disagree
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
)
elif routing_says_deprecated:
# Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
# Make sure they give the same version or date.
routing_date = routing_deprecation.get('removal_date')
routing_version = routing_deprecation.get('removal_version')
# The versions and dates in the module documentation are auto-tagged, so remove the tag
# to make comparison possible and to avoid confusing the user.
documentation_date = doc_deprecation.get('removed_at_date')
documentation_version = doc_deprecation.get('removed_in')
if not compare_dates(routing_date, documentation_date):
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
routing_date, documentation_date)
)
if routing_version != documentation_version:
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
routing_version, documentation_version)
)
# In the future we should error if ANSIBLE_METADATA exists in a collection
return doc_info, doc
def _check_version_added(self, doc, existing_doc):
version_added_raw = doc.get('version_added')
try:
collection_name = doc.get('version_added_collection')
version_added = self._create_strict_version(
str(doc.get('version_added', '0.0') or '0.0'),
collection_name=collection_name)
except ValueError as e:
version_added = doc.get('version_added', '0.0')
if version_added != 'historical' or self._is_new_module():
self.reporter.error(
path=self.object_path,
code='module-invalid-version-added',
msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
)
return
if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
)
if not self._is_new_module():
return
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
if (version_added < strict_ansible_version or
strict_ansible_version < version_added):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
)
def _validate_ansible_module_call(self, docs):
try:
spec, args, kwargs = get_argument_spec(self.path, self.collection)
except AnsibleModuleNotInitialized:
self.reporter.error(
path=self.object_path,
code='ansible-module-not-initialized',
msg="Execution of the module did not result in initialization of AnsibleModule",
)
return
except AnsibleModuleImportError as e:
self.reporter.error(
path=self.object_path,
code='import-error',
msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
)
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)),
'AnsibleModule', 'invalid-ansiblemodule-schema')
self._validate_argument_spec(docs, spec, kwargs)
def _validate_list_of_module_args(self, name, terms, spec, context):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)):
# This is already reported by schema checking
continue
bad_term = False
for term in check:
if not isinstance(term, string_types):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must contain strings in the lists or tuples; found value %r" % (term, )
self.reporter.error(
path=self.object_path,
code=name + '-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(check)) != len(check):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code=name + '-collision',
msg=msg,
)
if not set(check) <= set(spec):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code=name + '-unknown',
msg=msg,
)
def _validate_required_if(self, terms, spec, context, module):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
# This is already reported by schema checking
continue
if len(check) == 4 and not isinstance(check[3], bool):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
self.reporter.error(
path=self.object_path,
code='required_if-is_one_of-type',
msg=msg,
)
requirements = check[2]
if not isinstance(requirements, (list, tuple)):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
continue
bad_term = False
for term in requirements:
if not isinstance(term, string_types):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have only strings in third value (requirements); got %r" % (term, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(requirements)) != len(requirements):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms in requirements"
self.reporter.error(
path=self.object_path,
code='required_if-requirements-collision',
msg=msg,
)
if not set(requirements) <= set(spec):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_if-requirements-unknown',
msg=msg,
)
key = check[0]
if key not in spec:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have its key %s in argument_spec" % key
self.reporter.error(
path=self.object_path,
code='required_if-unknown-key',
msg=msg,
)
continue
if key in requirements:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains its key %s in requirements" % key
self.reporter.error(
path=self.object_path,
code='required_if-key-in-requirements',
msg=msg,
)
value = check[1]
if value is not None:
_type = spec[key].get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
try:
with CaptureStd():
dummy = _type_checker(value)
except (Exception, SystemExit):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
self.reporter.error(
path=self.object_path,
code='required_if-value-type',
msg=msg,
)
def _validate_required_by(self, terms, spec, context):
if terms is None:
return
if not isinstance(terms, Mapping):
# This is already reported by schema checking
return
for key, value in terms.items():
if isinstance(value, string_types):
value = [value]
if not isinstance(value, (list, tuple)):
# This is already reported by schema checking
continue
for term in value:
if not isinstance(term, string_types):
# This is already reported by schema checking
continue
if len(set(value)) != len(value) or key in value:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code='required_by-collision',
msg=msg,
)
if not set(value) <= set(spec) or key not in spec:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_by-unknown',
msg=msg,
)
def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
if not self.analyze_arg_spec:
return
if docs is None:
docs = {}
if context is None:
context = []
if last_context_spec is None:
last_context_spec = kwargs
try:
if not context:
add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True)
except Exception:
# Cannot merge fragments
return
# Use this to access type checkers later
module = NoArgsAnsibleModule({})
self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
self._validate_required_by(last_context_spec.get('required_by'), spec, context)
provider_args = set()
args_from_argspec = set()
deprecated_args_from_argspec = set()
doc_options = docs.get('options', {})
if doc_options is None:
doc_options = {}
for arg, data in spec.items():
restricted_argument_names = ('message', 'syslog_facility')
if arg.lower() in restricted_argument_names:
msg = "Argument '%s' in argument_spec " % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
if 'aliases' in data:
for al in data['aliases']:
if al.lower() in restricted_argument_names:
msg = "Argument alias '%s' in argument_spec " % al
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
if not isinstance(data, dict):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must be a dictionary/hash when used"
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec',
msg=msg,
)
continue
removed_at_date = data.get('removed_at_date', None)
if removed_at_date is not None:
try:
if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a removed_at_date '%s' before today" % removed_at_date
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when removed_at_date is not in ISO format. Since schema
# validation already reported this as an error, don't report it a second time.
pass
deprecated_aliases = data.get('deprecated_aliases', None)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'date' in deprecated_alias:
try:
date = deprecated_alias['date']
if parse_isodate(date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
deprecated_alias['name'], deprecated_alias['date'])
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when deprecated_alias['date'] is not in ISO format. Since
# schema validation already reported this as an error, don't report it a second
# time.
pass
has_version = False
if self.collection and self.collection_version is not None:
compare_version = self.collection_version
version_of_what = "this collection (%s)" % self.collection_version_str
code_prefix = 'collection'
has_version = True
elif not self.collection:
compare_version = LOOSE_ANSIBLE_VERSION
version_of_what = "Ansible (%s)" % ansible_version
code_prefix = 'ansible'
has_version = True
removed_in_version = data.get('removed_in_version', None)
if removed_in_version is not None:
try:
collection_name = data.get('removed_from_collection')
removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= removed_in:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a deprecated removed_in_version %r," % removed_in_version
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'version' in deprecated_alias:
try:
collection_name = deprecated_alias.get('collection_name')
version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= version:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal in version %r," % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
deprecated_alias['name'], deprecated_alias['version'], e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
aliases = data.get('aliases', [])
if arg in aliases:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is specified as its own alias"
self.reporter.error(
path=self.object_path,
code='parameter-alias-self',
msg=msg
)
if len(aliases) > len(set(aliases)):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has at least one alias specified multiple times in aliases"
self.reporter.error(
path=self.object_path,
code='parameter-alias-repeated',
msg=msg
)
if not context and arg == 'state':
bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
for bad_state in bad_states:
self.reporter.error(
path=self.object_path,
code='parameter-state-invalid-choice',
msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
args_from_argspec.add(arg)
args_from_argspec.update(aliases)
else:
deprecated_args_from_argspec.add(arg)
deprecated_args_from_argspec.update(aliases)
if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec-options',
msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
)
elif data.get('options'):
# Record provider options from network modules, for later comparison
for provider_arg, provider_data in data.get('options', {}).items():
provider_args.add(provider_arg)
provider_args.update(provider_data.get('aliases', []))
if data.get('required') and data.get('default', object) != object:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is marked as required but specifies a default. Arguments with a" \
" default should not be marked as required"
self.reporter.error(
path=self.object_path,
code='no-default-for-required-parameter',
msg=msg
)
if arg in provider_args:
# Provider args are being removed from network module top level
# don't validate docs<->arg_spec checks below
continue
_type = data.get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
_elements = data.get('elements')
if (_type == 'list') and not _elements:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as list but elements is not defined"
self.reporter.error(
path=self.object_path,
code='parameter-list-no-elements',
msg=msg
)
if _elements:
if not callable(_elements):
module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_elements)
if _type != 'list':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
self.reporter.error(
path=self.object_path,
code='parameter-invalid-elements',
msg=msg
)
arg_default = None
if 'default' in data and not is_empty(data['default']):
try:
with CaptureStd():
arg_default = _type_checker(data['default'])
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
self.reporter.error(
path=self.object_path,
code='incompatible-default-type',
msg=msg
)
continue
elif data.get('default') is None and _type == 'bool' and 'options' not in data:
arg_default = False
doc_options_args = []
for alias in sorted(set([arg] + list(aliases))):
if alias in doc_options:
doc_options_args.append(alias)
if len(doc_options_args) == 0:
# Undocumented arguments will be handled later (search for undocumented-parameter)
doc_options_arg = {}
else:
doc_options_arg = doc_options[doc_options_args[0]]
if len(doc_options_args) > 1:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " with aliases %s is documented multiple times, namely as %s" % (
", ".join([("'%s'" % alias) for alias in aliases]),
", ".join([("'%s'" % alias) for alias in doc_options_args])
)
self.reporter.error(
path=self.object_path,
code='parameter-documented-multiple-times',
msg=msg
)
try:
doc_default = None
if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']):
with CaptureStd():
doc_default = _type_checker(doc_options_arg['default'])
elif doc_options_arg.get('default') is None and _type == 'bool' and 'suboptions' not in doc_options_arg:
doc_default = False
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
self.reporter.error(
path=self.object_path,
code='doc-default-incompatible-type',
msg=msg
)
continue
if arg_default != doc_default:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
self.reporter.error(
path=self.object_path,
code='doc-default-does-not-match-spec',
msg=msg
)
doc_type = doc_options_arg.get('type')
if 'type' in data and data['type'] is not None:
if doc_type is None:
if not arg.startswith('_'): # hidden parameter, for example _raw_params
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation doesn't define type" % (data['type'])
self.reporter.error(
path=self.object_path,
code='parameter-type-not-in-doc',
msg=msg
)
elif data['type'] != doc_type:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
self.reporter.error(
path=self.object_path,
code='doc-type-does-not-match-spec',
msg=msg
)
else:
if doc_type is None:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " uses default type ('str') but documentation doesn't define type"
self.reporter.error(
path=self.object_path,
code='doc-missing-type',
msg=msg
)
elif doc_type != 'str':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " implies type as 'str' but documentation defines as %r" % doc_type
self.reporter.error(
path=self.object_path,
code='implied-parameter-type-mismatch',
msg=msg
)
doc_choices = []
try:
for choice in doc_options_arg.get('choices', []):
try:
with CaptureStd():
doc_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='doc-choices-incompatible-type',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
arg_choices = []
try:
for choice in data.get('choices', []):
try:
with CaptureStd():
arg_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='incompatible-choices',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
if not compare_unordered_lists(arg_choices, doc_choices):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
self.reporter.error(
path=self.object_path,
code='doc-choices-do-not-match-spec',
msg=msg
)
doc_required = doc_options_arg.get('required', False)
data_required = data.get('required', False)
if (doc_required or data_required) and not (doc_required and data_required):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if doc_required:
msg += " is not required, but is documented as being required"
else:
msg += " is required, but is not documented as being required"
self.reporter.error(
path=self.object_path,
code='doc-required-mismatch',
msg=msg
)
doc_elements = doc_options_arg.get('elements', None)
doc_type = doc_options_arg.get('type', 'str')
data_elements = data.get('elements', None)
if (doc_elements and not doc_type == 'list'):
msg = "Argument '%s " % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % doc_elements
self.reporter.error(
path=self.object_path,
code='doc-elements-invalid',
msg=msg
)
if (doc_elements or data_elements) and not (doc_elements == data_elements):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if data_elements:
msg += " specifies elements as %s," % data_elements
else:
msg += " does not specify elements,"
if doc_elements:
msg += "but elements is documented as being %s" % doc_elements
else:
msg += "but elements is not documented"
self.reporter.error(
path=self.object_path,
code='doc-elements-mismatch',
msg=msg
)
spec_suboptions = data.get('options')
doc_suboptions = doc_options_arg.get('suboptions', {})
if spec_suboptions:
if not doc_suboptions:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has sub-options but documentation does not define it"
self.reporter.error(
path=self.object_path,
code='missing-suboption-docs',
msg=msg
)
self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
context=context + [arg], last_context_spec=data)
for arg in args_from_argspec:
if not str(arg).isidentifier():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is not a valid python identifier"
self.reporter.error(
path=self.object_path,
code='parameter-invalid',
msg=msg
)
if docs:
args_from_docs = set()
for arg, data in doc_options.items():
args_from_docs.add(arg)
args_from_docs.update(data.get('aliases', []))
args_missing_from_docs = args_from_argspec.difference(args_from_docs)
docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
for arg in args_missing_from_docs:
if arg in provider_args:
# Provider args are being removed from network module top level
# So they are likely not documented on purpose
continue
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in the argument_spec, but not documented in the module documentation"
self.reporter.error(
path=self.object_path,
code='undocumented-parameter',
msg=msg
)
for arg in docs_missing_from_args:
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
self.reporter.error(
path=self.object_path,
code='nonexistent-parameter-documented',
msg=msg
)
def _check_for_new_args(self, doc):
if not self.base_branch or self._is_new_module():
return
with CaptureStd():
try:
existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True)
existing_options = existing_doc.get('options', {}) or {}
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.warning(
path=self.object_path,
code='missing-existing-doc-fragment',
msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
)
return
except Exception as e:
self.reporter.warning_trace(
path=self.object_path,
tracebk=e
)
self.reporter.warning(
path=self.object_path,
code='unknown-doc-fragment',
                    msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need to be updated')
)
return
try:
mod_collection_name = existing_doc.get('version_added_collection')
mod_version_added = self._create_strict_version(
str(existing_doc.get('version_added', '0.0')),
collection_name=mod_collection_name)
except ValueError:
mod_collection_name = self.collection_name
mod_version_added = self._create_strict_version('0.0')
options = doc.get('options', {}) or {}
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
for option, details in options.items():
try:
names = [option] + details.get('aliases', [])
except (TypeError, AttributeError):
# Reporting of this syntax error will be handled by schema validation.
continue
if any(name in existing_options for name in names):
# The option already existed. Make sure version_added didn't change.
for name in names:
existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
existing_version = existing_options.get(name, {}).get('version_added')
if existing_version:
break
current_collection_name = details.get('version_added_collection')
current_version = details.get('version_added')
if current_collection_name != existing_collection_name:
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added-collection',
msg=('version_added for existing option (%s) should '
'belong to collection %r. Currently belongs to %r' %
(option, current_collection_name, existing_collection_name))
)
elif str(current_version) != str(existing_version):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for existing option (%s) should '
'be %r. Currently %r' %
(option, existing_version, current_version))
)
continue
try:
collection_name = details.get('version_added_collection')
version_added = self._create_strict_version(
str(details.get('version_added', '0.0')),
collection_name=collection_name)
except ValueError as e:
self.reporter.error(
path=self.object_path,
code='option-invalid-version-added',
msg=('version_added for option (%s) is not a valid '
'version for %s. Currently %r. Error: %s' %
(option, collection_name, details.get('version_added', '0.0'), e))
)
continue
if collection_name != self.collection_name:
continue
if (strict_ansible_version != mod_version_added and
(version_added < strict_ansible_version or
strict_ansible_version < version_added)):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for new option (%s) should '
'be %r. Currently %r' %
(option, should_be, version_added))
)
return existing_doc
@staticmethod
def is_blacklisted(path):
base_name = os.path.basename(path)
file_name = os.path.splitext(base_name)[0]
if file_name.startswith('_') and os.path.islink(path):
return True
if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.BLACKLIST):
return True
for pat in ModuleValidator.BLACKLIST_PATTERNS:
if fnmatch(base_name, pat):
return True
return False
def validate(self):
super(ModuleValidator, self).validate()
if not self._python_module() and not self._powershell_module():
self.reporter.error(
path=self.object_path,
code='invalid-extension',
msg=('Official Ansible modules must have a .py '
'extension for python modules or a .ps1 '
'for powershell modules')
)
self._python_module_override = True
if self._python_module() and self.ast is None:
self.reporter.error(
path=self.object_path,
code='python-syntax-error',
msg='Python SyntaxError while parsing module'
)
try:
compile(self.text, self.path, 'exec')
except Exception:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
end_of_deprecation_should_be_removed_only = False
if self._python_module():
doc_info, docs = self._validate_docs()
# See if current version => deprecated.removed_in, ie, should be docs only
if docs and docs.get('deprecated', False):
if 'removed_in' in docs['deprecated']:
removed_in = None
collection_name = docs['deprecated'].get('removed_from_collection')
version = docs['deprecated']['removed_in']
if collection_name != self.collection_name:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-source',
msg=('The deprecation version for a module must be added in this collection')
)
else:
try:
removed_in = self._create_strict_version(str(version), collection_name=collection_name)
except ValueError as e:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-version',
msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
)
if removed_in:
if not self.collection:
strict_ansible_version = self._create_strict_version(
'.'.join(ansible_version.split('.')[:2]), self.collection_name)
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
elif self.collection_version:
strict_ansible_version = self.collection_version
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
# handle deprecation by date
if 'removed_at_date' in docs['deprecated']:
try:
removed_at_date = docs['deprecated']['removed_at_date']
if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
except ValueError:
# This happens if the date cannot be parsed. This is already checked by the schema.
pass
if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
self._validate_ansible_module_call(docs)
self._check_for_sys_exit()
self._find_blacklist_imports()
main = self._find_main_call()
self._find_module_utils(main)
self._find_has_import()
first_callable = self._get_first_callable()
self._ensure_imports_below_docs(doc_info, first_callable)
self._check_for_subprocess()
self._check_for_os_call()
if self._powershell_module():
self._validate_ps_replacers()
docs_path = self._find_ps_docs_py_file()
# We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
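            # For example, a module containing a line such as
            # "#AnsibleRequires -CSharpUtil Ansible.Basic" (matched case-insensitively)
            # is treated as using Ansible.Basic.AnsibleModule and can be introspected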
if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_BLACKLIST:
with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv:
docs = docs_mv._validate_docs()[1]
self._validate_ansible_module_call(docs)
self._check_gpl3_header()
if not self._just_docs() and not end_of_deprecation_should_be_removed_only:
self._check_interpreter(powershell=self._powershell_module())
self._check_type_instead_of_isinstance(
powershell=self._powershell_module()
)
if end_of_deprecation_should_be_removed_only:
            # Ensure that `if __name__ == '__main__':` calls `removed_module()`, which ensures that the module has no code left in it
main = self._find_main_call('removed_module')
# FIXME: Ensure that the version in the call to removed_module is less than +2.
# Otherwise it's time to remove the file (This may need to be done in another test to
# avoid breaking whenever the Ansible version bumps)
class PythonPackageValidator(Validator):
BLACKLIST_FILES = frozenset(('__pycache__',))
def __init__(self, path, reporter=None):
super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(path)
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def validate(self):
super(PythonPackageValidator, self).validate()
if self.basename in self.BLACKLIST_FILES:
return
init_file = os.path.join(self.path, '__init__.py')
if not os.path.exists(init_file):
self.reporter.error(
path=self.object_path,
code='subdirectory-missing-init',
msg='Ansible module subdirectories must contain an __init__.py'
)
def setup_collection_loader():
collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep)
_AnsibleCollectionFinder(collections_paths)
def re_compile(value):
"""
    Argparse expects the type callable to raise TypeError on invalid values, but
    re.compile raises an re.error exception instead.
    This function is a shorthand that converts the re.error exception into a
    TypeError.
"""
try:
return re.compile(value)
except re.error as e:
raise TypeError(e)
def run():
parser = argparse.ArgumentParser(prog="validate-modules")
parser.add_argument('modules', nargs='+',
help='Path to module or module directory')
parser.add_argument('-w', '--warnings', help='Show warnings',
action='store_true')
parser.add_argument('--exclude', help='RegEx exclusion pattern',
type=re_compile)
parser.add_argument('--arg-spec', help='Analyze module argument spec',
action='store_true', default=False)
parser.add_argument('--base-branch', default=None,
help='Used in determining if new options were added')
parser.add_argument('--format', choices=['json', 'plain'], default='plain',
help='Output format. Default: "%(default)s"')
parser.add_argument('--output', default='-',
help='Output location, use "-" for stdout. '
'Default "%(default)s"')
parser.add_argument('--collection',
help='Specifies the path to the collection, when '
'validating files within a collection. Ensure '
'that ANSIBLE_COLLECTIONS_PATH is set so the '
'contents of the collection can be located')
parser.add_argument('--collection-version',
help='The collection\'s version number used to check '
'deprecations')
args = parser.parse_args()
args.modules = [m.rstrip('/') for m in args.modules]
reporter = Reporter()
git_cache = GitCache(args.base_branch)
check_dirs = set()
routing = None
if args.collection:
setup_collection_loader()
routing_file = 'meta/runtime.yml'
# Load meta/runtime.yml if it exists, as it may contain deprecation information
if os.path.isfile(routing_file):
try:
with open(routing_file) as f:
routing = yaml.safe_load(f)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
for module in args.modules:
if os.path.isfile(module):
path = module
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_blacklisted(path):
continue
with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
git_cache=git_cache, reporter=reporter, routing=routing) as mv1:
mv1.validate()
check_dirs.add(os.path.dirname(path))
for root, dirs, files in os.walk(module):
basedir = root[len(module) + 1:].split('/', 1)[0]
if basedir in BLACKLIST_DIRS:
continue
for dirname in dirs:
if root == module and dirname in BLACKLIST_DIRS:
continue
path = os.path.join(root, dirname)
if args.exclude and args.exclude.search(path):
continue
check_dirs.add(path)
for filename in files:
path = os.path.join(root, filename)
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_blacklisted(path):
continue
with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
git_cache=git_cache, reporter=reporter, routing=routing) as mv2:
mv2.validate()
if not args.collection:
for path in sorted(check_dirs):
pv = PythonPackageValidator(path, reporter=reporter)
pv.validate()
if args.format == 'plain':
sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
else:
sys.exit(reporter.json(warnings=args.warnings, output=args.output))
class GitCache:
def __init__(self, base_branch):
self.base_branch = base_branch
if self.base_branch:
self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/'])
else:
self.base_tree = []
try:
self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/'])
except GitError as ex:
if ex.status == 128:
# fallback when there is no .git directory
self.head_tree = self._get_module_files()
else:
raise
except OSError as ex:
if ex.errno == errno.ENOENT:
# fallback when git is not installed
self.head_tree = self._get_module_files()
else:
raise
self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1'))
self.base_module_paths.pop('__init__.py', None)
self.head_aliased_modules = set()
for path in self.head_tree:
filename = os.path.basename(path)
if filename.startswith('_') and filename != '__init__.py':
if os.path.islink(path):
self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
@staticmethod
def _get_module_files():
module_files = []
for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'):
for file_name in file_names:
module_files.append(os.path.join(dir_path, file_name))
return module_files
@staticmethod
def _git(args):
cmd = ['git'] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise GitError(stderr, p.returncode)
return stdout.decode('utf-8').splitlines()
class GitError(Exception):
def __init__(self, message, status):
super(GitError, self).__init__(message)
self.status = status
def main():
try:
run()
except KeyboardInterrupt:
pass
| gpl-3.0 | 4,129,365,761,759,745,000 | 42.561913 | 157 | 0.487542 | false | 4.810723 | false | false | false |
updownlife/multipleK | dependencies/biopython-1.65/Tests/test_GACrossover.py | 1 | 18105 | #!/usr/bin/env python
"""Tests different Genetic Algorithm crossover classes.
"""
# standard library
import unittest
# biopython
from Bio.Seq import MutableSeq
from Bio.Alphabet import SingleLetterAlphabet
# local stuff
from Bio.GA.Organism import Organism
from Bio.GA.Crossover.General import SafeFitnessCrossover
from Bio.GA.Crossover.GeneralPoint import GeneralPointCrossover
from Bio.GA.Crossover.GeneralPoint import InterleaveCrossover
from Bio.GA.Crossover.TwoPoint import TwoPointCrossover
from Bio.GA.Crossover.Point import SinglePointCrossover
from Bio.GA.Crossover.Uniform import UniformCrossover
class TestAlphabet(SingleLetterAlphabet):
"""Simple test alphabet.
"""
letters = ["1", "2", "3"]
def contains(self, oalpha):
return True
def test_fitness(genome):
"""Simple class for calculating fitnesses.
"""
seq_genome = genome.toseq()
return int(str(seq_genome))
class SinglePointTest(unittest.TestCase):
"""Test simple point crossovers.
"""
def setUp(self):
self.alphabet = TestAlphabet()
genome_1 = MutableSeq("11111", self.alphabet)
self.org_1 = Organism(genome_1, test_fitness)
genome_2 = MutableSeq("22222", self.alphabet)
self.org_2 = Organism(genome_2, test_fitness)
self.crossover = SinglePointCrossover(1.0)
def test_basic_crossover(self):
"""Test basic point crossover functionality.
"""
start_genome_1 = self.org_1.genome[:]
start_genome_2 = self.org_2.genome[:]
new_org_1, new_org_2 = self.crossover.do_crossover(self.org_1,
self.org_2)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_2.genome), str(start_genome_2),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Returned an exact copy of the original organism.")
class UniformTest(unittest.TestCase):
"""Test simple point crossovers.
"""
def setUp(self):
self.alphabet = TestAlphabet()
genome_1 = MutableSeq("11111111", self.alphabet)
self.org_1 = Organism(genome_1, test_fitness)
genome_2 = MutableSeq("22222222", self.alphabet)
self.org_2 = Organism(genome_2, test_fitness)
genome_3 = MutableSeq("333", self.alphabet)
self.org_3 = Organism(genome_3, test_fitness)
self.crossover = UniformCrossover(1.0, 0.8)
def test_basic_crossover(self):
"""Test basic uniform crossover functionality.
"""
start_genome_1 = self.org_1.genome[:]
start_genome_2 = self.org_2.genome[:]
new_org_1, new_org_2 = self.crossover.do_crossover(self.org_1,
self.org_2)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_2.genome), str(start_genome_2),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Returned an exact copy of the original organism.")
def test_ds_prop_uniform_crossover(self):
"""Test properties of differing genome length, uniform crossovers.
"""
new_org_1, new_org_2 = self.crossover.do_crossover(self.org_1,
self.org_3)
self.assertTrue(len(new_org_1.genome) > len(new_org_2.genome),
"Strings are of wrong sizes after uniform crossover.")
self.assertEqual(str(new_org_2.genome).count("1"),
str(new_org_1.genome).count("3"),
"There should be equal distributions of the smaller string")
self.assertEqual(str(self.org_1.genome[len(new_org_2.genome):]),
str(new_org_1.genome[len(new_org_2.genome):]),
"Uniform should not touch non-overlapping elements of genome")
def test_ss_prop_uniform_crossover(self):
"""Test properties of equal genome length, uniform crossovers.
"""
new_org_1, new_org_2 = self.crossover.do_crossover(self.org_1,
self.org_2)
self.assertEqual(len(new_org_1.genome), len(new_org_2.genome),
"Strings are of different sizes after uniform crossover.")
self.assertEqual(str(new_org_1.genome).count("1"),
str(new_org_2.genome).count("2"),
"There should be equal, inverse distributions")
self.assertEqual(str(new_org_1.genome).count("2"),
str(new_org_2.genome).count("1"),
"There should be equal, inverse distributions")
class InterleaveTest(unittest.TestCase):
"""Test 'simple' 4-point crossovers.
"""
def setUp(self):
self.alphabet = TestAlphabet()
genome_1 = MutableSeq("11111", self.alphabet)
self.org_1 = Organism(genome_1, test_fitness)
genome_2 = MutableSeq("22222", self.alphabet)
self.org_2 = Organism(genome_2, test_fitness)
genome_3 = MutableSeq("333333333", self.alphabet)
self.org_3 = Organism(genome_3, test_fitness)
self._crossover = InterleaveCrossover(1.0)
def test_basic_crossover(self):
"""Test basic interleave crossover functionality.
"""
start_genome_1 = self.org_1.genome[:]
start_genome_2 = self.org_2.genome[:]
new_org_1, new_org_2 = self._crossover.do_crossover(self.org_1,
self.org_2)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_2.genome), str(start_genome_2),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Returned an exact copy of the original organism.")
def test_prop_sym_crossover(self):
"""Test properties of interleave point crossover."""
new_org_1, new_org_2 = self._crossover.do_crossover(self.org_1,
self.org_2)
self.assertEqual(len(new_org_1.genome), len(new_org_2.genome),
"Strings are of different sizes after interleave point crossover.")
self.assertEqual(str(new_org_1.genome).count("1"),
str(new_org_2.genome).count("2"),
"There should be equal, inverse distributions")
self.assertEqual(str(new_org_1.genome).count("2"),
str(new_org_2.genome).count("1"),
"There should be equal, inverse distributions")
self.assertEqual(str(new_org_1.genome), "12121",
"Did not interleave.")
self.assertEqual(str(new_org_2.genome), "21212",
"Did not interleave.")
def test_prop_asym_crossover(self):
"""Test basic interleave crossover with asymmetric genomes."""
start_genome_1 = self.org_1.genome[:]
start_genome_3 = self.org_3.genome[:]
new_org_1, new_org_3 = self._crossover.do_crossover(self.org_1,
self.org_3)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_3.genome), str(start_genome_3),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_3), str(self.org_3),
"Returned an exact copy of the original organism.")
self.assertEqual(str(new_org_1.genome), "13131",
"Did not interleave with growth.")
self.assertEqual(str(new_org_3.genome), "31313333",
"Did not interleave with growth.")
class FourPointTest(unittest.TestCase):
"""Test 'simple' 4-point crossovers.
"""
def setUp(self):
self.alphabet = TestAlphabet()
genome_1 = MutableSeq("11111", self.alphabet)
self.org_1 = Organism(genome_1, test_fitness)
genome_2 = MutableSeq("22222", self.alphabet)
self.org_2 = Organism(genome_2, test_fitness)
self.sym_crossover = GeneralPointCrossover(3, 1.0)
self.asym_crossover = GeneralPointCrossover(4, 1.0)
def test_basic_crossover(self):
"""Test basic 4-point crossover functionality.
"""
start_genome_1 = self.org_1.genome[:]
start_genome_2 = self.org_2.genome[:]
new_org_1, new_org_2 = self.sym_crossover.do_crossover(self.org_1,
self.org_2)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_2.genome), str(start_genome_2),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Returned an exact copy of the original organism.")
def test_prop_sym_crossover(self):
"""Test properties of symmetric 4-point crossover.
"""
new_org_1, new_org_2 = self.sym_crossover.do_crossover(self.org_1,
self.org_2)
self.assertEqual(len(new_org_1.genome), len(new_org_2.genome),
"Strings are of different sizes after symmetric crossover.")
self.assertEqual(str(new_org_1.genome).count("1"),
str(new_org_2.genome).count("2"),
"There should be equal, inverse distributions")
self.assertEqual(str(new_org_1.genome).count("2"),
str(new_org_2.genome).count("1"),
"There should be equal, inverse distributions")
def test_basic_asym_crossover(self):
"""Test basic asymmetric 2-point crossover functionality.
"""
start_genome_1 = self.org_1.genome[:]
start_genome_2 = self.org_2.genome[:]
new_org_1, new_org_2 = self.asym_crossover.do_crossover(self.org_1,
self.org_2)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_2.genome), str(start_genome_2),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Returned an exact copy of the original organism.")
class TwoPointTest(unittest.TestCase):
"""Test simple 2-point crossovers.
"""
def setUp(self):
self.alphabet = TestAlphabet()
genome_1 = MutableSeq("11111111", self.alphabet)
self.org_1 = Organism(genome_1, test_fitness)
genome_2 = MutableSeq("22222222", self.alphabet)
self.org_2 = Organism(genome_2, test_fitness)
self.asym_crossover = TwoPointCrossover(1.0)
def test_basic_asym_crossover(self):
"""Test basic asymmetric 2-point crossover functionality.
"""
start_genome_1 = self.org_1.genome[:]
start_genome_2 = self.org_2.genome[:]
new_org_1, new_org_2 = self.asym_crossover.do_crossover(self.org_1,
self.org_2)
self.assertNotEqual(str(new_org_1.genome), str(start_genome_1),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_2.genome), str(start_genome_2),
"Did not perform a crossover when expected.")
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Returned an exact copy of the original organism.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Returned an exact copy of the original organism.")
class TestCrossover:
"""Provide basic crossover functionality for testing SafeFitness.
"""
def __init__(self):
# whether or not to produce new organisms with lower fitness
# higher fitness, or the same organism
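        # e.g. with type "lower", genomes "2" and "2" become "1" and "1", which
        # SafeFitnessCrossover is expected to reject in favour of the originals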
self.type = "lower"
def do_crossover(self, org_1, org_2):
seq_org1 = org_1.genome.toseq()
seq_org2 = org_2.genome.toseq()
org1_genome = str(seq_org1)
org2_genome = str(seq_org2)
new_org_1 = org_1.copy()
new_org_2 = org_2.copy()
if self.type == "same":
return new_org_1, new_org_2
elif self.type == "lower":
new_org1_genome = str(int(org1_genome) - 1)
new_org2_genome = str(int(org2_genome) - 1)
new_org_1.genome = MutableSeq(new_org1_genome,
org_1.genome.alphabet)
new_org_2.genome = MutableSeq(new_org2_genome,
org_2.genome.alphabet)
elif self.type == "higher":
new_org1_genome = str(int(org1_genome) + 1)
new_org2_genome = str(int(org2_genome) + 1)
else:
raise ValueError("Got type %s" % self.type)
new_org_1.genome = MutableSeq(new_org1_genome,
org_1.genome.alphabet)
new_org_2.genome = MutableSeq(new_org2_genome,
org_2.genome.alphabet)
return new_org_1, new_org_2
class SafeFitnessTest(unittest.TestCase):
"""Tests for crossovers which do not reduce fitness.
"""
def setUp(self):
self.alphabet = TestAlphabet()
genome_1 = MutableSeq("2", self.alphabet)
self.org_1 = Organism(genome_1, test_fitness)
genome_2 = MutableSeq("2", self.alphabet)
self.org_2 = Organism(genome_2, test_fitness)
self.test_crossover = TestCrossover()
def test_keep_higher(self):
"""Make sure we always keep higher fitness when specified.
"""
crossover = SafeFitnessCrossover(self.test_crossover)
self.test_crossover.type = "same"
new_org_1, new_org_2 = crossover.do_crossover(self.org_1, self.org_2)
self.assertEqual(str(new_org_1), str(self.org_1),
"Did not retain organism for same fitness.")
self.assertEqual(str(new_org_2), str(self.org_2),
"Did not retain organism for same fitness.")
self.test_crossover.type = "lower"
new_org_1, new_org_2 = crossover.do_crossover(self.org_1, self.org_2)
self.assertEqual(str(new_org_1), str(self.org_1),
"Did not retain organism when crossover had lower fitness.")
self.assertEqual(str(new_org_2), str(self.org_2),
"Did not retain organism when crossover had lower fitness.")
self.test_crossover.type = "higher"
new_org_1, new_org_2 = crossover.do_crossover(self.org_1, self.org_2)
self.assertTrue(new_org_1.fitness > self.org_1.fitness and
new_org_2.fitness > self.org_2.fitness,
"Did not get new organism when it had higher fitness.")
def test_keep_lower(self):
"""Make sure we do normal crossover functionality when specified.
"""
crossover = SafeFitnessCrossover(self.test_crossover, 1.0)
self.test_crossover.type = "same"
new_org_1, new_org_2 = crossover.do_crossover(self.org_1, self.org_2)
self.assertEqual(str(new_org_1), str(self.org_1),
"Did not retain organism for same fitness.")
self.assertEqual(str(new_org_2), str(self.org_2),
"Did not retain organism for same fitness.")
self.test_crossover.type = "lower"
new_org_1, new_org_2 = crossover.do_crossover(self.org_1, self.org_2)
self.assertNotEqual(str(new_org_1), str(self.org_1),
"Retained lower fitness organism in crossover.")
self.assertNotEqual(str(new_org_2), str(self.org_2),
"Retained lower fitness organism in crossover.")
self.test_crossover.type = "higher"
new_org_1, new_org_2 = crossover.do_crossover(self.org_1, self.org_2)
self.assertTrue(new_org_1.fitness > self.org_1.fitness and
new_org_2.fitness > self.org_2.fitness,
"Did not get new organism under higher fitness conditions.")
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| gpl-2.0 | 2,750,946,505,808,609,300 | 41.006961 | 87 | 0.572328 | false | 3.755445 | true | false | false |
Titan-C/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 1 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
# #############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
# #############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
# #############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# #############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause | 3,837,792,158,449,737,700 | 36.021978 | 79 | 0.591273 | false | 3.94959 | false | false | false |
petebachant/PXL | pxl/tests/test_fdiff.py | 1 | 1436 | from __future__ import division, print_function
from .. import fdiff
from ..fdiff import *
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
from uncertainties import unumpy
plot = False
def test_second_order_diff():
"""Test `second_order_diff`."""
# Create a non-equally spaced x vector
x = np.append(np.linspace(0, np.pi, 100),
np.linspace(np.pi + 0.01, 2*np.pi, 400))
u = np.sin(x)
dudx = second_order_diff(u, x)
assert dudx.shape == u.shape
# Assert that this function is almost identical to cos(x)
np.testing.assert_allclose(dudx, np.cos(x), rtol=1e-3)
if plot:
plt.plot(x, dudx, "-o", lw=2, alpha=0.5)
plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
plt.show()
def test_second_order_diff_uncertainties():
"""Test that `second_order_diff` works with uncertainties."""
# Create a non-equally spaced x vector
x = np.append(np.linspace(0, np.pi, 50),
np.linspace(np.pi + 0.01, 2*np.pi, 100))
x_unc = unumpy.uarray(x, np.ones(len(x))*1e-3)
u = unumpy.uarray(np.sin(x), np.ones(len(x))*1e-2)
dudx = second_order_diff(u, x)
print(dudx[:5])
print(dudx[-5:])
if plot:
plt.errorbar(x, unumpy.nominal_values(dudx), yerr=unumpy.std_devs(dudx),
fmt="-o", lw=2, alpha=0.5)
plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
plt.show()
| gpl-3.0 | 5,033,398,059,606,500,000 | 31.636364 | 80 | 0.598189 | false | 2.777563 | false | false | false |
bgaunt/openEcslent | engine/squadAI.py | 2 | 3788 | #---------------------------------------------------------------------------
# Copyright 2010, 2011 Sushil J. Louis and Christopher E. Miles,
# Evolutionary Computing Systems Laboratory, Department of Computer Science
# and Engineering, University of Nevada, Reno.
#
# This file is part of OpenECSLENT
#
# OpenECSLENT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenECSLENT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenECSLENT. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
#-------------------------End Copyright Notice------------------------------
import math
import random
import timer
from aspect import Aspect
from vector import vector3
from mathlib import differenceBetweenAngles, clamp
import command
kUpdateStrategyFrequency = 0.1
class SquadAI(Aspect):
"""
Maintains group level control over a set of units
"""
class LongTermData(object):
"""
Ultimate goal / long term for this squad
Input from the strategic AI / HumanPlayer goes in here
"""
def __init__(self):
self.commands = []
class MediumTermData(object):
def __init__(self):
pass
class ImmediateData(object):
def __init__(self):
pass
def init(self):
self.squadMembers = []
self.longTermUpdateTimer = timer.Timer(kUpdateStrategyFrequency, fireFirstCheck=True)
self.immediateData = self.ImmediateData()
self.mediumTermData = self.MediumTermData()
self.longTermData = self.LongTermData()
self.immediateData.ddContext = self.engine.debugDrawSystem.getContext()
self.mediumTermData.ddContext = self.engine.debugDrawSystem.getContext()
self.longTermData.ddContext = self.engine.debugDrawSystem.getContext()
def tick(self, dtime):
if self.longTermUpdateTimer.check(dtime):
self.longTermUpdate()
@property
def command(self):
pass
@command.getter
def command(self):
return self.longTermData.commands and self.longTermData.commands[0] or None
@property
def commands(self):
return self.longTermData.commands
@commands.setter
def commands(self, commands):
self.longTermData.commands = commands
def longTermUpdate(self):
"""The squad is not particularly used at the moment, - since we aren't doing any real coordination
so most of this is just pass through
"""
if not self.commands:
return
current = self.commands[0]
if type(current) == command.MoveTo:
for squadMember in self.squadMembers:
squadMember.UnitAI.commands = self.commands
elif type(current) == command.NetSlave:
for squadMember in self.squadMembers:
squadMember.UnitAI.commands = self.commands
elif type(current) == command.ManualControl:
for squadMember in self.squadMembers:
squadMember.UnitAI.commands = self.commands
elif type(current) == command.Stop:
for squadMember in self.squadMembers:
squadMember.UnitAI.commands = self.commands
else:
raise Exception('not yet implemented')
| gpl-3.0 | -829,340,554,603,509,400 | 32.821429 | 106 | 0.63226 | false | 4.251403 | false | false | false |
chhsiao1981/f6a_tw_crawler | f6a_tw_crawler/main.py | 1 | 2575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from f6a_tw_crawler.constants import *
import gevent.monkey; gevent.monkey.patch_all()
from bottle import Bottle, request, response, route, run, post, get, static_file, redirect, HTTPError, view, template
import random
import math
import base64
import time
import ujson as json
import sys
import argparse
from beaker.middleware import SessionMiddleware
from f6a_tw_crawler import cfg
from f6a_tw_crawler import util
app = Bottle()
@app.get('/')
def dummy():
return _process_result("1")
def _process_params():
return dict(request.params)
def _process_json_request():
return util.json_loads(_process_body_request())
def _process_body_request():
f = request.body
f.seek(0)
return f.read()
def _process_result(the_obj):
response.set_header('Accept', '*')
response.set_header('Access-Control-Allow-Headers', 'Content-Type, Accept')
response.set_header('Access-Control-Allow-Origin', '*')
response.set_header('Access-Control-Allow-Methods', '*')
response.content_type = 'application/json'
return util.json_dumps(the_obj)
def _process_mime_result(content_type, content):
response.set_header('Accept', '*')
response.set_header('Access-Control-Allow-Headers', 'Content-Type, Accept')
response.set_header('Access-Control-Allow-Origin', '*')
response.set_header('Access-Control-Allow-Methods', '*')
response.content_type = content_type
return content
def parse_args():
''' '''
parser = argparse.ArgumentParser(description='f6a_tw_crawler')
parser.add_argument('-i', '--ini', type=str, required=True, help="ini filename")
parser.add_argument('-l', '--log_filename', type=str, default='', required=False, help="log filename")
parser.add_argument('-p', '--port', type=str, required=True, help="port")
args = parser.parse_args()
return (S_OK, args)
def _main():
global app
(error_code, args) = parse_args()
cfg.init({"port": args.port, "ini_filename": args.ini, 'log_filename': args.log_filename})
session_opts = {
'session.cookie_expires': True,
'session.encrypt_key': cfg.config.get('session_encrypt_key', '')
'session.httponly': True,
'session.timeout': cfg.config.get('session_expire_timestamp', SESSION_EXPIRE_TIMESTAMP)
'session.type': 'cookie',
'session.validate_key': True,
}
app = SessionMiddleware(app, session_opts)
run(app, host='0.0.0.0', port=cfg.config.get('port'), server='gevent')
if __name__ == '__main__':
_main()
| mit | -8,639,354,462,411,019,000 | 25.822917 | 117 | 0.667573 | false | 3.406085 | false | false | false |
Erotemic/local | git_tools/git_xadd.py | 1 | 4330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Adds and commits a change to a local branch (but not the current one)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import git
# import sys
class CheckoutContext(object):
def __init__(self, repo):
self.repo = repo
self.orig_branch_name = repo.active_branch.name
def __enter__(self):
return self
def __exit__(self, type, value, tb):
# if True:
# print('Changing to original branch {}'.format(self.orig_branch_name))
self.repo.git.checkout(self.orig_branch_name)
def git_xadd(branch, files, base=None, message='wip', dry=False):
repo = git.Repo()
existing_branches = {branch.name for branch in repo.branches}
if branch not in existing_branches:
if base is None:
raise ValueError('Branch {!r} does not exist'.format(branch))
elif base not in existing_branches:
raise ValueError('Base branch {!r} does not exist'.format(base))
else:
# Create the branch if the base is known
with CheckoutContext(repo):
repo.git.checkout(base)
if dry:
print('Would created new branch {}'.format(branch))
else:
repo.git.checkout(branch, b=True)
print('Created new branch {}'.format(branch))
with CheckoutContext(repo):
repo.git.checkout(branch)
if dry:
print('Changing to branch {}'.format(branch))
print('Would add files {}'.format(files))
print('Would commit with message {}'.format(message))
else:
repo.git.add(files)
repo.git.commit(m=message)
# def get_varargs(argv=None):
# """
# Returns positional args specified directly after the scriptname
# and before any args starting with '-' on the commandline.
# """
# if argv is None:
# argv = sys.argv
# scriptname = argv[0]
# if scriptname == '':
# # python invoked by iteself
# pos_start = 0
# pos_end = 0
# else:
# pos_start = pos_end = 1
# for idx in range(pos_start, len(argv)):
# if argv[idx].startswith('-'):
# pos_end = idx
# break
# else:
# pos_end = len(argv)
# varargs = argv[pos_start:pos_end]
# return varargs
if __name__ == '__main__':
r"""
SeeAlso:
git_squash_streaks.py
Ignore:
python ~/misc/git/git_xadd.py dev/doc_fixes arrows/ocv/split_image.cxx -m "added a bit more info"
git merge dev/doc_fixes
CommandLine:
export PYTHONPATH=$PYTHONPATH:/home/joncrall/misc
python ~/misc/git_xadd.py
"""
import argparse
parser = argparse.ArgumentParser(description='git-xadd add files to non-working branches')
parser.add_argument('files', nargs='+',
help='Files to externally add')
parser.add_argument(*('-m', '--message'), type=str, default='wip',
help='commit message')
parser.add_argument('--branch', type=str, default=None, required=True,
help='branch to add to')
parser.add_argument('--base', type=str, default=None,
help='base of external branch (used if branch doesnt exist)')
parser.add_argument(*('-n', '--dry'), action='store_true',
default=False, help='dry run')
args = parser.parse_args()
# import ubelt as ub
# print('sys.argv = {!r}'.format(sys.argv))
# message = ub.argval(('-m', '--message'), default='wip')
# branch = ub.argval('--branch', default=None)
# base = ub.argval('--base', default=None)
# dry = ub.argflag(('-n', '--dry'))
# if branch is None:
# raise ValueError('must specify --branch')
# varargs = get_varargs()
# files = varargs[:]
branch = args.branch
message = args.message
dry = args.dry
base = args.base
files = args.files
if branch is None:
raise ValueError('must specify --branch')
if len(files) == 0:
raise ValueError('Must specify files')
# print('args = {!r}'.format(args))
git_xadd(branch, files, message=message, base=base, dry=dry)
| gpl-3.0 | -8,634,392,987,533,247,000 | 32.307692 | 105 | 0.569746 | false | 3.831858 | false | false | false |
gregoil/rotest | tests/api/resource_control/test_lock_resources.py | 1 | 8138 | """Basic unittests for the server resource control operations."""
from __future__ import absolute_import
from functools import partial
from six.moves import http_client
from django.contrib.auth.models import User
from django.test import Client, TransactionTestCase
from rotest.management.models import DemoComplexResourceData
from tests.api.utils import request
class TestLockResources(TransactionTestCase):
"""Assert operations of lock resources request."""
fixtures = ['resource_ut.json']
def setUp(self):
"""Setup test environment."""
self.client = Client()
_, token_object = request(client=self.client,
path="tests/get_token", method="get")
self.token = token_object.token
self.requester = partial(request, self.client,
"resources/lock_resources")
def test_lock_empty(self):
"""Assert trying to lock no resources."""
response, content = self.requester(
json_data={
"descriptors": [],
"timeout": 0,
"token": self.token
})
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(len(content.resource_descriptors), 0)
def test_lock_valid_resource(self):
"""Assert trying to lock valid resource."""
response, content = self.requester(
json_data={
"descriptors": [
{
"type": "rotest.management.models.ut_models."
"DemoResourceData",
"properties": {}
}
],
"timeout": 0,
"token": self.token
})
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(len(content.resource_descriptors), 1)
def test_invalid_resource_field(self):
"""Assert invalid resource field filter requested."""
response, _ = self.requester(
json_data={
"descriptors": [
{
"type": "rotest.management.models.ut_models."
"DemoResourceData",
"properties": {
"invalid_field": "field1"
}
}
],
"timeout": 0,
"token": self.token
})
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
def test_lock_complex(self):
"""Assert trying to lock complex resource."""
resources = DemoComplexResourceData.objects.filter(
name='complex_resource1')
resource, = resources
sub_resource = resource.demo1
self.assertTrue(resource.is_available())
self.assertTrue(sub_resource.is_available())
response, _ = self.requester(
json_data={
"descriptors": [
{
"type": "rotest.management.models.ut_models."
"DemoComplexResourceData",
"properties": {}
}
],
"timeout": 0,
"token": self.token
})
# refresh from db
resources = DemoComplexResourceData.objects.filter(
name='complex_resource1')
resource, = resources
sub_resource = resource.demo1
self.assertEqual(response.status_code, http_client.OK)
self.assertFalse(resource.is_available())
self.assertFalse(sub_resource.is_available())
def test_lock_complex_sub_resource_unavailable(self):
"""Assert trying to lock resource with sub-resource unavailable."""
resources = DemoComplexResourceData.objects.filter(
name='complex_resource1')
resource, = resources
sub_resource = resource.demo1
sub_resource.reserved = "unknown_person"
sub_resource.save()
response, _ = self.requester(
json_data={
"descriptors": [
{
"type": "rotest.management.models.ut_models."
"DemoComplexResourceData",
"properties": {}
}
],
"timeout": 0,
"token": self.token
})
resources = DemoComplexResourceData.objects.filter(
name='complex_resource1')
resource, = resources
sub_resource = resource.demo1
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
# no reserved nor owner for main resource
self.assertFalse(resource.reserved)
self.assertFalse(resource.owner)
self.assertFalse(resource.is_available())
# sub resource left untouched
self.assertFalse(sub_resource.is_available())
self.assertEqual(sub_resource.reserved, "unknown_person")
class TestLockResourcesInvalid(TransactionTestCase):
"""Assert operations of invalid lock resources requests."""
def setUp(self):
"""Setup test environment."""
self.client = Client()
_, token_object = request(client=self.client,
path="tests/get_token", method="get")
self.token = token_object.token
self.requester = partial(request, self.client,
"resources/lock_resources")
def test_invalid_input(self):
"""Assert invalid request."""
# empty data
response, _ = self.requester(json_data={})
self.assertEqual(response.status_code,
http_client.INTERNAL_SERVER_ERROR)
def test_no_user_in_db(self):
"""Assert locking user not in db."""
# localhost is not in db
response, content = self.requester(
json_data={
"descriptors": [],
"timeout": 0,
"token": self.token
})
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
self.assertEqual(content.details,
"User localhost has no matching object in the DB")
def test_invalid_content_type(self):
"""Assert invalid request content type."""
# invalid content type
response, _ = self.requester(content_type="text/html")
self.assertEqual(response.status_code,
http_client.INTERNAL_SERVER_ERROR)
def test_invalid_resource(self):
"""Assert invalid resource requested."""
User.objects.create_user(username='localhost',
email='[email protected]',
password='localhost')
response, content = self.requester(
json_data={
"descriptors": [
{
"type": "invalidmodule.invalidtype",
"properties": {}
}
],
"timeout": 0,
"token": self.token
})
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
self.assertIn("Failed to extract type", content.details)
self.assertIn("invalidmodule.invalidtype", content.details)
self.assertIn("Reason: No module named", content.details)
self.assertIn("invalidmodule", content.details)
# no available resources
response, content = self.requester(
json_data={
"descriptors": [
{
"type": "rotest.management.models.ut_models."
"DemoResourceData",
"properties": {}
}
],
"timeout": 0,
"token": self.token
})
self.assertEqual(response.status_code, http_client.BAD_REQUEST)
self.assertTrue(content.details.startswith(
"No existing resource meets the requirements"))
| mit | 8,388,351,127,183,234,000 | 35.493274 | 75 | 0.53072 | false | 5.017263 | true | false | false |
wbsavage/shinken | shinken/log.py | 1 | 8631 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
import logging
import sys
from logging.handlers import TimedRotatingFileHandler
from brok import Brok
def is_tty():
# Look if we are in a tty or not
if hasattr(sys.stdout, 'isatty'):
return sys.stdout.isatty()
return False
if is_tty():
# Try to load the terminal color. Won't work under python 2.4
try:
from shinken.misc.termcolor import cprint
except (SyntaxError, ImportError), exp:
# Outch can't import a cprint, do a simple print
def cprint(s, color):
print s
# Ok it's a daemon mode, if so, just print
else:
def cprint(s, color):
print s
obj = None
name = None
local_log = None
human_timestamp_log = False
class Log:
"""Shinken logger class, wrapping access to Python logging standard library."""
"Store the numeric value from python logging class"
NOTSET = logging.NOTSET
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
def __init__(self):
self._level = logging.NOTSET
def load_obj(self, object, name_=None):
""" We load the object where we will put log broks
with the 'add' method
"""
global obj
global name
obj = object
name = name_
@staticmethod
def get_level_id(lvlName):
"""Convert a level name (string) to its integer value
and vice-versa. Input a level and it will return a name.
Raise KeyError when name or level not found
"""
return logging._levelNames[lvlName]
# We can have level as an int (logging.INFO) or a string INFO
# if string, try to get the int value
def get_level(self):
return logging.getLogger().getEffectiveLevel()
# We can have level as an int (logging.INFO) or a string INFO
# if string, try to get the int value
def set_level(self, level):
if not isinstance(level, int):
level = getattr(logging, level, None)
if not level or not isinstance(level, int):
raise TypeError('log level must be an integer')
self._level = level
logging.getLogger().setLevel(level)
def debug(self, msg, *args, **kwargs):
self._log(logging.DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self._log(logging.INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self._log(logging.WARNING, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self._log(logging.ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
self._log(logging.CRITICAL, msg, *args, **kwargs)
def log(self, message, format=None, print_it=True):
"""Old log method, kept for NAGIOS compatibility
What strings should not use the new format ??"""
self._log(logging.INFO, message, format, print_it, display_level=False)
def _log(self, level, message, format=None, print_it=True, display_level=True):
"""We enter a log message, we format it, and we add the log brok"""
global obj
global name
global local_log
global human_timestamp_log
# ignore messages when message level is lower than Log level
if level < self._level:
return
# We format the log in UTF-8
if isinstance(message, str):
message = message.decode('UTF-8', 'replace')
if format is None:
lvlname = logging.getLevelName(level)
if display_level:
fmt = u'[%(date)s] %(level)-9s %(name)s%(msg)s\n'
else:
fmt = u'[%(date)s] %(name)s%(msg)s\n'
args = {
'date': (human_timestamp_log and time.asctime()
or int(time.time())),
'level': lvlname.capitalize()+' :',
'name': name and ('[%s] ' % name) or '',
'msg': message
}
s = fmt % args
else:
s = format % message
if print_it and len(s) > 1:
# Take a color so we can print if it's a TTY
if is_tty():
color = {Log.WARNING:'yellow', Log.CRITICAL:'magenta', Log.ERROR:'red'}.get(level, None)
else:
color = None
# Print to standard output.
# If the daemon is launched with a non UTF8 shell
# we can have problems in printing, work around it.
try:
cprint(s[:-1], color)
except UnicodeEncodeError:
print s.encode('ascii', 'ignore')
# We create the brok and load the log message
# DEBUG level logs are logged by the daemon locally
# and must not be forwarded to other satellites, or risk overloading them.
if level != logging.DEBUG:
b = Brok('log', {'log': s})
obj.add(b)
# If local logging is enabled, log to the defined handler, file.
if local_log is not None:
logging.log(level, s.strip())
def register_local_log(self, path, level=None):
"""The shinken logging wrapper can write to a local file if needed
and return the file descriptor so we can avoid to
close it.
"""
global local_log
if level is not None:
self._level = level
# Open the log and set to rotate once a day
basic_log_handler = TimedRotatingFileHandler(path,
'midnight',
backupCount=5)
basic_log_handler.setLevel(self._level)
basic_log_formatter = logging.Formatter('%(asctime)s %(message)s')
basic_log_handler.setFormatter(basic_log_formatter)
logger = logging.getLogger()
logger.addHandler(basic_log_handler)
logger.setLevel(self._level)
local_log = basic_log_handler
# Return the file descriptor of this file
return basic_log_handler.stream.fileno()
def quit(self):
"""Close the local log file at program exit"""
global local_log
if local_log:
self.debug("Closing %s local_log" % str(local_log))
local_log.close()
def set_human_format(self, on=True):
"""
Set the output as human format.
If the optional parameter `on` is False, the timestamps format
will be reset to the default format.
"""
global human_timestamp_log
human_timestamp_log = bool(on)
logger = Log()
class __ConsoleLogger:
"""
This wrapper class for logging and printing messages to stdout, too.
:fixme: Implement this using an additional stream-handler, as soon
as the logging system is based on the standard Python logging
module.
"""
def debug(self, msg, *args, **kwargs):
self._log(Log.DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
kwargs.setdefault('display_level', False)
self._log(Log.INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self._log(Log.WARNING, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self._log(Log.ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
self._log(Log.CRITICAL, msg, *args, **kwargs)
def _log(self, *args, **kwargs):
# if `print_it` is not passed as an argument, set it to `true`
kwargs.setdefault('print_it', True)
logger._log(*args, **kwargs)
console_logger = __ConsoleLogger()
| agpl-3.0 | -2,778,704,645,423,375,400 | 31.942748 | 104 | 0.594717 | false | 3.96281 | false | false | false |
whaleygeek/punchcard_reader | cardreader/arduino.py | 1 | 3715 | # arduino.py 20/06/2015 D.J.Whale
#
# (c) 2015 D.J.Whale
#
# Read card records from an arduino card reader.
#----- CONFIGURATION -----------------------------------------------------------
DEBUG = False
USE_EMBEDDED_PYSERIAL = True
BAUD = 115200
ENABLE_ERROR_CORRECT = False
if USE_EMBEDDED_PYSERIAL:
from os import sys, path
thisdir = path.dirname(path.abspath(__file__))
sys.path.append(thisdir)
import serial
#----- PORTSCAN ----------------------------------------------------------------
import portscan
name = portscan.getName()
if name != None:
if DEBUG:
print("Using port:" + name)
PORT = name
else:
name = portscan.find()
if name == None:
raise ValueError("No port selected, giving in")
PORT = name
print("Your cardreader board has been detected")
print("Now running your program...")
#----- CONFIGURE SERIAL PORT ---------------------------------------------------
s = serial.Serial(PORT)
s.baudrate = BAUD
s.parity = serial.PARITY_NONE
s.databits = serial.EIGHTBITS
s.stopbits = serial.STOPBITS_ONE
s.close()
s.port = PORT
s.open()
#TEST_DATA1 = [
# "..XXX...",
# ".XX.XX..",
# "XXXXXXX.",
# "....XXX.",
# "XXXXXXX.",
# ".XXXXX..",
# "..XXX...",
# "........"
#]
rec_buffer = None
line_buffer = ""
def isReady():
if rec_buffer != None:
return True
processSerial()
if rec_buffer != None:
return True
return False
def read():
global rec_buffer
if not isReady():
return None
rec = rec_buffer
rec_buffer = None
return rec
def readline():
global line_buffer
while True:
data = s.read(1)
if len(data) == 0:
return None # no new data has been received
data = data[0]
if data == '\n':
pass # strip newline
elif data[0] == '\r':
line = line_buffer
line_buffer = ""
#print(line)
return line
else:
line_buffer += data
def processSerial():
global rec_buffer
# Poll serial to see if there is a line of data waiting
line = readline()
if line == None:
return
result = getRec(line)
if result != None:
# There is a rec, process it
databuf = result
rectype = ord(databuf[0])
payload = databuf[1:]
#print("rectype:" + str(rectype))
if rectype == REPORT_OK_CARD:
#print("CARD OK")
rec_buffer = decodeDataBuf(payload)
#if rec_buffer != None:
# printCard(rec_buffer)
else:
# Just display other rec types on diagnostics
print("Unhandled rec:" + str(rectype) + " " + str(databuf))
REPORT_OK_BOOT = 0
REPORT_OK_CARD = 1
REPORT_OK_STATE = 2
REPORT_OK_ROW = 3
REPORT_OK_ADC = 4
REPORT_ERR_LENGTH = 129
def getRec(line):
# :ttnnnnnn\n
# i.e. start char is :
# first two chars are hexascii type
# next pairs of chars are data
# line terminated by a newline
if len(line) < 3:
return None # Too short, no start/type
if line[0] != ':':
return None # Not a start char
line = line[1:] # strip start char
# read the type as hexascii, error if not hexascii
# if boot record, read ascii data and exit
try:
data = line.decode('hex') # now binary
except:
print("non hex data:" + line)
return None
return data # binary buffer
def decodeDataBuf(buf):
card = []
for b in buf:
byte = ord(b)
# each byte in buffer is a row
row = ""
# each bit in b is a column in that row, D7..D0
mult = 0x80
for bitno in range(8):
bit = ((byte & mult) != 0)
if bit:
row += 'X'
else:
row += '.'
mult = mult >> 1
card.append(row)
return card
def printCard(card):
for row in card:
print(row)
# END
| mit | 5,137,636,407,564,047,000 | 18.051282 | 80 | 0.563392 | false | 3.340827 | false | false | false |
bitmazk/django-dynamic-content | dynamic_content/south_migrations/0001_initial.py | 1 | 2962 | # flake8: noqa
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DynamicContentTranslation'
db.create_table(u'dynamic_content_dynamiccontent_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content', self.gf('django.db.models.fields.TextField')()),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['dynamic_content.DynamicContent'])),
))
db.send_create_signal(u'dynamic_content', ['DynamicContentTranslation'])
# Adding unique constraint on 'DynamicContentTranslation', fields ['language_code', 'master']
db.create_unique(u'dynamic_content_dynamiccontent_translation', ['language_code', 'master_id'])
# Adding model 'DynamicContent'
db.create_table(u'dynamic_content_dynamiccontent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('identifier', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal(u'dynamic_content', ['DynamicContent'])
def backwards(self, orm):
# Removing unique constraint on 'DynamicContentTranslation', fields ['language_code', 'master']
db.delete_unique(u'dynamic_content_dynamiccontent_translation', ['language_code', 'master_id'])
# Deleting model 'DynamicContentTranslation'
db.delete_table(u'dynamic_content_dynamiccontent_translation')
# Deleting model 'DynamicContent'
db.delete_table(u'dynamic_content_dynamiccontent')
models = {
u'dynamic_content.dynamiccontent': {
'Meta': {'object_name': 'DynamicContent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'dynamic_content.dynamiccontenttranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'DynamicContentTranslation', 'db_table': "u'dynamic_content_dynamiccontent_translation'"},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['dynamic_content.DynamicContent']"})
}
}
complete_apps = ['dynamic_content']
| mit | 8,530,690,097,504,096,000 | 50.068966 | 178 | 0.642471 | false | 3.923179 | false | false | false |
eldarion/pycon | pycon/sponsorship/managers.py | 1 | 1557 | from django.db import models
class SponsorManager(models.Manager):
def active(self):
return self.get_query_set().filter(active=True).order_by("level")
def with_weblogo(self):
queryset = self.raw("""
SELECT DISTINCT
"sponsorship_sponsor"."id",
"sponsorship_sponsor"."applicant_id",
"sponsorship_sponsor"."name",
"sponsorship_sponsor"."external_url",
"sponsorship_sponsor"."annotation",
"sponsorship_sponsor"."contact_name",
"sponsorship_sponsor"."contact_email",
"sponsorship_sponsor"."level_id",
"sponsorship_sponsor"."added",
"sponsorship_sponsor"."active",
"sponsorship_sponsorlevel"."order"
FROM
"sponsorship_sponsor"
INNER JOIN
"sponsorship_sponsorbenefit" ON ("sponsorship_sponsor"."id" = "sponsorship_sponsorbenefit"."sponsor_id")
INNER JOIN
"sponsorship_benefit" ON ("sponsorship_sponsorbenefit"."benefit_id" = "sponsorship_benefit"."id")
LEFT OUTER JOIN
"sponsorship_sponsorlevel" ON ("sponsorship_sponsor"."level_id" = "sponsorship_sponsorlevel"."id")
WHERE (
"sponsorship_sponsor"."active" = 't' AND
"sponsorship_benefit"."type" = 'weblogo' AND
"sponsorship_sponsorbenefit"."upload" != ''
)
ORDER BY "sponsorship_sponsorlevel"."order" ASC, "sponsorship_sponsor"."added" ASC
""")
return queryset
| bsd-3-clause | -1,580,691,195,747,468,800 | 39.973684 | 120 | 0.583815 | false | 3.689573 | false | false | false |
Fokko/incubator-airflow | tests/contrib/hooks/test_segment_hook.py | 1 | 2012 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from airflow import AirflowException
from airflow.contrib.hooks.segment_hook import SegmentHook
TEST_CONN_ID = 'test_segment'
WRITE_KEY = 'foo'
class TestSegmentHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
conn.write_key = WRITE_KEY
self.expected_write_key = WRITE_KEY
self.conn.extra_dejson = {'write_key': self.expected_write_key}
class UnitTestSegmentHook(SegmentHook):
def get_conn(self):
return conn
def get_connection(self, _):
return conn
self.test_hook = UnitTestSegmentHook(segment_conn_id=TEST_CONN_ID)
def test_get_conn(self):
expected_connection = self.test_hook.get_conn()
self.assertEqual(expected_connection, self.conn)
self.assertIsNotNone(expected_connection.write_key)
self.assertEqual(expected_connection.write_key, self.expected_write_key)
def test_on_error(self):
with self.assertRaises(AirflowException):
self.test_hook.on_error('error', ['items'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -762,890,311,649,424,400 | 31.451613 | 80 | 0.694334 | false | 4.007968 | true | false | false |
ondrokrc/gramps | gramps/gui/utilscairo.py | 1 | 2664 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham, Martin Hawlisch
# Copyright (C) 2009 Douglas S. Blank
# Copyright (C) 2012 Benny Malengier
# Copyright (C) 2013 Vassilii Khachaturov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
#from gi.repository import Pango
#from gi.repository import GObject
#from gi.repository import Gdk
#from gi.repository import Gtk
#from gi.repository import PangoCairo
import cairo
#import math
#import colorsys
#import pickle
#-------------------------------------------------------------------------
#
# Functions
#
#-------------------------------------------------------------------------
def warpPath(ctx, function):
"""Transform a path given a 2D transformation function.
ctx -- a cairo.Context, on which the path is set
function -- a 2D transform (x,y) |-> (x_new,y_new)
The transformed path replaces the original one on the context.
Taken from /usr/share/doc/python-cairo/examples/warpedtext.py
According to /usr/share/doc/python-cairo/copyright, licensed
under MOZILLA PUBLIC LICENSE 1.1, see that file for more detail.
"""
first = True
for type, points in ctx.copy_path():
if type == cairo.PATH_MOVE_TO:
if first:
ctx.new_path()
first = False
x, y = function(*points)
ctx.move_to(x, y)
elif type == cairo.PATH_LINE_TO:
x, y = function(*points)
ctx.line_to(x, y)
elif type == cairo.PATH_CURVE_TO:
x1, y1, x2, y2, x3, y3 = points
x1, y1 = function(x1, y1)
x2, y2 = function(x2, y2)
x3, y3 = function(x3, y3)
ctx.curve_to(x1, y1, x2, y2, x3, y3)
elif type == cairo.PATH_CLOSE_PATH:
ctx.close_path()
| gpl-2.0 | 5,053,228,286,535,073,000 | 32.721519 | 79 | 0.588213 | false | 3.838617 | false | false | false |
sk1418/passwdmanager | passwdmanager/upgrade.py | 1 | 8922 | # PasswdManager -- Password management tool
# Copyright (C) 2009 -- 2013 Kai Yuan <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
"""
This upgrade program will only upgrade passwdManager from version 1.0.x to 1.1.0
Since version 1.2.0, application will upgrade from 1.0.x or 1.1.0 automatically, because the
pycrypto library changes. Therefore this script won't be used any longer.
"""
import shutil
import sqlite3 as sqlite
import config,util,service,dao
import wx
import os.path
SIZE_WINDOW = (900,610)
SIZE_MULTILINE = (400,300)
SIZE_ROOT = (300,28)
def chkRootPwd(path, pwd):
'''check given root password on given path'''
if os.path.isfile(path):
conn = sqlite.connect(path)
masterDao = dao.MasterDao(conn)
md5Pwd = masterDao.getMasterPwd()
md5String = util.md5Encode(pwd)
conn.close()
return md5Pwd == md5String
else:
return False
def showErrorDialog(ErrMsg):
dlg = wx.MessageDialog(None, ErrMsg, 'Error' , wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
class UpgradeService:
def __init__(self):
'''
constructor
'''
self.log = None
def getConnection(self):
conn = sqlite.connect(config.CONN_PATH)
return conn
def addLog(self,msg):
self.log.SetValue(self.log.GetValue()+"\n"+msg)
def upgrade(self,src,key,log):
try:
self.log = log
self.log.SetValue("")
self.addLog("Starting upgrade")
#the original datafile in new version
newData=config.CONN_PATH;
# backup the newData(sample data) with .bak
shutil.copy(newData,newData+".bak")
self.addLog("backup 1.1.0 datafile->"+newData+".bak")
shutil.copy(src, newData)
self.addLog("copy 1.0.x datafile->"+newData)
shutil.copy(src, newData+"_v1.0.x.bak")
self.addLog("backup 1.0.x datafile->"+newData+"_v1.0.x.bak")
conn = self.getConnection()
self.addLog("Adding Secret Info column to datafile.... ")
self.__addSecretColumn(conn)
self.addLog("Done!")
self.addLog("Encrypting username/loginName for all existing accounts....")
c = self.__encryptAccounts(key,conn)
self.addLog("Done! "+ str(c) + " accounts were updated.")
self.addLog("Upgrade Finished, also your old data were migrated to new PasswdManager.")
self.addLog("Close Upgrade tool, and start PasswdManager, login with your old master password.")
self.addLog("Thanks for using PasswordManager")
conn.commit()
conn.close()
except sqlite.OperationalError:
self.addLog("Upgrade datafile failed. Is selected datafile with version 1.0.x?")
showErrorDialog("Upgrade datafile failed. Is selected datafile with version 1.0.x?")
def __addSecretColumn(self,conn):
sql = """
ALTER TABLE ACCOUNT ADD COLUMN secret TEXT
"""
cur = conn.cursor()
cur.execute(sql)
cur.close()
def __encryptAccounts(self,key,conn):
cur = conn.cursor()
cur2 =conn.cursor()
sql = 'select id, username FROM ACCOUNT'
cur.execute(sql)
upsql = 'update Account set username=? where id=?'
c=0
for row in cur.fetchall():
(id,uid) = row
newUid=util.encrypt(key,uid)
cur2.execute(upsql,(newUid,id,))
c += 1
cur2.close()
cur.close()
return c
class MainFrame(wx.Frame):
'''
main window
'''
def __init__(self):
ID_INFO = 10000
ID_FILE = 10001
ID_FILEPICKER = 10002
ID_PWD = 10003
ID_TEXTCTRL = 10004
ID_LINE = 10005
ID_TEXT = 10006
ID_BUTTON = 10007
placeHolder = (200,20)
pos = wx.DefaultPosition
size = (900,340)
style = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self,None,-1, 'PasswdManager Upgrade 1.0.x -> 1.1.x', pos, size, style)
item0 = wx.BoxSizer( wx.VERTICAL )
item0.Add(placeHolder)#place holder
item1 = wx.StaticText( self, ID_INFO,
"This tool will upgrade the data file of passwdManager 1.0.x to 1.1.x. \n"
"\nNote:\n"
"- a backup of given data file will be automatically created\n"
"- Master password of your data file is needed for upgrading.(for encrypting account names)",
wx.DefaultPosition, [700,120], 0 )
item0.Add( item1, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item2 = wx.FlexGridSizer( 0, 2, 0, 0 )
item3 = wx.StaticText( self, ID_FILE, "1.0.x Database File", wx.DefaultPosition, wx.DefaultSize, 0 )
item2.Add( item3, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5 )
item4 = wx.FilePickerCtrl( self, ID_FILEPICKER, "", "Choose file", "passwd Manager Data file (*.pmgr)|*.pmgr", wx.DefaultPosition, [300,28], wx.FLP_OPEN|wx.FLP_USE_TEXTCTRL| wx.FLP_FILE_MUST_EXIST )
item2.Add( item4, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
item5 = wx.StaticText( self, ID_PWD, "Master Password", wx.DefaultPosition, wx.DefaultSize, 0 )
item2.Add( item5, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5 )
item6 = wx.TextCtrl( self, ID_TEXTCTRL, "", wx.DefaultPosition, [300,28], wx.TE_PASSWORD )
item2.Add( item6, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item0.Add( item2, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item7 = wx.StaticLine( self, ID_LINE, wx.DefaultPosition, [500,20], wx.LI_HORIZONTAL )
item0.Add( item7, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item8 = wx.StaticText( self, ID_TEXT, "Upgrade log", wx.DefaultPosition, [440,-1], 0 )
item0.Add( item8, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item9 = wx.TextCtrl( self, ID_TEXTCTRL, "", wx.DefaultPosition, [500,400], wx.TE_MULTILINE|wx.TE_READONLY)
item0.Add( item9, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item10 = wx.FlexGridSizer( 0, 3, 0, 0 )
item10.Add( placeHolder , 0, wx.ALIGN_CENTER|wx.ALL, 5 )
item11 = wx.Button( self, wx.ID_OK, "Start", wx.DefaultPosition, wx.DefaultSize, 0 )
item12 = wx.Button( self, wx.ID_CLOSE, "Close", wx.DefaultPosition, wx.DefaultSize, 0 )
item10.Add( item11, 0, wx.ALIGN_RIGHT|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )
item10.Add( item12, 0, wx.ALIGN_RIGHT|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )
item0.Add( item10, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.TOP|wx.BOTTOM|wx.SHAPED, 5 )
self.SetSizer( item0 )
item0.SetSizeHints( self )
self.pwd = item6
self.filepicker = item4
self.log = item9
self.okbt= item11
self.Bind(wx.EVT_BUTTON, self.onStart, item11)
self.Bind(wx.EVT_BUTTON, self.exitUpg, item12)
self.upgService = UpgradeService()
def exitUpg(self,event):
exit()
def onStart(self,event):
'''user click upgrade'''
if not self.pwd.GetValue():
showErrorDialog("Master password cannot be empty!")
self.pwd.SetFocus()
elif not self.filepicker.GetPath():
showErrorDialog("please choose the passwd Manager data file in old version.")
self.filepicker.SetFocus()
elif not os.path.isfile(self.filepicker.GetPath()):
showErrorDialog("Seleced data file doesn't exist.")
self.filepicker.SetFocus()
elif not chkRootPwd(self.filepicker.GetPath(), self.pwd.GetValue()):
showErrorDialog("The given root password is not correct for the given data file.")
self.pwd.SetFocus()
else:
#here start the upgrade logic
self.upgService.upgrade(self.filepicker.GetPath(),self.pwd.GetValue(), self.log)
self.okbt.Disable()
class UpApp(wx.App):
''' upgrade application starting point'''
def OnInit(self):
wx.InitAllImageHandlers()
frame = MainFrame()
frame.Show(True)
return True
if __name__ == '__main__':
upApp = UpApp()
upApp.MainLoop()
| gpl-3.0 | -305,031,895,678,248,640 | 33.715953 | 206 | 0.6039 | false | 3.557416 | false | false | false |
jeremiahyan/odoo | addons/mrp/models/mrp_production.py | 1 | 92112 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import datetime
import math
import re
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import AccessError, UserError
from odoo.tools import float_compare, float_round, float_is_zero, format_datetime
from odoo.tools.misc import format_date
from odoo.addons.stock.models.stock_move import PROCUREMENT_PRIORITIES
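# Zero-padding width used when numbering backorders in the manufacturing order name (e.g. "MO/00001-001").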
SIZE_BACK_ORDER_NUMERING = 3
class MrpProduction(models.Model):
""" Manufacturing Orders """
_name = 'mrp.production'
_description = 'Production Order'
_date_name = 'date_planned_start'
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'priority desc, date_planned_start asc,id'
@api.model
def _get_default_picking_type(self):
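        """Default to the manufacturing operation type of the current (or context default) company's warehouse."""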
company_id = self.env.context.get('default_company_id', self.env.company.id)
return self.env['stock.picking.type'].search([
('code', '=', 'mrp_operation'),
('warehouse_id.company_id', '=', company_id),
], limit=1).id
@api.model
def _get_default_location_src_id(self):
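        """Use the source location of the default operation type, falling back to the company warehouse's stock location."""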
location = False
company_id = self.env.context.get('default_company_id', self.env.company.id)
if self.env.context.get('default_picking_type_id'):
location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_src_id
if not location:
location = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id
return location and location.id or False
@api.model
def _get_default_location_dest_id(self):
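        """Use the destination location of the default operation type, falling back to the company warehouse's stock location."""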
location = False
company_id = self.env.context.get('default_company_id', self.env.company.id)
if self._context.get('default_picking_type_id'):
location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_dest_id
if not location:
location = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id
return location and location.id or False
@api.model
def _get_default_date_planned_finished(self):
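        """Schedule the end one hour after the planned start date, or one hour from now if no start date is given."""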
if self.env.context.get('default_date_planned_start'):
return fields.Datetime.to_datetime(self.env.context.get('default_date_planned_start')) + datetime.timedelta(hours=1)
return datetime.datetime.now() + datetime.timedelta(hours=1)
@api.model
def _get_default_date_planned_start(self):
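        """Default the planned start to the deadline passed in the context, or to the current datetime."""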
if self.env.context.get('default_date_deadline'):
return fields.Datetime.to_datetime(self.env.context.get('default_date_deadline'))
return datetime.datetime.now()
@api.model
def _get_default_is_locked(self):
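        """Lock new manufacturing orders by default for users in the 'Lock by default' group."""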
return self.user_has_groups('mrp.group_locked_by_default')
name = fields.Char(
'Reference', copy=False, readonly=True, default=lambda x: _('New'))
priority = fields.Selection(
PROCUREMENT_PRIORITIES, string='Priority', default='0',
help="Components will be reserved first for the MO with the highest priorities.")
    backorder_sequence = fields.Integer("Backorder Sequence", default=0, copy=False, help="Backorder sequence; 0 means this manufacturing order has no related backorder.")
origin = fields.Char(
'Source', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Reference of the document that generated this production order request.")
product_id = fields.Many2one(
'product.product', 'Product',
domain="""[
('type', 'in', ['product', 'consu']),
'|',
('company_id', '=', False),
('company_id', '=', company_id)
]
""",
readonly=True, required=True, check_company=True,
states={'draft': [('readonly', False)]})
product_tracking = fields.Selection(related='product_id.tracking')
product_tmpl_id = fields.Many2one('product.template', 'Product Template', related='product_id.product_tmpl_id')
product_qty = fields.Float(
'Quantity To Produce',
default=1.0, digits='Product Unit of Measure',
readonly=True, required=True, tracking=True,
states={'draft': [('readonly', False)]})
product_uom_id = fields.Many2one(
'uom.uom', 'Product Unit of Measure',
readonly=True, required=True,
states={'draft': [('readonly', False)]}, domain="[('category_id', '=', product_uom_category_id)]")
lot_producing_id = fields.Many2one(
'stock.production.lot', string='Lot/Serial Number', copy=False,
domain="[('product_id', '=', product_id), ('company_id', '=', company_id)]", check_company=True)
qty_producing = fields.Float(string="Quantity Producing", digits='Product Unit of Measure', copy=False)
product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id')
product_uom_qty = fields.Float(string='Total Quantity', compute='_compute_product_uom_qty', store=True)
picking_type_id = fields.Many2one(
'stock.picking.type', 'Operation Type',
domain="[('code', '=', 'mrp_operation'), ('company_id', '=', company_id)]",
default=_get_default_picking_type, required=True, check_company=True)
use_create_components_lots = fields.Boolean(related='picking_type_id.use_create_components_lots')
location_src_id = fields.Many2one(
'stock.location', 'Components Location',
default=_get_default_location_src_id,
readonly=True, required=True,
domain="[('usage','=','internal'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
states={'draft': [('readonly', False)]}, check_company=True,
help="Location where the system will look for components.")
location_dest_id = fields.Many2one(
'stock.location', 'Finished Products Location',
default=_get_default_location_dest_id,
readonly=True, required=True,
domain="[('usage','=','internal'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
states={'draft': [('readonly', False)]}, check_company=True,
help="Location where the system will stock the finished products.")
date_planned_start = fields.Datetime(
'Scheduled Date', copy=False, default=_get_default_date_planned_start,
help="Date at which you plan to start the production.",
index=True, required=True)
date_planned_finished = fields.Datetime(
'Scheduled End Date',
default=_get_default_date_planned_finished,
help="Date at which you plan to finish the production.",
copy=False)
date_deadline = fields.Datetime(
'Deadline', copy=False, store=True, readonly=True, compute='_compute_date_deadline', inverse='_set_date_deadline',
help="Informative date allowing to define when the manufacturing order should be processed at the latest to fulfill delivery on time.")
    date_start = fields.Datetime('Start Date', copy=False, readonly=True, help="Date at which the production has started.")
    date_finished = fields.Datetime('End Date', copy=False, readonly=True, help="Date at which the MO has been closed.")
bom_id = fields.Many2one(
'mrp.bom', 'Bill of Material',
readonly=True, states={'draft': [('readonly', False)]},
domain="""[
'&',
'|',
('company_id', '=', False),
('company_id', '=', company_id),
'&',
'|',
('product_id','=',product_id),
'&',
('product_tmpl_id.product_variant_ids','=',product_id),
('product_id','=',False),
('type', '=', 'normal')]""",
check_company=True,
help="Bill of Materials allow you to define the list of required components to make a finished product.")
state = fields.Selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('progress', 'In Progress'),
('to_close', 'To Close'),
('done', 'Done'),
('cancel', 'Cancelled')], string='State',
compute='_compute_state', copy=False, index=True, readonly=True,
store=True, tracking=True,
help=" * Draft: The MO is not confirmed yet.\n"
" * Confirmed: The MO is confirmed, the stock rules and the reordering of the components are trigerred.\n"
" * In Progress: The production has started (on the MO or on the WO).\n"
" * To Close: The production is done, the MO has to be closed.\n"
" * Done: The MO is closed, the stock moves are posted. \n"
" * Cancelled: The MO has been cancelled, can't be confirmed anymore.")
reservation_state = fields.Selection([
('confirmed', 'Waiting'),
('assigned', 'Ready'),
('waiting', 'Waiting Another Operation')],
string='Material Availability',
compute='_compute_reservation_state', copy=False, index=True, readonly=True,
store=True, tracking=True,
help=" * Ready: The material is available to start the production.\n\
* Waiting: The material is not available to start the production.\n\
The material availability is impacted by the manufacturing readiness\
defined on the BoM.")
move_raw_ids = fields.One2many(
'stock.move', 'raw_material_production_id', 'Components',
copy=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=[('scrapped', '=', False)])
move_finished_ids = fields.One2many(
'stock.move', 'production_id', 'Finished Products',
copy=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=[('scrapped', '=', False)])
move_byproduct_ids = fields.One2many('stock.move', compute='_compute_move_byproduct_ids', inverse='_set_move_byproduct_ids')
finished_move_line_ids = fields.One2many(
'stock.move.line', compute='_compute_lines', inverse='_inverse_lines', string="Finished Product"
)
workorder_ids = fields.One2many(
'mrp.workorder', 'production_id', 'Work Orders', copy=True)
move_dest_ids = fields.One2many('stock.move', 'created_production_id',
string="Stock Movements of Produced Goods")
unreserve_visible = fields.Boolean(
'Allowed to Unreserve Production', compute='_compute_unreserve_visible',
help='Technical field to check when we can unreserve')
reserve_visible = fields.Boolean(
'Allowed to Reserve Production', compute='_compute_unreserve_visible',
help='Technical field to check when we can reserve quantities')
confirm_no_consumption = fields.Boolean(
compute='_compute_components_availability',
help='Technical field used to see if we have to display a warning or not when confirming an order without any components.')
user_id = fields.Many2one(
'res.users', 'Responsible', default=lambda self: self.env.user,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=lambda self: [('groups_id', 'in', self.env.ref('mrp.group_mrp_user').id)])
company_id = fields.Many2one(
'res.company', 'Company', default=lambda self: self.env.company,
index=True, required=True)
qty_produced = fields.Float(compute="_get_produced_qty", string="Quantity Produced")
procurement_group_id = fields.Many2one(
'procurement.group', 'Procurement Group',
copy=False)
product_description_variants = fields.Char('Custom Description')
orderpoint_id = fields.Many2one('stock.warehouse.orderpoint', 'Orderpoint')
propagate_cancel = fields.Boolean(
'Propagate cancel and split',
        help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this one will be cancelled or split as well')
delay_alert_date = fields.Datetime('Delay Alert Date', compute='_compute_delay_alert_date', search='_search_delay_alert_date')
json_popover = fields.Char('JSON data for the popover widget', compute='_compute_json_popover')
scrap_ids = fields.One2many('stock.scrap', 'production_id', 'Scraps')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
is_locked = fields.Boolean('Is Locked', default=_get_default_is_locked, copy=False)
is_planned = fields.Boolean('Its Operations are Planned', compute="_compute_is_planned", store=True)
show_final_lots = fields.Boolean('Show Final Lots', compute='_compute_show_lots')
production_location_id = fields.Many2one('stock.location', "Production Location", compute="_compute_production_location", store=True)
    picking_ids = fields.Many2many('stock.picking', compute='_compute_picking_ids', string='Pickings associated with this manufacturing order')
delivery_count = fields.Integer(string='Delivery Orders', compute='_compute_picking_ids')
confirm_cancel = fields.Boolean(compute='_compute_confirm_cancel')
consumption = fields.Selection([
('flexible', 'Allowed'),
('warning', 'Allowed with warning'),
('strict', 'Blocked')],
required=True,
readonly=True,
default='flexible',
)
mrp_production_child_count = fields.Integer("Number of generated MO", compute='_compute_mrp_production_child_count')
mrp_production_source_count = fields.Integer("Number of source MO", compute='_compute_mrp_production_source_count')
mrp_production_backorder_count = fields.Integer("Count of linked backorder", compute='_compute_mrp_production_backorder')
show_lock = fields.Boolean('Show Lock/unlock buttons', compute='_compute_show_lock')
components_availability = fields.Char(
string="Component Availability", compute='_compute_components_availability')
components_availability_state = fields.Selection([
('available', 'Available'),
('expected', 'Expected'),
('late', 'Late')], compute='_compute_components_availability')
show_lot_ids = fields.Boolean('Display the serial number shortcut on the moves', compute='_compute_show_lot_ids')
forecasted_issue = fields.Boolean(compute='_compute_forecasted_issue')
@api.depends('procurement_group_id.stock_move_ids.created_production_id.procurement_group_id.mrp_production_ids')
def _compute_mrp_production_child_count(self):
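        """Count the child MOs, i.e. the manufacturing orders that were
        generated by the procurements triggered from this MO's moves."""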
for production in self:
production.mrp_production_child_count = len(production.procurement_group_id.stock_move_ids.created_production_id.procurement_group_id.mrp_production_ids)
@api.depends('move_dest_ids.group_id.mrp_production_ids')
def _compute_mrp_production_source_count(self):
for production in self:
production.mrp_production_source_count = len(production.procurement_group_id.mrp_production_ids.move_dest_ids.group_id.mrp_production_ids)
@api.depends('procurement_group_id.mrp_production_ids')
def _compute_mrp_production_backorder(self):
for production in self:
production.mrp_production_backorder_count = len(production.procurement_group_id.mrp_production_ids)
@api.depends('move_raw_ids', 'state', 'date_planned_start', 'move_raw_ids.forecast_availability', 'move_raw_ids.forecast_expected_date')
def _compute_components_availability(self):
self.components_availability = False
self.components_availability_state = 'available'
self.confirm_no_consumption = False
productions = self.filtered(lambda mo: mo.state not in ['cancel', 'draft', 'done'])
productions.components_availability = _('Available')
for production in productions:
if not production.move_raw_ids:
production.confirm_no_consumption = True
continue
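            # Latest expected receipt date among the components: used below to flag
            # the MO as 'expected' or 'late' compared to its planned start date.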
forecast_date = max(production.move_raw_ids.filtered('forecast_expected_date').mapped('forecast_expected_date'), default=False)
if any(float_compare(move.forecast_availability, move.product_qty, move.product_id.uom_id.rounding) == -1 for move in production.move_raw_ids):
production.components_availability = _('Not Available')
production.components_availability_state = 'late'
elif forecast_date:
production.components_availability = _('Exp %s', format_date(self.env, forecast_date))
production.components_availability_state = 'late' if forecast_date > production.date_planned_start else 'expected'
@api.depends('move_finished_ids.date_deadline')
def _compute_date_deadline(self):
for production in self:
production.date_deadline = min(production.move_finished_ids.filtered('date_deadline').mapped('date_deadline'), default=production.date_deadline or False)
def _set_date_deadline(self):
for production in self:
production.move_finished_ids.date_deadline = production.date_deadline
@api.depends("workorder_ids.date_planned_start", "workorder_ids.date_planned_finished")
def _compute_is_planned(self):
for production in self:
if production.workorder_ids:
production.is_planned = any(wo.date_planned_start and wo.date_planned_finished for wo in production.workorder_ids)
else:
production.is_planned = False
@api.depends('move_raw_ids.delay_alert_date')
def _compute_delay_alert_date(self):
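        # Single grouped query: latest delay alert date of the raw moves, per MO.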
delay_alert_date_data = self.env['stock.move'].read_group([('id', 'in', self.move_raw_ids.ids), ('delay_alert_date', '!=', False)], ['delay_alert_date:max'], 'raw_material_production_id')
delay_alert_date_data = {data['raw_material_production_id'][0]: data['delay_alert_date'] for data in delay_alert_date_data}
for production in self:
production.delay_alert_date = delay_alert_date_data.get(production.id, False)
def _compute_json_popover(self):
for production in self:
production.json_popover = json.dumps({
'popoverTemplate': 'stock.PopoverStockRescheduling',
'delay_alert_date': format_datetime(self.env, production.delay_alert_date, dt_format=False) if production.delay_alert_date else False,
'late_elements': [{
'id': late_document.id,
'name': late_document.display_name,
'model': late_document._name,
} for late_document in production.move_raw_ids.filtered(lambda m: m.delay_alert_date).move_orig_ids._delay_alert_get_documents()
]
})
@api.depends('move_raw_ids.state', 'move_finished_ids.state')
def _compute_confirm_cancel(self):
""" If the manufacturing order contains some done move (via an intermediate
post inventory), the user has to confirm the cancellation.
"""
domain = [
('state', '=', 'done'),
'|',
('production_id', 'in', self.ids),
('raw_material_production_id', 'in', self.ids)
]
res = self.env['stock.move'].read_group(domain, ['state', 'production_id', 'raw_material_production_id'], ['production_id', 'raw_material_production_id'], lazy=False)
productions_with_done_move = {}
for rec in res:
production_record = rec['production_id'] or rec['raw_material_production_id']
if production_record:
productions_with_done_move[production_record[0]] = True
for production in self:
production.confirm_cancel = productions_with_done_move.get(production.id, False)
@api.depends('procurement_group_id')
def _compute_picking_ids(self):
for order in self:
order.picking_ids = self.env['stock.picking'].search([
('group_id', '=', order.procurement_group_id.id), ('group_id', '!=', False),
])
order.delivery_count = len(order.picking_ids)
def action_view_mo_delivery(self):
""" This function returns an action that display picking related to
manufacturing order orders. It can either be a in a list or in a form
view, if there is only one picking to show.
"""
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("stock.action_picking_tree_all")
pickings = self.mapped('picking_ids')
if len(pickings) > 1:
action['domain'] = [('id', 'in', pickings.ids)]
elif pickings:
form_view = [(self.env.ref('stock.view_picking_form').id, 'form')]
if 'views' in action:
action['views'] = form_view + [(state,view) for state,view in action['views'] if view != 'form']
else:
action['views'] = form_view
action['res_id'] = pickings.id
action['context'] = dict(self._context, default_origin=self.name, create=False)
return action
@api.depends('product_uom_id', 'product_qty', 'product_id.uom_id')
def _compute_product_uom_qty(self):
for production in self:
if production.product_id.uom_id != production.product_uom_id:
production.product_uom_qty = production.product_uom_id._compute_quantity(production.product_qty, production.product_id.uom_id)
else:
production.product_uom_qty = production.product_qty
@api.depends('product_id', 'company_id')
def _compute_production_location(self):
if not self.company_id:
return
location_by_company = self.env['stock.location'].read_group([
('company_id', 'in', self.company_id.ids),
('usage', '=', 'production')
], ['company_id', 'ids:array_agg(id)'], ['company_id'])
location_by_company = {lbc['company_id'][0]: lbc['ids'] for lbc in location_by_company}
for production in self:
if production.product_id:
production.production_location_id = production.product_id.with_company(production.company_id).property_stock_production
else:
production.production_location_id = location_by_company.get(production.company_id.id)[0]
@api.depends('product_id.tracking')
def _compute_show_lots(self):
for production in self:
production.show_final_lots = production.product_id.tracking != 'none'
def _inverse_lines(self):
""" Little hack to make sure that when you change something on these objects, it gets saved"""
pass
@api.depends('move_finished_ids.move_line_ids')
def _compute_lines(self):
for production in self:
production.finished_move_line_ids = production.move_finished_ids.mapped('move_line_ids')
@api.depends(
'move_raw_ids.state', 'move_raw_ids.quantity_done', 'move_finished_ids.state',
'workorder_ids', 'workorder_ids.state', 'product_qty', 'qty_producing')
def _compute_state(self):
""" Compute the production state. This uses a similar process to stock
        picking, but has been adapted to support having no moves. This adaptation
        includes some state changes outside of this compute.
        Two extra states exist for production:
        - progress: At least one item is produced or consumed.
        - to_close: The quantity produced is greater than the quantity to
          produce and all work orders have been finished.
"""
for production in self:
if not production.state:
production.state = 'draft'
elif production.move_raw_ids and all(move.state == 'cancel' for move in production.move_raw_ids):
production.state = 'cancel'
elif production.state == 'done' or (production.move_raw_ids and all(move.state in ('cancel', 'done') for move in production.move_raw_ids)):
production.state = 'done'
elif production.qty_producing >= production.product_qty:
production.state = 'to_close'
elif any(wo_state in ('progress', 'done') for wo_state in production.workorder_ids.mapped('state')):
production.state = 'progress'
elif not float_is_zero(production.qty_producing, precision_rounding=production.product_uom_id.rounding):
production.state = 'progress'
elif any(not float_is_zero(move.quantity_done, precision_rounding=move.product_uom.rounding or move.product_id.uom_id.rounding) for move in production.move_raw_ids):
production.state = 'progress'
@api.depends('state', 'move_raw_ids.state')
def _compute_reservation_state(self):
for production in self:
# Compute reservation state
# State where the reservation does not matter.
production.reservation_state = False
# Compute reservation state according to its component's moves.
if production.state not in ('draft', 'done', 'cancel'):
relevant_move_state = production.move_raw_ids._get_relevant_state_among_moves()
if relevant_move_state == 'partially_available':
if production.bom_id.operation_ids and production.bom_id.ready_to_produce == 'asap':
production.reservation_state = production._get_ready_to_produce_state()
else:
production.reservation_state = 'confirmed'
elif relevant_move_state != 'draft':
production.reservation_state = relevant_move_state
@api.depends('move_raw_ids', 'state', 'move_raw_ids.product_uom_qty')
def _compute_unreserve_visible(self):
for order in self:
already_reserved = order.state not in ('done', 'cancel') and order.mapped('move_raw_ids.move_line_ids')
any_quantity_done = any(m.quantity_done > 0 for m in order.move_raw_ids)
order.unreserve_visible = not any_quantity_done and already_reserved
order.reserve_visible = order.state in ('confirmed', 'progress', 'to_close') and any(move.product_uom_qty and move.state in ['confirmed', 'partially_available'] for move in order.move_raw_ids)
@api.depends('workorder_ids.state', 'move_finished_ids', 'move_finished_ids.quantity_done')
def _get_produced_qty(self):
for production in self:
done_moves = production.move_finished_ids.filtered(lambda x: x.state != 'cancel' and x.product_id.id == production.product_id.id)
qty_produced = sum(done_moves.mapped('quantity_done'))
production.qty_produced = qty_produced
return True
def _compute_scrap_move_count(self):
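        # Count the scrap orders per MO with a single grouped query.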
data = self.env['stock.scrap'].read_group([('production_id', 'in', self.ids)], ['production_id'], ['production_id'])
count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
for production in self:
production.scrap_count = count_data.get(production.id, 0)
@api.depends('move_finished_ids')
def _compute_move_byproduct_ids(self):
for order in self:
order.move_byproduct_ids = order.move_finished_ids.filtered(lambda m: m.product_id != order.product_id)
def _set_move_byproduct_ids(self):
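        # By-products are stored together with the finished product moves: writing
        # move_byproduct_ids rebuilds move_finished_ids from both sets.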
move_finished_ids = self.move_finished_ids.filtered(lambda m: m.product_id == self.product_id)
self.move_finished_ids = move_finished_ids | self.move_byproduct_ids
@api.depends('state')
def _compute_show_lock(self):
for order in self:
order.show_lock = self.env.user.has_group('mrp.group_locked_by_default') and order.id is not False and order.state not in {'cancel', 'draft'}
@api.depends('state','move_raw_ids')
def _compute_show_lot_ids(self):
for order in self:
order.show_lot_ids = order.state != 'draft' and any(m.product_id.tracking == 'serial' for m in order.move_raw_ids)
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
('qty_positive', 'check (product_qty > 0)', 'The quantity to produce must be positive!'),
]
@api.depends('product_uom_qty', 'date_planned_start')
def _compute_forecasted_issue(self):
for order in self:
warehouse = order.location_dest_id.warehouse_id
order.forecasted_issue = False
if order.product_id:
virtual_available = order.product_id.with_context(warehouse=warehouse.id, to_date=order.date_planned_start).virtual_available
if order.state == 'draft':
virtual_available += order.product_uom_qty
if virtual_available < 0:
order.forecasted_issue = True
@api.model
def _search_delay_alert_date(self, operator, value):
late_stock_moves = self.env['stock.move'].search([('delay_alert_date', operator, value)])
return ['|', ('move_raw_ids', 'in', late_stock_moves.ids), ('move_finished_ids', 'in', late_stock_moves.ids)]
@api.onchange('company_id')
def _onchange_company_id(self):
if self.company_id:
if self.move_raw_ids:
self.move_raw_ids.update({'company_id': self.company_id})
if self.picking_type_id and self.picking_type_id.company_id != self.company_id:
self.picking_type_id = self.env['stock.picking.type'].search([
('code', '=', 'mrp_operation'),
('warehouse_id.company_id', '=', self.company_id.id),
], limit=1).id
@api.onchange('product_id', 'picking_type_id', 'company_id')
def _onchange_product_id(self):
""" Finds UoM of changed product. """
if not self.product_id:
self.bom_id = False
elif not self.bom_id or self.bom_id.product_tmpl_id != self.product_tmpl_id or (self.bom_id.product_id and self.bom_id.product_id != self.product_id):
bom = self.env['mrp.bom']._bom_find(self.product_id, picking_type=self.picking_type_id, company_id=self.company_id.id, bom_type='normal')[self.product_id]
if bom:
self.bom_id = bom.id
self.product_qty = self.bom_id.product_qty
self.product_uom_id = self.bom_id.product_uom_id.id
else:
self.bom_id = False
self.product_uom_id = self.product_id.uom_id.id
@api.onchange('product_qty', 'product_uom_id')
def _onchange_product_qty(self):
for workorder in self.workorder_ids:
workorder.product_uom_id = self.product_uom_id
if self._origin.product_qty:
workorder.duration_expected = workorder._get_duration_expected(ratio=self.product_qty / self._origin.product_qty)
else:
workorder.duration_expected = workorder._get_duration_expected()
if workorder.date_planned_start and workorder.duration_expected:
workorder.date_planned_finished = workorder.date_planned_start + relativedelta(minutes=workorder.duration_expected)
@api.onchange('bom_id')
def _onchange_bom_id(self):
if not self.product_id and self.bom_id:
self.product_id = self.bom_id.product_id or self.bom_id.product_tmpl_id.product_variant_ids[0]
self.product_qty = self.bom_id.product_qty or 1.0
self.product_uom_id = self.bom_id and self.bom_id.product_uom_id.id or self.product_id.uom_id.id
self.move_raw_ids = [(2, move.id) for move in self.move_raw_ids.filtered(lambda m: m.bom_line_id)]
self.move_finished_ids = [(2, move.id) for move in self.move_finished_ids]
self.picking_type_id = self.bom_id.picking_type_id or self.picking_type_id
@api.onchange('date_planned_start', 'product_id')
def _onchange_date_planned_start(self):
if self.date_planned_start and not self.is_planned:
date_planned_finished = self.date_planned_start + relativedelta(days=self.product_id.produce_delay)
date_planned_finished = date_planned_finished + relativedelta(days=self.company_id.manufacturing_lead)
if date_planned_finished == self.date_planned_start:
date_planned_finished = date_planned_finished + relativedelta(hours=1)
self.date_planned_finished = date_planned_finished
self.move_raw_ids = [(1, m.id, {'date': self.date_planned_start}) for m in self.move_raw_ids]
self.move_finished_ids = [(1, m.id, {'date': date_planned_finished}) for m in self.move_finished_ids]
@api.onchange('bom_id', 'product_id', 'product_qty', 'product_uom_id')
def _onchange_move_raw(self):
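        """Rebuild the component moves from the BoM lines, keeping the manually
        added moves and updating the existing BoM-driven ones in place."""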
if not self.bom_id and not self._origin.product_id:
return
# Clear move raws if we are changing the product. In case of creation (self._origin is empty),
# we need to avoid keeping incorrect lines, so clearing is necessary too.
if self.product_id != self._origin.product_id:
self.move_raw_ids = [(5,)]
if self.bom_id and self.product_qty > 0:
# keep manual entries
list_move_raw = [(4, move.id) for move in self.move_raw_ids.filtered(lambda m: not m.bom_line_id)]
moves_raw_values = self._get_moves_raw_values()
move_raw_dict = {move.bom_line_id.id: move for move in self.move_raw_ids.filtered(lambda m: m.bom_line_id)}
for move_raw_values in moves_raw_values:
if move_raw_values['bom_line_id'] in move_raw_dict:
# update existing entries
list_move_raw += [(1, move_raw_dict[move_raw_values['bom_line_id']].id, move_raw_values)]
else:
# add new entries
list_move_raw += [(0, 0, move_raw_values)]
self.move_raw_ids = list_move_raw
else:
self.move_raw_ids = [(2, move.id) for move in self.move_raw_ids.filtered(lambda m: m.bom_line_id)]
@api.onchange('bom_id', 'product_id', 'product_qty', 'product_uom_id')
def _onchange_move_finished(self):
if self.product_id and self.product_qty > 0:
# keep manual entries
list_move_finished = [(4, move.id) for move in self.move_finished_ids.filtered(
lambda m: not m.byproduct_id and m.product_id != self.product_id)]
moves_finished_values = self._get_moves_finished_values()
moves_byproduct_dict = {move.byproduct_id.id: move for move in self.move_finished_ids.filtered(lambda m: m.byproduct_id)}
move_finished = self.move_finished_ids.filtered(lambda m: m.product_id == self.product_id)
for move_finished_values in moves_finished_values:
if move_finished_values.get('byproduct_id') in moves_byproduct_dict:
# update existing entries
list_move_finished += [(1, moves_byproduct_dict[move_finished_values['byproduct_id']].id, move_finished_values)]
elif move_finished_values.get('product_id') == self.product_id.id and move_finished:
list_move_finished += [(1, move_finished.id, move_finished_values)]
else:
# add new entries
list_move_finished += [(0, 0, move_finished_values)]
self.move_finished_ids = list_move_finished
else:
self.move_finished_ids = [(2, move.id) for move in self.move_finished_ids.filtered(lambda m: m.bom_line_id)]
@api.onchange('location_src_id', 'move_raw_ids', 'bom_id')
def _onchange_location(self):
source_location = self.location_src_id
self.move_raw_ids.update({
'warehouse_id': source_location.warehouse_id.id,
'location_id': source_location.id,
})
@api.onchange('location_dest_id', 'move_finished_ids', 'bom_id')
def _onchange_location_dest(self):
destination_location = self.location_dest_id
update_value_list = []
for move in self.move_finished_ids:
update_value_list += [(1, move.id, ({
'warehouse_id': destination_location.warehouse_id.id,
'location_dest_id': destination_location.id,
}))]
self.move_finished_ids = update_value_list
@api.onchange('picking_type_id')
def _onchange_picking_type(self):
if not self.picking_type_id.default_location_src_id or not self.picking_type_id.default_location_dest_id.id:
company_id = self.company_id.id if (self.company_id and self.company_id in self.env.companies) else self.env.company.id
fallback_loc = self.env['stock.warehouse'].search([('company_id', '=', company_id)], limit=1).lot_stock_id
self.location_src_id = self.picking_type_id.default_location_src_id.id or fallback_loc.id
self.location_dest_id = self.picking_type_id.default_location_dest_id.id or fallback_loc.id
@api.onchange('qty_producing', 'lot_producing_id')
def _onchange_producing(self):
self._set_qty_producing()
@api.onchange('lot_producing_id')
def _onchange_lot_producing(self):
if self.product_id.tracking == 'serial' and self.lot_producing_id:
message, dummy = self.env['stock.quant']._check_serial_number(self.product_id,
self.lot_producing_id,
self.company_id)
if message:
return {'warning': {'title': _('Warning'), 'message': message}}
@api.onchange('bom_id')
def _onchange_workorder_ids(self):
if self.bom_id:
self._create_workorder()
else:
self.workorder_ids = False
def write(self, vals):
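        # Remember which MOs are already planned: a change of scheduled date unplans
        # them, and a change on their moves/work orders triggers a replan below.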
if 'workorder_ids' in self:
production_to_replan = self.filtered(lambda p: p.is_planned)
res = super(MrpProduction, self).write(vals)
for production in self:
if 'date_planned_start' in vals and not self.env.context.get('force_date', False):
if production.state in ['done', 'cancel']:
raise UserError(_('You cannot move a manufacturing order once it is cancelled or done.'))
if production.is_planned:
production.button_unplan()
if vals.get('date_planned_start'):
production.move_raw_ids.write({'date': production.date_planned_start, 'date_deadline': production.date_planned_start})
if vals.get('date_planned_finished'):
production.move_finished_ids.write({'date': production.date_planned_finished})
if any(field in ['move_raw_ids', 'move_finished_ids', 'workorder_ids'] for field in vals) and production.state != 'draft':
if production.state == 'done':
# for some reason moves added after state = 'done' won't save group_id, reference if added in
# "stock_move.default_get()"
production.move_raw_ids.filtered(lambda move: move.additional and move.date > production.date_planned_start).write({
'group_id': production.procurement_group_id.id,
'reference': production.name,
'date': production.date_planned_start,
'date_deadline': production.date_planned_start
})
production.move_finished_ids.filtered(lambda move: move.additional and move.date > production.date_planned_finished).write({
'reference': production.name,
'date': production.date_planned_finished,
'date_deadline': production.date_deadline
})
production._autoconfirm_production()
if production in production_to_replan:
production._plan_workorders(replan=True)
if production.state == 'done' and ('lot_producing_id' in vals or 'qty_producing' in vals):
finished_move_lines = production.move_finished_ids.filtered(
lambda move: move.product_id == self.product_id and move.state == 'done').mapped('move_line_ids')
if 'lot_producing_id' in vals:
finished_move_lines.write({'lot_id': vals.get('lot_producing_id')})
if 'qty_producing' in vals:
finished_move_lines.write({'qty_done': vals.get('qty_producing')})
if not production.bom_id.operation_ids and vals.get('date_planned_start') and not vals.get('date_planned_finished'):
new_date_planned_start = fields.Datetime.to_datetime(vals.get('date_planned_start'))
if not production.date_planned_finished or new_date_planned_start >= production.date_planned_finished:
production.date_planned_finished = new_date_planned_start + datetime.timedelta(hours=1)
return res
@api.model
def create(self, values):
# Remove from `move_finished_ids` the by-product moves and then move `move_byproduct_ids`
# into `move_finished_ids` to avoid duplicate and inconsistency.
if values.get('move_finished_ids', False):
values['move_finished_ids'] = list(filter(lambda move: move[2]['byproduct_id'] is False, values['move_finished_ids']))
if values.get('move_byproduct_ids', False):
values['move_finished_ids'] = values.get('move_finished_ids', []) + values['move_byproduct_ids']
del values['move_byproduct_ids']
if not values.get('name', False) or values['name'] == _('New'):
picking_type_id = values.get('picking_type_id') or self._get_default_picking_type()
picking_type_id = self.env['stock.picking.type'].browse(picking_type_id)
if picking_type_id:
values['name'] = picking_type_id.sequence_id.next_by_id()
else:
values['name'] = self.env['ir.sequence'].next_by_code('mrp.production') or _('New')
if not values.get('procurement_group_id'):
procurement_group_vals = self._prepare_procurement_group_vals(values)
values['procurement_group_id'] = self.env["procurement.group"].create(procurement_group_vals).id
production = super(MrpProduction, self).create(values)
(production.move_raw_ids | production.move_finished_ids).write({
'group_id': production.procurement_group_id.id,
'origin': production.name
})
production.move_raw_ids.write({'date': production.date_planned_start})
production.move_finished_ids.write({'date': production.date_planned_finished})
# Trigger move_raw creation when importing a file
if 'import_file' in self.env.context:
production._onchange_move_raw()
production._onchange_move_finished()
return production
@api.ondelete(at_uninstall=False)
def _unlink_except_done(self):
if any(production.state == 'done' for production in self):
raise UserError(_('Cannot delete a manufacturing order in done state.'))
not_cancel = self.filtered(lambda m: m.state != 'cancel')
if not_cancel:
productions_name = ', '.join([prod.display_name for prod in not_cancel])
raise UserError(_('%s cannot be deleted. Try to cancel them before.', productions_name))
def unlink(self):
self.action_cancel()
workorders_to_delete = self.workorder_ids.filtered(lambda wo: wo.state != 'done')
if workorders_to_delete:
workorders_to_delete.unlink()
return super(MrpProduction, self).unlink()
def action_toggle_is_locked(self):
self.ensure_one()
self.is_locked = not self.is_locked
return True
def action_product_forecast_report(self):
self.ensure_one()
action = self.product_id.action_product_forecast_report()
action['context'] = {
'active_id': self.product_id.id,
'active_model': 'product.product',
'move_to_match_ids': self.move_finished_ids.filtered(lambda m: m.product_id == self.product_id).ids
}
warehouse = self.picking_type_id.warehouse_id
if warehouse:
action['context']['warehouse'] = warehouse.id
return action
def _create_workorder(self):
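        """Create the work orders from the BoM operations. Phantom BoMs are
        exploded first, and operations shared with the parent BoM are skipped
        so they are not created twice."""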
for production in self:
if not production.bom_id:
continue
workorders_values = []
product_qty = production.product_uom_id._compute_quantity(production.product_qty, production.bom_id.product_uom_id)
exploded_boms, dummy = production.bom_id.explode(production.product_id, product_qty / production.bom_id.product_qty, picking_type=production.bom_id.picking_type_id)
for bom, bom_data in exploded_boms:
# If the operations of the parent BoM and phantom BoM are the same, don't recreate work orders.
if not (bom.operation_ids and (not bom_data['parent_line'] or bom_data['parent_line'].bom_id.operation_ids != bom.operation_ids)):
continue
for operation in bom.operation_ids:
workorders_values += [{
'name': operation.name,
'production_id': production.id,
'workcenter_id': operation.workcenter_id.id,
'product_uom_id': production.product_uom_id.id,
'operation_id': operation.id,
'state': 'pending',
}]
production.workorder_ids = [(5, 0)] + [(0, 0, value) for value in workorders_values]
for workorder in production.workorder_ids:
workorder.duration_expected = workorder._get_duration_expected()
def _get_move_finished_values(self, product_id, product_uom_qty, product_uom, operation_id=False, byproduct_id=False):
group_orders = self.procurement_group_id.mrp_production_ids
move_dest_ids = self.move_dest_ids
if len(group_orders) > 1:
move_dest_ids |= group_orders[0].move_finished_ids.filtered(lambda m: m.product_id == self.product_id).move_dest_ids
date_planned_finished = self.date_planned_start + relativedelta(days=self.product_id.produce_delay)
date_planned_finished = date_planned_finished + relativedelta(days=self.company_id.manufacturing_lead)
if date_planned_finished == self.date_planned_start:
date_planned_finished = date_planned_finished + relativedelta(hours=1)
return {
'product_id': product_id,
'product_uom_qty': product_uom_qty,
'product_uom': product_uom,
'operation_id': operation_id,
'byproduct_id': byproduct_id,
'name': self.name,
'date': date_planned_finished,
'date_deadline': self.date_deadline,
'picking_type_id': self.picking_type_id.id,
'location_id': self.product_id.with_company(self.company_id).property_stock_production.id,
'location_dest_id': self.location_dest_id.id,
'company_id': self.company_id.id,
'production_id': self.id,
'warehouse_id': self.location_dest_id.warehouse_id.id,
'origin': self.name,
'group_id': self.procurement_group_id.id,
'propagate_cancel': self.propagate_cancel,
'move_dest_ids': [(4, x.id) for x in move_dest_ids],
}
def _get_moves_finished_values(self):
moves = []
for production in self:
if production.product_id in production.bom_id.byproduct_ids.mapped('product_id'):
raise UserError(_("You cannot have %s as the finished product and in the Byproducts", self.product_id.name))
moves.append(production._get_move_finished_values(production.product_id.id, production.product_qty, production.product_uom_id.id))
for byproduct in production.bom_id.byproduct_ids:
product_uom_factor = production.product_uom_id._compute_quantity(production.product_qty, production.bom_id.product_uom_id)
qty = byproduct.product_qty * (product_uom_factor / production.bom_id.product_qty)
moves.append(production._get_move_finished_values(
byproduct.product_id.id, qty, byproduct.product_uom_id.id,
byproduct.operation_id.id, byproduct.id))
return moves
def _get_moves_raw_values(self):
moves = []
for production in self:
if not production.bom_id:
continue
factor = production.product_uom_id._compute_quantity(production.product_qty, production.bom_id.product_uom_id) / production.bom_id.product_qty
boms, lines = production.bom_id.explode(production.product_id, factor, picking_type=production.bom_id.picking_type_id)
for bom_line, line_data in lines:
if bom_line.child_bom_id and bom_line.child_bom_id.type == 'phantom' or\
bom_line.product_id.type not in ['product', 'consu']:
continue
operation = bom_line.operation_id.id or line_data['parent_line'] and line_data['parent_line'].operation_id.id
moves.append(production._get_move_raw_values(
bom_line.product_id,
line_data['qty'],
bom_line.product_uom_id,
operation,
bom_line
))
return moves
def _get_move_raw_values(self, product_id, product_uom_qty, product_uom, operation_id=False, bom_line=False):
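        """Prepare the values of a component move: from the components location
        of the MO to the virtual production location of the finished product."""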
source_location = self.location_src_id
data = {
'sequence': bom_line.sequence if bom_line else 10,
'name': self.name,
'date': self.date_planned_start,
'date_deadline': self.date_planned_start,
'bom_line_id': bom_line.id if bom_line else False,
'picking_type_id': self.picking_type_id.id,
'product_id': product_id.id,
'product_uom_qty': product_uom_qty,
'product_uom': product_uom.id,
'location_id': source_location.id,
'location_dest_id': self.product_id.with_company(self.company_id).property_stock_production.id,
'raw_material_production_id': self.id,
'company_id': self.company_id.id,
'operation_id': operation_id,
'price_unit': product_id.standard_price,
'procure_method': 'make_to_stock',
'origin': self.name,
'state': 'draft',
'warehouse_id': source_location.warehouse_id.id,
'group_id': self.procurement_group_id.id,
'propagate_cancel': self.propagate_cancel,
}
return data
def _set_qty_producing(self):
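        """Force the quantity being produced to one unit for serial-tracked
        products, then spread it on the component (and by-product) move lines
        according to their unit factor."""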
if self.product_id.tracking == 'serial':
qty_producing_uom = self.product_uom_id._compute_quantity(self.qty_producing, self.product_id.uom_id, rounding_method='HALF-UP')
if qty_producing_uom != 1:
self.qty_producing = self.product_id.uom_id._compute_quantity(1, self.product_uom_id, rounding_method='HALF-UP')
for move in (self.move_raw_ids | self.move_finished_ids.filtered(lambda m: m.product_id != self.product_id)):
if move._should_bypass_set_qty_producing() or not move.product_uom:
continue
new_qty = float_round((self.qty_producing - self.qty_produced) * move.unit_factor, precision_rounding=move.product_uom.rounding)
move.move_line_ids.filtered(lambda ml: ml.state not in ('done', 'cancel')).qty_done = 0
move.move_line_ids = move._set_quantity_done_prepare_vals(new_qty)
def _update_raw_moves(self, factor):
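        """Scale the demand of the component moves by ``factor``. Moves whose new
        quantity falls to zero are cancelled and removed; if some quantity was
        already consumed on them, an error is raised instead."""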
self.ensure_one()
update_info = []
move_to_unlink = self.env['stock.move']
for move in self.move_raw_ids.filtered(lambda m: m.state not in ('done', 'cancel')):
old_qty = move.product_uom_qty
new_qty = old_qty * factor
if new_qty > 0:
move.write({'product_uom_qty': new_qty})
move._action_assign()
update_info.append((move, old_qty, new_qty))
else:
if move.quantity_done > 0:
                    raise UserError(_('Lines need to be deleted, but cannot be as you still have some quantities to consume in them.'))
move._action_cancel()
move_to_unlink |= move
move_to_unlink.unlink()
return update_info
def _get_ready_to_produce_state(self):
""" returns 'assigned' if enough components are reserved in order to complete
the first operation of the bom. If not returns 'waiting'
"""
self.ensure_one()
first_operation = self.bom_id.operation_ids[0]
if len(self.bom_id.operation_ids) == 1:
moves_in_first_operation = self.move_raw_ids
else:
moves_in_first_operation = self.move_raw_ids.filtered(lambda move: move.operation_id == first_operation)
moves_in_first_operation = moves_in_first_operation.filtered(
lambda move: move.bom_line_id and
not move.bom_line_id._skip_bom_line(self.product_id)
)
if all(move.state == 'assigned' for move in moves_in_first_operation):
return 'assigned'
return 'confirmed'
def _autoconfirm_production(self):
"""Automatically run `action_confirm` on `self`.
If the production has one of its move was added after the initial call
to `action_confirm`.
"""
moves_to_confirm = self.env['stock.move']
for production in self:
if production.state in ('done', 'cancel'):
continue
additional_moves = production.move_raw_ids.filtered(
lambda move: move.state == 'draft'
)
additional_moves.write({
'group_id': production.procurement_group_id.id,
})
additional_moves._adjust_procure_method()
moves_to_confirm |= additional_moves
additional_byproducts = production.move_finished_ids.filtered(
lambda move: move.state == 'draft'
)
moves_to_confirm |= additional_byproducts
if moves_to_confirm:
moves_to_confirm = moves_to_confirm._action_confirm()
# run scheduler for moves forecasted to not have enough in stock
moves_to_confirm._trigger_scheduler()
self.workorder_ids.filtered(lambda w: w.state not in ['done', 'cancel'])._action_confirm()
def action_view_mrp_production_childs(self):
self.ensure_one()
mrp_production_ids = self.procurement_group_id.stock_move_ids.created_production_id.procurement_group_id.mrp_production_ids.ids
action = {
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
}
if len(mrp_production_ids) == 1:
action.update({
'view_mode': 'form',
'res_id': mrp_production_ids[0],
})
else:
action.update({
'name': _("%s Child MO's") % self.name,
'domain': [('id', 'in', mrp_production_ids)],
'view_mode': 'tree,form',
})
return action
def action_view_mrp_production_sources(self):
self.ensure_one()
mrp_production_ids = self.procurement_group_id.mrp_production_ids.move_dest_ids.group_id.mrp_production_ids.ids
action = {
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
}
if len(mrp_production_ids) == 1:
action.update({
'view_mode': 'form',
'res_id': mrp_production_ids[0],
})
else:
action.update({
'name': _("MO Generated by %s") % self.name,
'domain': [('id', 'in', mrp_production_ids)],
'view_mode': 'tree,form',
})
return action
def action_view_mrp_production_backorders(self):
backorder_ids = self.procurement_group_id.mrp_production_ids.ids
return {
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
'name': _("Backorder MO's"),
'domain': [('id', 'in', backorder_ids)],
'view_mode': 'tree,form',
}
def action_generate_serial(self):
self.ensure_one()
self.lot_producing_id = self.env['stock.production.lot'].create({
'product_id': self.product_id.id,
'company_id': self.company_id.id
})
if self.move_finished_ids.filtered(lambda m: m.product_id == self.product_id).move_line_ids:
self.move_finished_ids.filtered(lambda m: m.product_id == self.product_id).move_line_ids.lot_id = self.lot_producing_id
if self.product_id.tracking == 'serial':
self._set_qty_producing()
def _action_generate_immediate_wizard(self):
view = self.env.ref('mrp.view_immediate_production')
return {
'name': _('Immediate Production?'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mrp.immediate.production',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'context': dict(self.env.context, default_mo_ids=[(4, mo.id) for mo in self]),
}
def action_confirm(self):
self._check_company()
for production in self:
if production.bom_id:
production.consumption = production.bom_id.consumption
# In case of Serial number tracking, force the UoM to the UoM of product
if production.product_tracking == 'serial' and production.product_uom_id != production.product_id.uom_id:
production.write({
'product_qty': production.product_uom_id._compute_quantity(production.product_qty, production.product_id.uom_id),
'product_uom_id': production.product_id.uom_id
})
for move_finish in production.move_finished_ids.filtered(lambda m: m.product_id == production.product_id):
move_finish.write({
'product_uom_qty': move_finish.product_uom._compute_quantity(move_finish.product_uom_qty, move_finish.product_id.uom_id),
'product_uom': move_finish.product_id.uom_id
})
production.move_raw_ids._adjust_procure_method()
(production.move_raw_ids | production.move_finished_ids)._action_confirm()
production.workorder_ids._action_confirm()
# run scheduler for moves forecasted to not have enough in stock
self.move_raw_ids._trigger_scheduler()
self.state = 'confirmed'
return True
def action_assign(self):
for production in self:
production.move_raw_ids._action_assign()
return True
def button_plan(self):
""" Create work orders. And probably do stuff, like things. """
orders_to_plan = self.filtered(lambda order: not order.is_planned)
orders_to_confirm = orders_to_plan.filtered(lambda mo: mo.state == 'draft')
orders_to_confirm.action_confirm()
for order in orders_to_plan:
order._plan_workorders()
return True
def _plan_workorders(self, replan=False):
""" Plan all the production's workorders depending on the workcenters
work schedule.
        :param replan: If it is a replan, only ready and pending workorders will be taken into account
:type replan: bool.
"""
self.ensure_one()
if not self.workorder_ids:
return
# Schedule all work orders (new ones and those already created)
qty_to_produce = max(self.product_qty - self.qty_produced, 0)
qty_to_produce = self.product_uom_id._compute_quantity(qty_to_produce, self.product_id.uom_id)
start_date = max(self.date_planned_start, datetime.datetime.now())
if replan:
workorder_ids = self.workorder_ids.filtered(lambda wo: wo.state in ['ready', 'pending'])
# We plan the manufacturing order according to its `date_planned_start`, but if
# `date_planned_start` is in the past, we plan it as soon as possible.
workorder_ids.leave_id.unlink()
else:
workorder_ids = self.workorder_ids.filtered(lambda wo: not wo.date_planned_start)
for workorder in workorder_ids:
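            # Try the operation's work center and its alternatives, and keep the
            # one that can finish the work order the earliest.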
workcenters = workorder.workcenter_id | workorder.workcenter_id.alternative_workcenter_ids
best_finished_date = datetime.datetime.max
vals = {}
for workcenter in workcenters:
# compute theoretical duration
if workorder.workcenter_id == workcenter:
duration_expected = workorder.duration_expected
else:
duration_expected = workorder._get_duration_expected(alternative_workcenter=workcenter)
from_date, to_date = workcenter._get_first_available_slot(start_date, duration_expected)
# If the workcenter is unavailable, try planning on the next one
if not from_date:
continue
# Check if this workcenter is better than the previous ones
if to_date and to_date < best_finished_date:
best_start_date = from_date
best_finished_date = to_date
best_workcenter = workcenter
vals = {
'workcenter_id': workcenter.id,
'duration_expected': duration_expected,
}
# If none of the workcenter are available, raise
if best_finished_date == datetime.datetime.max:
raise UserError(_('Impossible to plan the workorder. Please check the workcenter availabilities.'))
# Instantiate start_date for the next workorder planning
if workorder.next_work_order_id:
start_date = best_finished_date
# Create leave on chosen workcenter calendar
leave = self.env['resource.calendar.leaves'].create({
'name': workorder.display_name,
'calendar_id': best_workcenter.resource_calendar_id.id,
'date_from': best_start_date,
'date_to': best_finished_date,
'resource_id': best_workcenter.resource_id.id,
'time_type': 'other'
})
vals['leave_id'] = leave.id
workorder.write(vals)
self.with_context(force_date=True).write({
'date_planned_start': self.workorder_ids[0].date_planned_start,
'date_planned_finished': self.workorder_ids[-1].date_planned_finished
})
def button_unplan(self):
if any(wo.state == 'done' for wo in self.workorder_ids):
raise UserError(_("Some work orders are already done, you cannot unplan this manufacturing order."))
elif any(wo.state == 'progress' for wo in self.workorder_ids):
raise UserError(_("Some work orders have already started, you cannot unplan this manufacturing order."))
self.workorder_ids.leave_id.unlink()
self.workorder_ids.write({
'date_planned_start': False,
'date_planned_finished': False,
})
def _get_consumption_issues(self):
"""Compare the quantity consumed of the components, the expected quantity
on the BoM and the consumption parameter on the order.
:return: list of tuples (order_id, product_id, consumed_qty, expected_qty) where the
consumption isn't honored. order_id and product_id are recordset of mrp.production
and product.product respectively
:rtype: list
"""
issues = []
if self.env.context.get('skip_consumption', False) or self.env.context.get('skip_immediate', False):
return issues
for order in self:
if order.consumption == 'flexible' or not order.bom_id or not order.bom_id.bom_line_ids:
continue
expected_move_values = order._get_moves_raw_values()
expected_qty_by_product = defaultdict(float)
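            # The expected consumption is pro-rated by the quantity currently being produced.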
for move_values in expected_move_values:
move_product = self.env['product.product'].browse(move_values['product_id'])
move_uom = self.env['uom.uom'].browse(move_values['product_uom'])
move_product_qty = move_uom._compute_quantity(move_values['product_uom_qty'], move_product.uom_id)
expected_qty_by_product[move_product] += move_product_qty * order.qty_producing / order.product_qty
done_qty_by_product = defaultdict(float)
for move in order.move_raw_ids:
qty_done = move.product_uom._compute_quantity(move.quantity_done, move.product_id.uom_id)
rounding = move.product_id.uom_id.rounding
if not (move.product_id in expected_qty_by_product or float_is_zero(qty_done, precision_rounding=rounding)):
issues.append((order, move.product_id, qty_done, 0.0))
continue
done_qty_by_product[move.product_id] += qty_done
for product, qty_to_consume in expected_qty_by_product.items():
qty_done = done_qty_by_product.get(product, 0.0)
if float_compare(qty_to_consume, qty_done, precision_rounding=product.uom_id.rounding) != 0:
issues.append((order, product, qty_done, qty_to_consume))
return issues
def _action_generate_consumption_wizard(self, consumption_issues):
ctx = self.env.context.copy()
lines = []
for order, product_id, consumed_qty, expected_qty in consumption_issues:
lines.append((0, 0, {
'mrp_production_id': order.id,
'product_id': product_id.id,
'consumption': order.consumption,
'product_uom_id': product_id.uom_id.id,
'product_consumed_qty_uom': consumed_qty,
'product_expected_qty_uom': expected_qty
}))
ctx.update({'default_mrp_production_ids': self.ids, 'default_mrp_consumption_warning_line_ids': lines})
action = self.env["ir.actions.actions"]._for_xml_id("mrp.action_mrp_consumption_warning")
action['context'] = ctx
return action
def _get_quantity_produced_issues(self):
quantity_issues = []
if self.env.context.get('skip_backorder', False):
return quantity_issues
for order in self:
if not float_is_zero(order._get_quantity_to_backorder(), precision_rounding=order.product_uom_id.rounding):
quantity_issues.append(order)
return quantity_issues
def _action_generate_backorder_wizard(self, quantity_issues):
ctx = self.env.context.copy()
lines = []
for order in quantity_issues:
lines.append((0, 0, {
'mrp_production_id': order.id,
'to_backorder': True
}))
ctx.update({'default_mrp_production_ids': self.ids, 'default_mrp_production_backorder_line_ids': lines})
action = self.env["ir.actions.actions"]._for_xml_id("mrp.action_mrp_production_backorder")
action['context'] = ctx
return action
def action_cancel(self):
""" Cancels production order, unfinished stock moves and set procurement
orders in exception """
if not self.move_raw_ids:
self.state = 'cancel'
return True
self._action_cancel()
return True
def _action_cancel(self):
documents_by_production = {}
for production in self:
documents = defaultdict(list)
for move_raw_id in self.move_raw_ids.filtered(lambda m: m.state not in ('done', 'cancel')):
iterate_key = self._get_document_iterate_key(move_raw_id)
if iterate_key:
document = self.env['stock.picking']._log_activity_get_documents({move_raw_id: (move_raw_id.product_uom_qty, 0)}, iterate_key, 'UP')
for key, value in document.items():
documents[key] += [value]
if documents:
documents_by_production[production] = documents
# log an activity on Parent MO if child MO is cancelled.
finish_moves = production.move_finished_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
if finish_moves:
production._log_downside_manufactured_quantity({finish_move: (production.product_uom_qty, 0.0) for finish_move in finish_moves}, cancel=True)
self.workorder_ids.filtered(lambda x: x.state not in ['done', 'cancel']).action_cancel()
finish_moves = self.move_finished_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
raw_moves = self.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
(finish_moves | raw_moves)._action_cancel()
picking_ids = self.picking_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
picking_ids.action_cancel()
for production, documents in documents_by_production.items():
filtered_documents = {}
for (parent, responsible), rendering_context in documents.items():
if not parent or parent._name == 'stock.picking' and parent.state == 'cancel' or parent == production:
continue
filtered_documents[(parent, responsible)] = rendering_context
production._log_manufacture_exception(filtered_documents, cancel=True)
# In case of a flexible BOM, we don't know from the state of the moves if the MO should
# remain in progress or done. Indeed, if all moves are done/cancel but the quantity produced
# is lower than expected, it might mean:
# - we have used all components but we still want to produce the quantity expected
# - we have used all components and we won't be able to produce the last units
#
# However, if the user clicks on 'Cancel', it is expected that the MO is either done or
# canceled. If the MO is still in progress at this point, it means that the move raws
# are either all done or a mix of done / canceled => the MO should be done.
self.filtered(lambda p: p.state not in ['done', 'cancel'] and p.bom_id.consumption == 'flexible').write({'state': 'done'})
return True
def _get_document_iterate_key(self, move_raw_id):
return move_raw_id.move_orig_ids and 'move_orig_ids' or False
def _cal_price(self, consumed_moves):
self.ensure_one()
return True
def _post_inventory(self, cancel_backorder=False):
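        """Validate the consumed moves, then the finished ones: set the produced
        quantity and lot on the finished product move and link the consumed move
        lines to the finished move lines."""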
for order in self:
moves_not_to_do = order.move_raw_ids.filtered(lambda x: x.state == 'done')
moves_to_do = order.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
for move in moves_to_do.filtered(lambda m: m.product_qty == 0.0 and m.quantity_done > 0):
move.product_uom_qty = move.quantity_done
# MRP do not merge move, catch the result of _action_done in order
# to get extra moves.
moves_to_do = moves_to_do._action_done(cancel_backorder=cancel_backorder)
moves_to_do = order.move_raw_ids.filtered(lambda x: x.state == 'done') - moves_not_to_do
finish_moves = order.move_finished_ids.filtered(lambda m: m.product_id == order.product_id and m.state not in ('done', 'cancel'))
# the finish move can already be completed by the workorder.
if not finish_moves.quantity_done:
finish_moves.quantity_done = float_round(order.qty_producing - order.qty_produced, precision_rounding=order.product_uom_id.rounding, rounding_method='HALF-UP')
finish_moves.move_line_ids.lot_id = order.lot_producing_id
order._cal_price(moves_to_do)
moves_to_finish = order.move_finished_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
moves_to_finish = moves_to_finish._action_done(cancel_backorder=cancel_backorder)
order.action_assign()
consume_move_lines = moves_to_do.mapped('move_line_ids')
order.move_finished_ids.move_line_ids.consume_line_ids = [(6, 0, consume_move_lines.ids)]
return True
@api.model
def _get_name_backorder(self, name, sequence):
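        """Return ``name`` suffixed with the zero-padded backorder ``sequence``,
        replacing an existing suffix if there is one. For instance, with a
        padding of 3, 'MO/00012' becomes 'MO/00012-002' for sequence 2."""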
if not sequence:
return name
seq_back = "-" + "0" * (SIZE_BACK_ORDER_NUMERING - 1 - int(math.log10(sequence))) + str(sequence)
if re.search("-\\d{%d}$" % SIZE_BACK_ORDER_NUMERING, name):
return name[:-SIZE_BACK_ORDER_NUMERING-1] + seq_back
return name + seq_back
def _get_backorder_mo_vals(self):
self.ensure_one()
next_seq = max(self.procurement_group_id.mrp_production_ids.mapped("backorder_sequence"))
return {
'name': self._get_name_backorder(self.name, next_seq + 1),
'backorder_sequence': next_seq + 1,
'procurement_group_id': self.procurement_group_id.id,
'move_raw_ids': None,
'move_finished_ids': None,
'product_qty': self._get_quantity_to_backorder(),
'lot_producing_id': False,
'origin': self.origin
}
def _generate_backorder_productions(self, close_mo=True):
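        """Create one backorder MO per record for the quantity left to produce.
        With ``close_mo`` the remaining moves are reassigned to the backorder,
        otherwise they are split and only the split part is moved to it."""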
backorders = self.env['mrp.production']
for production in self:
if production.backorder_sequence == 0: # Activate backorder naming
production.backorder_sequence = 1
backorder_mo = production.copy(default=production._get_backorder_mo_vals())
if close_mo:
production.move_raw_ids.filtered(lambda m: m.state not in ('done', 'cancel')).write({
'raw_material_production_id': backorder_mo.id,
})
production.move_finished_ids.filtered(lambda m: m.state not in ('done', 'cancel')).write({
'production_id': backorder_mo.id,
})
else:
new_moves_vals = []
for move in production.move_raw_ids | production.move_finished_ids:
if not move.additional:
qty_to_split = move.product_uom_qty - move.unit_factor * production.qty_producing
qty_to_split = move.product_uom._compute_quantity(qty_to_split, move.product_id.uom_id, rounding_method='HALF-UP')
move_vals = move._split(qty_to_split)
if not move_vals:
continue
if move.raw_material_production_id:
move_vals[0]['raw_material_production_id'] = backorder_mo.id
else:
move_vals[0]['production_id'] = backorder_mo.id
new_moves_vals.append(move_vals[0])
new_moves = self.env['stock.move'].create(new_moves_vals)
backorders |= backorder_mo
for old_wo, wo in zip(production.workorder_ids, backorder_mo.workorder_ids):
wo.qty_produced = max(old_wo.qty_produced - old_wo.qty_producing, 0)
if wo.product_tracking == 'serial':
wo.qty_producing = 1
else:
wo.qty_producing = wo.qty_remaining
production.name = self._get_name_backorder(production.name, production.backorder_sequence)
# We need to adapt `duration_expected` on both the original workorders and their
# backordered workorders. To do that, we use the original `duration_expected` and the
# ratio of the quantity really produced and the quantity to produce.
ratio = production.qty_producing / production.product_qty
for workorder in production.workorder_ids:
workorder.duration_expected = workorder.duration_expected * ratio
for workorder in backorder_mo.workorder_ids:
workorder.duration_expected = workorder.duration_expected * (1 - ratio)
# As we have split the moves before validating them, we need to 'remove' the excess reservation
if not close_mo:
self.move_raw_ids.filtered(lambda m: not m.additional)._do_unreserve()
self.move_raw_ids.filtered(lambda m: not m.additional)._action_assign()
backorders.action_confirm()
backorders.action_assign()
        # Remove the serial move lines without reserved quantity. Post inventory will
        # assign all the non-done moves again, so those move lines would be duplicated.
backorders.move_raw_ids.move_line_ids.filtered(lambda ml: ml.product_id.tracking == 'serial' and ml.product_qty == 0).unlink()
backorders.move_raw_ids._recompute_state()
return backorders
def button_mark_done(self):
self._button_mark_done_sanity_checks()
if not self.env.context.get('button_mark_done_production_ids'):
self = self.with_context(button_mark_done_production_ids=self.ids)
res = self._pre_button_mark_done()
if res is not True:
return res
if self.env.context.get('mo_ids_to_backorder'):
productions_to_backorder = self.browse(self.env.context['mo_ids_to_backorder'])
productions_not_to_backorder = self - productions_to_backorder
close_mo = False
else:
productions_not_to_backorder = self
productions_to_backorder = self.env['mrp.production']
close_mo = True
self.workorder_ids.button_finish()
backorders = productions_to_backorder._generate_backorder_productions(close_mo=close_mo)
productions_not_to_backorder._post_inventory(cancel_backorder=True)
productions_to_backorder._post_inventory(cancel_backorder=False)
# if completed products make other confirmed/partially_available moves available, assign them
done_move_finished_ids = (productions_to_backorder.move_finished_ids | productions_not_to_backorder.move_finished_ids).filtered(lambda m: m.state == 'done')
done_move_finished_ids._trigger_assign()
# Moves without quantity done are not posted => set them as done instead of canceling. In
# case the user edits the MO later on and sets some consumed quantity on those, we do not
# want the move lines to be canceled.
(productions_not_to_backorder.move_raw_ids | productions_not_to_backorder.move_finished_ids).filtered(lambda x: x.state not in ('done', 'cancel')).write({
'state': 'done',
'product_uom_qty': 0.0,
})
for production in self:
production.write({
'date_finished': fields.Datetime.now(),
'product_qty': production.qty_produced,
'priority': '0',
'is_locked': True,
'state': 'done',
})
for workorder in self.workorder_ids.filtered(lambda w: w.state not in ('done', 'cancel')):
workorder.duration_expected = workorder._get_duration_expected()
if not backorders:
if self.env.context.get('from_workorder'):
return {
'type': 'ir.actions.act_window',
'res_model': 'mrp.production',
'views': [[self.env.ref('mrp.mrp_production_form_view').id, 'form']],
'res_id': self.id,
'target': 'main',
}
return True
context = self.env.context.copy()
context = {k: v for k, v in context.items() if not k.startswith('default_')}
for k, v in context.items():
if k.startswith('skip_'):
context[k] = False
action = {
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
'context': dict(context, mo_ids_to_backorder=None)
}
if len(backorders) == 1:
action.update({
'view_mode': 'form',
'res_id': backorders[0].id,
})
else:
action.update({
'name': _("Backorder MO"),
'domain': [('id', 'in', backorders.ids)],
'view_mode': 'tree,form',
})
return action
def _pre_button_mark_done(self):
productions_to_immediate = self._check_immediate()
if productions_to_immediate:
return productions_to_immediate._action_generate_immediate_wizard()
for production in self:
if float_is_zero(production.qty_producing, precision_rounding=production.product_uom_id.rounding):
raise UserError(_('The quantity to produce must be positive!'))
if not any(production.move_raw_ids.mapped('quantity_done')):
raise UserError(_("You must indicate a non-zero amount consumed for at least one of your components"))
consumption_issues = self._get_consumption_issues()
if consumption_issues:
return self._action_generate_consumption_wizard(consumption_issues)
quantity_issues = self._get_quantity_produced_issues()
if quantity_issues:
return self._action_generate_backorder_wizard(quantity_issues)
return True
def _button_mark_done_sanity_checks(self):
self._check_company()
for order in self:
order._check_sn_uniqueness()
def do_unreserve(self):
self.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel'))._do_unreserve()
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_production_id': self.id,
'product_ids': (self.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) | self.move_finished_ids.filtered(lambda x: x.state == 'done')).mapped('product_id').ids,
'default_company_id': self.company_id.id
},
'target': 'new',
}
def action_see_move_scrap(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("stock.action_stock_scrap")
action['domain'] = [('production_id', '=', self.id)]
action['context'] = dict(self._context, default_origin=self.name)
return action
@api.model
def get_empty_list_help(self, help):
self = self.with_context(
empty_list_help_document_name=_("manufacturing order"),
)
return super(MrpProduction, self).get_empty_list_help(help)
def _log_downside_manufactured_quantity(self, moves_modification, cancel=False):
def _keys_in_sorted(move):
""" sort by picking and the responsible for the product the
move.
"""
return (move.picking_id.id, move.product_id.responsible_id.id)
def _keys_in_groupby(move):
""" group by picking and the responsible for the product the
move.
"""
return (move.picking_id, move.product_id.responsible_id)
def _render_note_exception_quantity_mo(rendering_context):
values = {
'production_order': self,
'order_exceptions': rendering_context,
'impacted_pickings': False,
'cancel': cancel
}
return self.env.ref('mrp.exception_on_mo')._render(values=values)
documents = self.env['stock.picking']._log_activity_get_documents(moves_modification, 'move_dest_ids', 'DOWN', _keys_in_sorted, _keys_in_groupby)
documents = self.env['stock.picking']._less_quantities_than_expected_add_documents(moves_modification, documents)
self.env['stock.picking']._log_activity(_render_note_exception_quantity_mo, documents)
def _log_manufacture_exception(self, documents, cancel=False):
def _render_note_exception_quantity_mo(rendering_context):
visited_objects = []
order_exceptions = {}
for exception in rendering_context:
order_exception, visited = exception
order_exceptions.update(order_exception)
visited_objects += visited
visited_objects = self.env[visited_objects[0]._name].concat(*visited_objects)
impacted_object = []
if visited_objects and visited_objects._name == 'stock.move':
visited_objects |= visited_objects.mapped('move_orig_ids')
impacted_object = visited_objects.filtered(lambda m: m.state not in ('done', 'cancel')).mapped('picking_id')
values = {
'production_order': self,
'order_exceptions': order_exceptions,
'impacted_object': impacted_object,
'cancel': cancel
}
return self.env.ref('mrp.exception_on_mo')._render(values=values)
self.env['stock.picking']._log_activity(_render_note_exception_quantity_mo, documents)
def button_unbuild(self):
self.ensure_one()
return {
'name': _('Unbuild: %s', self.product_id.display_name),
'view_mode': 'form',
'res_model': 'mrp.unbuild',
'view_id': self.env.ref('mrp.mrp_unbuild_form_view_simplified').id,
'type': 'ir.actions.act_window',
'context': {'default_product_id': self.product_id.id,
'default_mo_id': self.id,
'default_company_id': self.company_id.id,
'default_location_id': self.location_dest_id.id,
'default_location_dest_id': self.location_src_id.id,
'create': False, 'edit': False},
'target': 'new',
}
@api.model
def _prepare_procurement_group_vals(self, values):
return {'name': values['name']}
def _get_quantity_to_backorder(self):
self.ensure_one()
return max(self.product_qty - self.qty_producing, 0)
def _check_sn_uniqueness(self):
""" Alert the user if the serial number as already been consumed/produced """
if self.product_tracking == 'serial' and self.lot_producing_id:
sml = self.env['stock.move.line'].search_count([
('lot_id', '=', self.lot_producing_id.id),
('location_id.usage', '=', 'production'),
('qty_done', '=', 1),
('state', '=', 'done')
])
if sml:
raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))
for move in self.move_finished_ids:
if move.has_tracking != 'serial' or move.product_id == self.product_id:
continue
for move_line in move.move_line_ids:
domain = [
('lot_id', '=', move_line.lot_id.id),
('qty_done', '=', 1),
('state', '=', 'done')
]
message = _('The serial number %(number)s used for byproduct %(product_name)s has already been produced',
number=move_line.lot_id.name,
product_name=move_line.product_id.name)
co_prod_move_lines = self.move_finished_ids.move_line_ids.filtered(lambda ml: ml.product_id != self.product_id)
domain_unbuild = domain + [
('production_id', '=', False),
('location_dest_id.usage', '=', 'production')
]
# Check presence of same sn in previous productions
duplicates = self.env['stock.move.line'].search_count(domain + [
('location_id.usage', '=', 'production')
])
if duplicates:
# Maybe some move lines have been compensated by unbuild
duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [
('move_id.unbuild_id', '!=', False)
])
if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):
raise UserError(message)
# Check presence of same sn in current production
duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line
if duplicates:
raise UserError(message)
for move in self.move_raw_ids:
if move.has_tracking != 'serial':
continue
for move_line in move.move_line_ids:
if float_is_zero(move_line.qty_done, precision_rounding=move_line.product_uom_id.rounding):
continue
domain = [
('lot_id', '=', move_line.lot_id.id),
('qty_done', '=', 1),
('state', '=', 'done')
]
message = _('The serial number %(number)s used for component %(component)s has already been consumed',
number=move_line.lot_id.name,
component=move_line.product_id.name)
co_prod_move_lines = self.move_raw_ids.move_line_ids
domain_unbuild = domain + [
('production_id', '=', False),
('location_id.usage', '=', 'production')
]
# Check presence of same sn in previous productions
duplicates = self.env['stock.move.line'].search_count(domain + [
('location_dest_id.usage', '=', 'production')
])
if duplicates:
# Maybe some move lines have been compensated by unbuild
duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [
('move_id.unbuild_id', '!=', False)
])
if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):
raise UserError(message)
# Check presence of same sn in current production
duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line
if duplicates:
raise UserError(message)
def _check_immediate(self):
immediate_productions = self.browse()
if self.env.context.get('skip_immediate'):
return immediate_productions
pd = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for production in self:
if all(float_is_zero(ml.qty_done, precision_digits=pd) for
ml in production.move_raw_ids.move_line_ids.filtered(lambda m: m.state not in ('done', 'cancel'))
) and float_is_zero(production.qty_producing, precision_digits=pd):
immediate_productions |= production
return immediate_productions
| gpl-3.0 | -4,215,170,603,825,880,000 | 51.695652 | 204 | 0.603059 | false | 3.928855 | false | false | false |
Zolomon/pos-hidden-markov | noise_channel.py | 1 | 4279 | from corpus import Corpus
import processing
__author__ = 'bengt'
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('train', help='Path to training corpus.')
parser.add_argument('corpus', help='Path to corpus.')
parser.add_argument('n', help='Tag sentences shorter than this length.')
args = parser.parse_args()
train_corpus = Corpus(args.train)
corpus = Corpus(args.corpus)
n = int(args.n)
pos_frequencies = processing.pos_frequencies(train_corpus)
poses_for_word_from_train, total_pos_count = processing.calculate_poses_for_word(train_corpus)
pos_bigram_probabilities_train = processing.calculate_pos_bigram_probabilities(train_corpus)
word_pos_probabilities_train = processing.calculate_word_pos_probabilities(train_corpus)
sentences = [sentence for sentence in corpus.get_sentences() if len(sentence) < n]
WORD_GIVEN_POS = 0
POS_GIVEN_PREVPOS = 1
for sentence in sentences:
prev_pos = '<s>'
columns = {}
current_sentence = []
for word in sentence:
id, form, lemma, plemma, pos, ppos = word
current_sentence.append([id, form, lemma, plemma, pos])
columns[id] = {}
if form in poses_for_word_from_train:
for (pos_for_word, pos_for_word_count) in poses_for_word_from_train[form].items():
p_word_given_pos = word_pos_probabilities_train['{0} {1}'.format(form, pos_for_word)]
pos_bigram = '{0} {1}'.format(prev_pos, pos_for_word)
if pos_bigram in pos_bigram_probabilities_train:
p_pos_given_prevpos = pos_bigram_probabilities_train[pos_bigram]
else:
p_pos_given_prevpos = 0.00001 # Low chance that this is what we want
columns[id][pos_for_word] = {}
columns[id][pos_for_word][WORD_GIVEN_POS] = p_word_given_pos
columns[id][pos_for_word][POS_GIVEN_PREVPOS] = p_pos_given_prevpos
else:
most_common_pos = max(pos_frequencies.items(), key=lambda x: x[1])
if form in word_pos_probabilities_train:
p_word_given_pos = word_pos_probabilities_train['{0} {1}'.format(form, most_common_pos[0])]
else:
p_word_given_pos = 0.00001 # Low chance that this is what we want
p_pos_given_prevpos = pos_bigram_probabilities_train['{0} {1}'.format(prev_pos, most_common_pos[0])]
columns[id][most_common_pos[0]] = {}
columns[id][most_common_pos[0]][WORD_GIVEN_POS] = p_word_given_pos
columns[id][most_common_pos[0]][POS_GIVEN_PREVPOS] = p_pos_given_prevpos
prev_pos = pos
path = {}
trellis = {}
for (column_id, poses) in sorted(columns.items(), key=lambda x: int(x[0])):
column_id = int(column_id)
trellis[column_id] = {}
for (current_pos, data) in poses.items():
current_word_given_pos = data[WORD_GIVEN_POS]
current_pos_given_prevpos = data[POS_GIVEN_PREVPOS]
if column_id == 0:
break
elif column_id == 1:
trellis[column_id][current_pos] = current_word_given_pos * current_pos_given_prevpos
else:
max_prev_column = max([(id, data * current_pos_given_prevpos) for id, data in
trellis[column_id - 1].items()
], key=lambda x: x[1])
p = max_prev_column[1] * current_word_given_pos
trellis[column_id][current_pos] = p
if column_id == 0:
continue
else:
path[column_id] = (max(trellis[column_id].items(), key=lambda x: x[1])[0])
for (id, predicted) in sorted(path.items(), key=lambda x: x[0]):
if id == 1:
print()
id, form, lemma, plemma, pos = current_sentence[id]
print('{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(id, form, lemma, plemma, pos, predicted))
if __name__ == '__main__':
main() | mit | 6,255,098,241,318,718,000 | 41.376238 | 116 | 0.550362 | false | 3.553987 | false | false | false |
icea-dev/atn-sim-ng | configs/core/home/myservices/cyber_attack.py | 1 | 2197 | #
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
''' Sample user-defined service.
'''
import os
from core.service import CoreService, addservice
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
class CyberAttack(CoreService):
''' This is a sample user-defined service.
'''
# a unique name is required, without spaces
_name = "CyberAttack"
# you can create your own group here
_group = "Security"
# list of other services this service depends on
_depends = ()
# per-node directories
_dirs = ()
# generated files (without a full path this file goes in the node's dir,
# e.g. /tmp/pycore.12345/n1.conf/)
_configs = ('cyberattack.sh', )
# this controls the starting order vs other enabled services
_startindex = 50
# list of startup commands, also may be generated during startup
_startup = ('sh cyberattack.sh',)
# list of shutdown commands
_shutdown = ('pkill python',)
@classmethod
def generateconfig(cls, node, filename, services):
''' Return a string that will be written to filename, or sent to the
GUI for user customization.
'''
# get lat/lng position
l_lat, l_lng, l_alt = node.session.location.getgeo(node.position.x, node.position.y, node.position.z)
cfg = "#!/bin/sh\n"
cfg += "# auto-generated by CyberAttack (cyber_attack.py)\n"
cfg += "sleep 15\n"
#cfg += "python -m atn.surveillance.adsb.security.adsb_ghost --rewrite-icao24 --flood\n"
cfg += "python -m atn.surveillance.adsb.security.cyber_attack {} {} {} {} \n".format(int(node.objid), l_lat, l_lng, l_alt)
return cfg
@staticmethod
def subnetentry(x):
''' Generate a subnet declaration block given an IPv4 prefix string
for inclusion in the config file.
'''
if x.find(":") >= 0:
# this is an IPv6 address
return ""
else:
net = IPv4Prefix(x)
return 'echo " network %s"' % (net)
# this line is required to add the above class to the list of available services
addservice(CyberAttack)
| gpl-3.0 | 3,616,359,763,887,543,000 | 32.8 | 130 | 0.631771 | false | 3.673913 | false | false | false |
jlev/ballistic-missile-range | plot.py | 1 | 68191 | #-----------------------------------------------------------------------------
# Name: wx.lib.plot.py
# Purpose: Line, Bar and Scatter Graphs
#
# Author: Gordon Williams
#
# Created: 2003/11/03
# RCS-ID: $Id: plot.py,v 1.13 2005/05/09 19:59:34 RD Exp $
# Copyright: (c) 2002
# Licence: Use as you wish.
#-----------------------------------------------------------------------------
# 12/15/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 compatibility update.
# o Renamed to plot.py in the wx.lib directory.
# o Reworked test frame to work with wx demo framework. This saves a bit
# of tedious cut and paste, and the test app is excellent.
#
# 12/18/2003 - Jeff Grimmett ([email protected])
#
# o wxScrolledMessageDialog -> ScrolledMessageDialog
#
# Oct 6, 2004 Gordon Williams ([email protected])
# - Added bar graph demo
# - Modified line end shape from round to square.
# - Removed FloatDCWrapper for conversion to ints and ints in arguments
#
# Oct 15, 2004 Gordon Williams ([email protected])
# - Imported modules given leading underscore to name.
# - Added Cursor Line Tracking and User Point Labels.
# - Demo for Cursor Line Tracking and Point Labels.
# - Size of plot preview frame adjusted to show page better.
# - Added helper functions PositionUserToScreen and PositionScreenToUser in PlotCanvas.
# - Added functions GetClosestPoints (all curves) and GetClosestPoint (only closest curve)
# can be in either user coords or screen coords.
#
#
"""
This is a simple light weight plotting module that can be used with
Boa or easily integrated into your own wxPython application. The
emphasis is on small size and fast plotting for large data sets. It
has a reasonable number of features to do line and scatter graphs
easily as well as simple bar graphs. It is not as sophisticated or
as powerful as SciPy Plt or Chaco. Both of these are great packages
but consume huge amounts of computer resources for simple plots.
They can be found at http://scipy.com
This file contains two parts; first the re-usable library stuff, then,
after a "if __name__=='__main__'" test, a simple frame and a few default
plots for examples and testing.
Based on wxPlotCanvas
Written by K.Hinsen, R. Srinivasan;
Ported to wxPython Harm van der Heijden, feb 1999
Major Additions Gordon Williams Feb. 2003 ([email protected])
-More style options
-Zooming using mouse "rubber band"
-Scroll left, right
-Grid(graticule)
-Printing, preview, and page set up (margins)
-Axis and title labels
-Cursor xy axis values
-Doc strings and lots of comments
-Optimizations for large number of points
-Legends
Did a lot of work here to speed markers up. Only a factor of 4
improvement though. Lines are much faster than markers, especially
filled markers. Stay away from circles and triangles unless you
only have a few thousand points.
Times for 25,000 points
Line - 0.078 sec
Markers
Square - 0.22 sec
dot - 0.10
circle - 0.87
cross,plus - 0.28
triangle, triangle_down - 0.90
Thanks to Chris Barker for getting this version working on Linux.
Zooming controls with mouse (when enabled):
Left mouse drag - Zoom box.
Left mouse double click - reset zoom.
Right mouse click - zoom out centred on click location.
"""
import string as _string
import time as _time
import wx
try:
import numpy as numpy
except ImportError:
msg= """
    This module requires the numpy module,
which could not be imported. It probably is not installed
(it's not part of the standard Python distribution). See the
Scientific Python site (http://www.scipy.org) for information on
downloading source or binaries."""
raise ImportError, "numpy not found. \n" + msg
#
# Plotting classes...
#
class PolyPoints:
"""Base Class for lines and markers
- All methods are private.
"""
def __init__(self, points, attr):
self.points = numpy.array(points)
self.currentScale= (1,1)
self.currentShift= (0,0)
self.scaled = self.points
self.attributes = {}
self.attributes.update(self._attributes)
for name, value in attr.items():
if name not in self._attributes.keys():
raise KeyError, "Style attribute incorrect. Should be one of %s" % self._attributes.keys()
self.attributes[name] = value
def boundingBox(self):
if len(self.points) == 0:
# no curves to draw
# defaults to (-1,-1) and (1,1) but axis can be set in Draw
minXY= numpy.array([-1,-1])
maxXY= numpy.array([ 1, 1])
else:
minXY= numpy.minimum.reduce(self.points)
maxXY= numpy.maximum.reduce(self.points)
return minXY, maxXY
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
if len(self.points) == 0:
# no curves to draw
return
if (scale is not self.currentScale) or (shift is not self.currentShift):
# update point scaling
self.scaled = scale*self.points+shift
self.currentScale= scale
self.currentShift= shift
# else unchanged use the current scaling
def getLegend(self):
return self.attributes['legend']
def getClosestPoint(self, pntXY, pointScaled= True):
"""Returns the index of closest point on the curve, pointXY, scaledXY, distance
x, y in user coords
if pointScaled == True based on screen coords
if pointScaled == False based on user coords
"""
if pointScaled == True:
#Using screen coords
p = self.scaled
pxy = self.currentScale * numpy.array(pntXY)+ self.currentShift
else:
#Using user coords
p = self.points
pxy = numpy.array(pntXY)
#determine distance for each point
d= numpy.sqrt(numpy.add.reduce((p-pxy)**2,1)) #sqrt(dx^2+dy^2)
pntIndex = numpy.argmin(d)
dist = d[pntIndex]
return [pntIndex, self.points[pntIndex], self.scaled[pntIndex], dist]
class PolyLine(PolyPoints):
"""Class to define line type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.SOLID,
'legend': ''}
def __init__(self, points, **attr):
"""Creates PolyLine object
points - sequence (array, tuple or list) of (x,y) points making up line
**attr - key word attributes
Defaults:
'colour'= 'black', - wx.Pen Colour any wx.NamedColour
'width'= 1, - Pen width
'style'= wx.SOLID, - wx.Pen style
'legend'= '' - Line Legend to display
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale
style= self.attributes['style']
pen = wx.Pen(wx.NamedColour(colour), width, style)
pen.SetCap(wx.CAP_BUTT)
dc.SetPen(pen)
if coord == None:
dc.DrawLines(self.scaled)
else:
dc.DrawLines(coord) # draw legend line
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
h= self.attributes['width'] * printerScale
w= 5 * h
return (w,h)
class PolyMarker(PolyPoints):
"""Class to define marker type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'size': 2,
'fillcolour': None,
'fillstyle': wx.SOLID,
'marker': 'circle',
'legend': ''}
def __init__(self, points, **attr):
"""Creates PolyMarker object
points - sequence (array, tuple or list) of (x,y) points
**attr - key word attributes
Defaults:
'colour'= 'black', - wx.Pen Colour any wx.NamedColour
'width'= 1, - Pen width
'size'= 2, - Marker size
'fillcolour'= same as colour, - wx.Brush Colour any wx.NamedColour
'fillstyle'= wx.SOLID, - wx.Brush fill style (use wx.TRANSPARENT for no fill)
'marker'= 'circle' - Marker shape
'legend'= '' - Marker Legend to display
Marker Shapes:
- 'circle'
- 'dot'
- 'square'
- 'triangle'
- 'triangle_down'
- 'cross'
- 'plus'
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale
size = self.attributes['size'] * printerScale
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
marker = self.attributes['marker']
dc.SetPen(wx.Pen(wx.NamedColour(colour), width))
if fillcolour:
dc.SetBrush(wx.Brush(wx.NamedColour(fillcolour),fillstyle))
else:
dc.SetBrush(wx.Brush(wx.NamedColour(colour), fillstyle))
if coord == None:
self._drawmarkers(dc, self.scaled, marker, size)
else:
self._drawmarkers(dc, coord, marker, size) # draw legend marker
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
s = 5 * self.attributes['size'] * printerScale
return (s,s)
def _drawmarkers(self, dc, coords, marker,size=1):
        f = getattr(self, '_' + marker)  # look up the drawing method for this marker shape
f(dc, coords, size)
def _circle(self, dc, coords, size=1):
fact = 2.5 * size
wh = 5.0 * size
        rect = numpy.zeros((len(coords),4),numpy.float64)+[0.0,0.0,wh,wh]
rect[:,0:2] = coords-[fact,fact]
dc.DrawEllipseList(rect.astype(numpy.int32))
def _dot(self, dc, coords, size=1):
dc.DrawPointList(coords)
def _square(self, dc, coords, size=1):
fact = 2.5*size
wh = 5.0*size
        rect = numpy.zeros((len(coords),4),numpy.float64)+[0.0,0.0,wh,wh]
rect[:,0:2] = coords-[fact,fact]
dc.DrawRectangleList(rect.astype(numpy.int32))
def _triangle(self, dc, coords, size=1):
shape = [(-2.5*size,1.44*size), (2.5*size,1.44*size), (0.0,-2.88*size)]
        poly = numpy.repeat(coords, 3, axis=0)  # one row per triangle vertex (repeat along axis 0)
poly.shape = (len(coords),3,2)
poly += shape
dc.DrawPolygonList(poly.astype(numpy.int32))
def _triangle_down(self, dc, coords, size=1):
shape = [(-2.5*size,-1.44*size), (2.5*size,-1.44*size), (0.0,2.88*size)]
        poly = numpy.repeat(coords, 3, axis=0)  # one row per triangle vertex (repeat along axis 0)
poly.shape = (len(coords),3,2)
poly += shape
dc.DrawPolygonList(poly.astype(numpy.int32))
def _cross(self, dc, coords, size=1):
fact = 2.5*size
for f in [[-fact,-fact,fact,fact],[-fact,fact,fact,-fact]]:
lines = numpy.concatenate((coords,coords),axis=1)+f
dc.DrawLineList(lines.astype(numpy.int32))
def _plus(self, dc, coords, size=1):
fact = 2.5*size
for f in [[-fact,0,fact,0],[0,-fact,0,fact]]:
lines = numpy.concatenate((coords,coords),axis=1)+f
dc.DrawLineList(lines.astype(numpy.int32))
class PlotGraphics:
"""Container to hold PolyXXX objects and graph labels
- All methods except __init__ are private.
"""
def __init__(self, objects, title='', xLabel='', yLabel= ''):
"""Creates PlotGraphics object
objects - list of PolyXXX objects to make graph
title - title shown at top of graph
xLabel - label shown on x-axis
yLabel - label shown on y-axis
"""
if type(objects) not in [list,tuple]:
raise TypeError, "objects argument should be list or tuple"
self.objects = objects
self.title= title
self.xLabel= xLabel
self.yLabel= yLabel
def boundingBox(self):
p1, p2 = self.objects[0].boundingBox()
for o in self.objects[1:]:
p1o, p2o = o.boundingBox()
p1 = numpy.minimum(p1, p1o)
p2 = numpy.maximum(p2, p2o)
return p1, p2
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
for o in self.objects:
o.scaleAndShift(scale, shift)
def setPrinterScale(self, scale):
"""Thickens up lines and markers only for printing"""
self.printerScale= scale
def setXLabel(self, xLabel= ''):
"""Set the X axis label on the graph"""
self.xLabel= xLabel
def setYLabel(self, yLabel= ''):
"""Set the Y axis label on the graph"""
self.yLabel= yLabel
def setTitle(self, title= ''):
"""Set the title at the top of graph"""
self.title= title
def getXLabel(self):
"""Get x axis label string"""
return self.xLabel
def getYLabel(self):
"""Get y axis label string"""
return self.yLabel
def getTitle(self, title= ''):
"""Get the title at the top of graph"""
return self.title
def draw(self, dc):
for o in self.objects:
#t=_time.clock() # profile info
o.draw(dc, self.printerScale)
#dt= _time.clock()-t
#print o, "time=", dt
def getSymExtent(self, printerScale):
"""Get max width and height of lines and markers symbols for legend"""
symExt = self.objects[0].getSymExtent(printerScale)
for o in self.objects[1:]:
oSymExt = o.getSymExtent(printerScale)
symExt = numpy.maximum(symExt, oSymExt)
return symExt
def getLegendNames(self):
"""Returns list of legend names"""
lst = [None]*len(self)
for i in range(len(self)):
lst[i]= self.objects[i].getLegend()
return lst
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
#-------------------------------------------------------------------------------
# Main window that you will want to import into your application.
class PlotCanvas(wx.Window):
"""Subclass of a wx.Window to allow simple general plotting
of data with zoom, labels, and automatic axis scaling."""
def __init__(self,
parent,
id=-1,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE,
name=""):
"""Constucts a window, which can be a child of a frame, dialog or
any other non-control window"""
wx.Window.__init__(self, parent, id, pos, size, style, name)
self.border = (1,1)
self.SetBackgroundColour("white")
# Create some mouse events for zooming
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnMouseDoubleClick)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnMouseRightDown)
        # set cursor as cross-hairs
self.SetCursor(wx.CROSS_CURSOR)
# Things for printing
self.print_data = wx.PrintData()
self.print_data.SetPaperId(wx.PAPER_LETTER)
self.print_data.SetOrientation(wx.LANDSCAPE)
self.pageSetupData= wx.PageSetupDialogData()
self.pageSetupData.SetMarginBottomRight((25,25))
self.pageSetupData.SetMarginTopLeft((25,25))
self.pageSetupData.SetPrintData(self.print_data)
self.printerScale = 1
self.parent= parent
# Zooming variables
self._zoomInFactor = 0.5
self._zoomOutFactor = 2
self._zoomCorner1= numpy.array([0.0, 0.0]) # left mouse down corner
self._zoomCorner2= numpy.array([0.0, 0.0]) # left mouse up corner
self._zoomEnabled= False
self._hasDragged= False
# Drawing Variables
self.last_draw = None
self._pointScale= 1
self._pointShift= 0
self._xSpec= 'auto'
self._ySpec= 'auto'
self._gridEnabled= False
self._legendEnabled= False
# Fonts
self._fontCache = {}
self._fontSizeAxis= 10
self._fontSizeTitle= 15
self._fontSizeLegend= 7
# pointLabels
self._pointLabelEnabled= False
self.last_PointLabel= None
self._pointLabelFunc= None
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
if wx.Platform != "__WXMAC__":
self.OnSize(None) # sets the initial size based on client size
# SaveFile
def SaveFile(self, fileName= ''):
"""Saves the file to the type specified in the extension. If no file
        name is specified, a dialog box is provided. Returns True if successful,
otherwise False.
.bmp Save a Windows bitmap file.
.xbm Save an X bitmap file.
.xpm Save an XPM bitmap file.
.png Save a Portable Network Graphics file.
.jpg Save a Joint Photographic Experts Group file.
"""
if _string.lower(fileName[-3:]) not in ['bmp','xbm','xpm','png','jpg']:
dlg1 = wx.FileDialog(
self,
"Choose a file with extension bmp, gif, xbm, xpm, png, or jpg", ".", "",
"BMP files (*.bmp)|*.bmp|XBM files (*.xbm)|*.xbm|XPM file (*.xpm)|*.xpm|PNG files (*.png)|*.png|JPG files (*.jpg)|*.jpg",
wx.SAVE|wx.OVERWRITE_PROMPT
)
try:
while 1:
if dlg1.ShowModal() == wx.ID_OK:
fileName = dlg1.GetPath()
                    # Check for proper extension
if _string.lower(fileName[-3:]) not in ['bmp','xbm','xpm','png','jpg']:
dlg2 = wx.MessageDialog(self, 'File name extension\n'
'must be one of\n'
'bmp, xbm, xpm, png, or jpg',
'File Name Error', wx.OK | wx.ICON_ERROR)
try:
dlg2.ShowModal()
finally:
dlg2.Destroy()
else:
break # now save file
else: # exit without saving
return False
finally:
dlg1.Destroy()
# File name has required extension
fType = _string.lower(fileName[-3:])
if fType == "bmp":
tp= wx.BITMAP_TYPE_BMP # Save a Windows bitmap file.
elif fType == "xbm":
tp= wx.BITMAP_TYPE_XBM # Save an X bitmap file.
elif fType == "xpm":
tp= wx.BITMAP_TYPE_XPM # Save an XPM bitmap file.
elif fType == "jpg":
tp= wx.BITMAP_TYPE_JPEG # Save a JPG file.
else:
tp= wx.BITMAP_TYPE_PNG # Save a PNG file.
# Save Bitmap
res= self._Buffer.SaveFile(fileName, tp)
return res
def PageSetup(self):
"""Brings up the page setup dialog"""
data = self.pageSetupData
data.SetPrintData(self.print_data)
dlg = wx.PageSetupDialog(self.parent, data)
try:
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData() # returns wx.PageSetupDialogData
# updates page parameters from dialog
self.pageSetupData.SetMarginBottomRight(data.GetMarginBottomRight())
self.pageSetupData.SetMarginTopLeft(data.GetMarginTopLeft())
self.pageSetupData.SetPrintData(data.GetPrintData())
self.print_data=wx.PrintData(data.GetPrintData()) # updates print_data
finally:
dlg.Destroy()
def Printout(self, paper=None):
"""Print current plot."""
if paper != None:
self.print_data.SetPaperId(paper)
pdd = wx.PrintDialogData()
pdd.SetPrintData(self.print_data)
printer = wx.Printer(pdd)
out = PlotPrintout(self)
print_ok = printer.Print(self.parent, out)
if print_ok:
self.print_data = wx.PrintData(printer.GetPrintDialogData().GetPrintData())
out.Destroy()
def PrintPreview(self):
"""Print-preview current plot."""
printout = PlotPrintout(self)
printout2 = PlotPrintout(self)
self.preview = wx.PrintPreview(printout, printout2, self.print_data)
if not self.preview.Ok():
wx.MessageDialog(self, "Print Preview failed.\n" \
"Check that default printer is configured\n", \
"Print error", wx.OK|wx.CENTRE).ShowModal()
self.preview.SetZoom(40)
# search up tree to find frame instance
frameInst= self
while not isinstance(frameInst, wx.Frame):
frameInst= frameInst.GetParent()
frame = wx.PreviewFrame(self.preview, frameInst, "Preview")
frame.Initialize()
frame.SetPosition(self.GetPosition())
frame.SetSize((600,550))
frame.Centre(wx.BOTH)
frame.Show(True)
def SetFontSizeAxis(self, point= 10):
"""Set the tick and axis label font size (default is 10 point)"""
self._fontSizeAxis= point
def GetFontSizeAxis(self):
"""Get current tick and axis label font size in points"""
return self._fontSizeAxis
def SetFontSizeTitle(self, point= 15):
"""Set Title font size (default is 15 point)"""
self._fontSizeTitle= point
def GetFontSizeTitle(self):
"""Get current Title font size in points"""
return self._fontSizeTitle
def SetFontSizeLegend(self, point= 7):
"""Set Legend font size (default is 7 point)"""
self._fontSizeLegend= point
def GetFontSizeLegend(self):
"""Get current Legend font size in points"""
return self._fontSizeLegend
def SetEnableZoom(self, value):
"""Set True to enable zooming."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._zoomEnabled= value
def GetEnableZoom(self):
"""True if zooming enabled."""
return self._zoomEnabled
def SetEnableGrid(self, value):
"""Set True to enable grid."""
if value not in [True,False,'Horizontal','Vertical']:
raise TypeError, "Value should be True, False, Horizontal or Vertical"
self._gridEnabled= value
self.Redraw()
def GetEnableGrid(self):
"""True if grid enabled."""
return self._gridEnabled
def SetEnableLegend(self, value):
"""Set True to enable legend."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._legendEnabled= value
self.Redraw()
def GetEnableLegend(self):
"""True if Legend enabled."""
return self._legendEnabled
def SetEnablePointLabel(self, value):
"""Set True to enable pointLabel."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._pointLabelEnabled= value
self.Redraw() #will erase existing pointLabel if present
self.last_PointLabel = None
def GetEnablePointLabel(self):
"""True if pointLabel enabled."""
return self._pointLabelEnabled
def SetPointLabelFunc(self, func):
"""Sets the function with custom code for pointLabel drawing
******** more info needed ***************
"""
self._pointLabelFunc= func
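    # A sketch of a user-supplied pointLabel function (illustrative; the dict keys shown are
    # an assumption, since mDataDict is whatever the caller passes to UpdatePointLabel):
    #
    #   def DrawPointLabel(dc, mDataDict):
    #       dc.SetPen(wx.Pen(wx.BLACK))
    #       dc.SetBrush(wx.Brush(wx.BLACK, wx.TRANSPARENT))
    #       sx, sy = mDataDict["scaledXY"]                      # closest point in screen coords
    #       dc.DrawRectangle(int(sx) - 5, int(sy) - 5, 10, 10)  # 10x10 box centred on the point
    #       dc.DrawText(str(mDataDict["pointXY"]), int(sx) + 8, int(sy) + 8)
    #
    #   canvas.SetPointLabelFunc(DrawPointLabel)
    #   canvas.SetEnablePointLabel(True)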
def GetPointLabelFunc(self):
"""Returns pointLabel Drawing Function"""
return self._pointLabelFunc
def Reset(self):
"""Unzoom the plot."""
self.last_PointLabel = None #reset pointLabel
if self.last_draw is not None:
self.Draw(self.last_draw[0])
def ScrollRight(self, units):
"""Move view right number of axis units."""
self.last_PointLabel = None #reset pointLabel
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
xAxis= (xAxis[0]+units, xAxis[1]+units)
self.Draw(graphics,xAxis,yAxis)
def ScrollUp(self, units):
"""Move view up number of axis units."""
self.last_PointLabel = None #reset pointLabel
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
yAxis= (yAxis[0]+units, yAxis[1]+units)
self.Draw(graphics,xAxis,yAxis)
def GetXY(self,event):
"""Takes a mouse event and returns the XY user axis values."""
x,y= self.PositionScreenToUser(event.GetPosition())
return x,y
def PositionUserToScreen(self, pntXY):
"""Converts User position to Screen Coordinates"""
userPos= numpy.array(pntXY)
x,y= userPos * self._pointScale + self._pointShift
return x,y
def PositionScreenToUser(self, pntXY):
"""Converts Screen position to User Coordinates"""
screenPos= numpy.array(pntXY)
x,y= (screenPos-self._pointShift)/self._pointScale
return x,y
def SetXSpec(self, type= 'auto'):
"""xSpec- defines x axis type. Can be 'none', 'min' or 'auto'
where:
'none' - shows no axis or tick mark values
'min' - shows min bounding box values
'auto' - rounds axis range to sensible values
"""
self._xSpec= type
def SetYSpec(self, type= 'auto'):
"""ySpec- defines x axis type. Can be 'none', 'min' or 'auto'
where:
'none' - shows no axis or tick mark values
'min' - shows min bounding box values
'auto' - rounds axis range to sensible values
"""
self._ySpec= type
def GetXSpec(self):
"""Returns current XSpec for axis"""
return self._xSpec
def GetYSpec(self):
"""Returns current YSpec for axis"""
return self._ySpec
def GetXMaxRange(self):
"""Returns (minX, maxX) x-axis range for displayed graph"""
graphics= self.last_draw[0]
p1, p2 = graphics.boundingBox() # min, max points of graphics
xAxis = self._axisInterval(self._xSpec, p1[0], p2[0]) # in user units
return xAxis
def GetYMaxRange(self):
"""Returns (minY, maxY) y-axis range for displayed graph"""
graphics= self.last_draw[0]
p1, p2 = graphics.boundingBox() # min, max points of graphics
yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
return yAxis
def GetXCurrentRange(self):
"""Returns (minX, maxX) x-axis for currently displayed portion of graph"""
return self.last_draw[1]
def GetYCurrentRange(self):
"""Returns (minY, maxY) y-axis for currently displayed portion of graph"""
return self.last_draw[2]
def Draw(self, graphics, xAxis = None, yAxis = None, dc = None):
"""Draw objects in graphics with specified x and y axis.
graphics- instance of PlotGraphics with list of PolyXXX objects
xAxis - tuple with (min, max) axis range to view
yAxis - same as xAxis
dc - drawing context - doesn't have to be specified.
If it's not, the offscreen buffer is used
"""
# check Axis is either tuple or none
if type(xAxis) not in [type(None),tuple]:
raise TypeError, "xAxis should be None or (minX,maxX)"
if type(yAxis) not in [type(None),tuple]:
raise TypeError, "yAxis should be None or (minY,maxY)"
# check case for axis = (a,b) where a==b caused by improper zooms
if xAxis != None:
if xAxis[0] == xAxis[1]:
return
if yAxis != None:
if yAxis[0] == yAxis[1]:
return
if dc == None:
# sets new dc and clears it
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
dc.Clear()
# dc.BeginDrawing()
# dc.Clear()
# set font size for every thing but title and legend
dc.SetFont(self._getFont(self._fontSizeAxis))
# sizes axis to axis type, create lower left and upper right corners of plot
if xAxis == None or yAxis == None:
# One or both axis not specified in Draw
p1, p2 = graphics.boundingBox() # min, max points of graphics
if xAxis == None:
xAxis = self._axisInterval(self._xSpec, p1[0], p2[0]) # in user units
if yAxis == None:
yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
# Adjust bounding box for axis spec
p1[0],p1[1] = xAxis[0], yAxis[0] # lower left corner user scale (xmin,ymin)
p2[0],p2[1] = xAxis[1], yAxis[1] # upper right corner user scale (xmax,ymax)
else:
# Both axis specified in Draw
p1= numpy.array([xAxis[0], yAxis[0]]) # lower left corner user scale (xmin,ymin)
p2= numpy.array([xAxis[1], yAxis[1]]) # upper right corner user scale (xmax,ymax)
        self.last_draw = (graphics, xAxis, yAxis)       # saves most recent values
# Get ticks and textExtents for axis if required
        if self._xSpec != 'none':
xticks = self._ticks(xAxis[0], xAxis[1])
xTextExtent = dc.GetTextExtent(xticks[-1][1])# w h of x axis text last number on axis
else:
xticks = None
xTextExtent= (0,0) # No text for ticks
        if self._ySpec != 'none':
yticks = self._ticks(yAxis[0], yAxis[1])
yTextExtentBottom= dc.GetTextExtent(yticks[0][1])
yTextExtentTop = dc.GetTextExtent(yticks[-1][1])
yTextExtent= (max(yTextExtentBottom[0],yTextExtentTop[0]),
max(yTextExtentBottom[1],yTextExtentTop[1]))
else:
yticks = None
yTextExtent= (0,0) # No text for ticks
# TextExtents for Title and Axis Labels
titleWH, xLabelWH, yLabelWH= self._titleLablesWH(dc, graphics)
# TextExtents for Legend
legendBoxWH, legendSymExt, legendTextExt = self._legendWH(dc, graphics)
# room around graph area
rhsW= max(xTextExtent[0], legendBoxWH[0]) # use larger of number width or legend width
lhsW= yTextExtent[0]+ yLabelWH[1]
bottomH= max(xTextExtent[1], yTextExtent[1]/2.)+ xLabelWH[1]
topH= yTextExtent[1]/2. + titleWH[1]
textSize_scale= numpy.array([rhsW+lhsW,bottomH+topH]) # make plot area smaller by text size
textSize_shift= numpy.array([lhsW, bottomH]) # shift plot area by this amount
# drawing title and labels text
dc.SetFont(self._getFont(self._fontSizeTitle))
titlePos= (self.plotbox_origin[0]+ lhsW + (self.plotbox_size[0]-lhsW-rhsW)/2.- titleWH[0]/2.,
self.plotbox_origin[1]- self.plotbox_size[1])
dc.DrawText(graphics.getTitle(),titlePos[0],titlePos[1])
dc.SetFont(self._getFont(self._fontSizeAxis))
xLabelPos= (self.plotbox_origin[0]+ lhsW + (self.plotbox_size[0]-lhsW-rhsW)/2.- xLabelWH[0]/2.,
self.plotbox_origin[1]- xLabelWH[1])
dc.DrawText(graphics.getXLabel(),xLabelPos[0],xLabelPos[1])
yLabelPos= (self.plotbox_origin[0],
self.plotbox_origin[1]- bottomH- (self.plotbox_size[1]-bottomH-topH)/2.+ yLabelWH[0]/2.)
if graphics.getYLabel(): # bug fix for Linux
dc.DrawRotatedText(graphics.getYLabel(),yLabelPos[0],yLabelPos[1],90)
        # drawing legend markers and text
if self._legendEnabled:
self._drawLegend(dc,graphics,rhsW,topH,legendBoxWH, legendSymExt, legendTextExt)
# allow for scaling and shifting plotted points
scale = (self.plotbox_size-textSize_scale) / (p2-p1)* numpy.array((1,-1))
shift = -p1*scale + self.plotbox_origin + textSize_shift * numpy.array((1,-1))
self._pointScale= scale # make available for mouse events
self._pointShift= shift
self._drawAxes(dc, p1, p2, scale, shift, xticks, yticks)
graphics.scaleAndShift(scale, shift)
graphics.setPrinterScale(self.printerScale) # thicken up lines and markers if printing
# set clipping area so drawing does not occur outside axis box
ptx,pty,rectWidth,rectHeight= self._point2ClientCoord(p1, p2)
dc.SetClippingRegion(ptx,pty,rectWidth,rectHeight)
# Draw the lines and markers
#start = _time.clock()
graphics.draw(dc)
# print "entire graphics drawing took: %f second"%(_time.clock() - start)
# remove the clipping region
dc.DestroyClippingRegion()
# dc.EndDrawing()
def Redraw(self, dc= None):
"""Redraw the existing plot."""
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
self.Draw(graphics,xAxis,yAxis,dc)
def Clear(self):
"""Erase the window."""
self.last_PointLabel = None #reset pointLabel
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
dc.Clear()
self.last_draw = None
def Zoom(self, Center, Ratio):
""" Zoom on the plot
Centers on the X,Y coords given in Center
Zooms by the Ratio = (Xratio, Yratio) given
"""
        self.last_PointLabel = None        #reset marker
x,y = Center
if self.last_draw != None:
(graphics, xAxis, yAxis) = self.last_draw
w = (xAxis[1] - xAxis[0]) * Ratio[0]
h = (yAxis[1] - yAxis[0]) * Ratio[1]
xAxis = ( x - w/2, x + w/2 )
yAxis = ( y - h/2, y + h/2 )
self.Draw(graphics, xAxis, yAxis)
def GetClosestPoints(self, pntXY, pointScaled= True):
"""Returns list with
[curveNumber, legend, index of closest point, pointXY, scaledXY, distance]
list for each curve.
Returns [] if no curves are being plotted.
x, y in user coords
if pointScaled == True based on screen coords
if pointScaled == False based on user coords
"""
if self.last_draw == None:
#no graph available
return []
graphics, xAxis, yAxis= self.last_draw
l = []
for curveNum,obj in enumerate(graphics):
#check there are points in the curve
if len(obj.points) == 0:
continue #go to next obj
#[curveNumber, legend, index of closest point, pointXY, scaledXY, distance]
cn = [curveNum]+ [obj.getLegend()]+ obj.getClosestPoint( pntXY, pointScaled)
l.append(cn)
return l
def GetClosetPoint(self, pntXY, pointScaled= True):
"""Returns list with
[curveNumber, legend, index of closest point, pointXY, scaledXY, distance]
list for only the closest curve.
Returns [] if no curves are being plotted.
x, y in user coords
if pointScaled == True based on screen coords
if pointScaled == False based on user coords
"""
#closest points on screen based on screen scaling (pointScaled= True)
#list [curveNumber, index, pointXY, scaledXY, distance] for each curve
closestPts= self.GetClosestPoints(pntXY, pointScaled)
if closestPts == []:
return [] #no graph present
#find one with least distance
dists = [c[-1] for c in closestPts]
mdist = min(dists) #Min dist
i = dists.index(mdist) #index for min dist
return closestPts[i] #this is the closest point on closest curve
def UpdatePointLabel(self, mDataDict):
"""Updates the pointLabel point on screen with data contained in
mDataDict.
mDataDict will be passed to your function set by
SetPointLabelFunc. It can contain anything you
want to display on the screen at the scaledXY point
you specify.
This function can be called from parent window with onClick,
onMotion events etc.
"""
if self.last_PointLabel != None:
#compare pointXY
if mDataDict["pointXY"] != self.last_PointLabel["pointXY"]:
#closest changed
self._drawPointLabel(self.last_PointLabel) #erase old
self._drawPointLabel(mDataDict) #plot new
else:
#just plot new with no erase
self._drawPointLabel(mDataDict) #plot new
#save for next erase
self.last_PointLabel = mDataDict
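    # Illustrative wiring from a parent window (an assumption, not original code): bind the
    # canvas motion event and feed the closest curve point back into UpdatePointLabel.
    #
    #   def OnMotion(self, event):
    #       if self.canvas.GetEnablePointLabel():
    #           dlst = self.canvas.GetClosetPoint(self.canvas.GetXY(event), pointScaled=True)
    #           if dlst != []:
    #               curveNum, legend, pIndex, pointXY, scaledXY, distance = dlst
    #               mDataDict = {"curveNum": curveNum, "legend": legend, "pIndex": pIndex,
    #                            "pointXY": pointXY, "scaledXY": scaledXY}
    #               self.canvas.UpdatePointLabel(mDataDict)
    #       event.Skip()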
# event handlers **********************************
def OnMotion(self, event):
if self._zoomEnabled and event.LeftIsDown():
if self._hasDragged:
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) # remove old
else:
self._hasDragged= True
self._zoomCorner2[0], self._zoomCorner2[1] = self.GetXY(event)
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) # add new
def OnMouseLeftDown(self,event):
self._zoomCorner1[0], self._zoomCorner1[1]= self.GetXY(event)
def OnMouseLeftUp(self, event):
if self._zoomEnabled:
if self._hasDragged == True:
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) # remove old
self._zoomCorner2[0], self._zoomCorner2[1]= self.GetXY(event)
self._hasDragged = False # reset flag
minX, minY= numpy.minimum( self._zoomCorner1, self._zoomCorner2)
maxX, maxY= numpy.maximum( self._zoomCorner1, self._zoomCorner2)
self.last_PointLabel = None #reset pointLabel
if self.last_draw != None:
self.Draw(self.last_draw[0], xAxis = (minX,maxX), yAxis = (minY,maxY), dc = None)
#else: # A box has not been drawn, zoom in on a point
            ## this interfered with the double click, so I've disabled it.
# X,Y = self.GetXY(event)
# self.Zoom( (X,Y), (self._zoomInFactor,self._zoomInFactor) )
def OnMouseDoubleClick(self,event):
if self._zoomEnabled:
self.Reset()
def OnMouseRightDown(self,event):
if self._zoomEnabled:
X,Y = self.GetXY(event)
self.Zoom( (X,Y), (self._zoomOutFactor, self._zoomOutFactor) )
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
if self.last_PointLabel != None:
self._drawPointLabel(self.last_PointLabel) #erase old
self.last_PointLabel = None
dc = wx.BufferedPaintDC(self, self._Buffer)
def OnSize(self,event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
Size = self.GetClientSize()
if Size.width <= 0 or Size.height <= 0:
return
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._Buffer = wx.Bitmap(Size[0],Size[1])
self._setSize()
self.last_PointLabel = None #reset pointLabel
if self.last_draw is None:
self.Clear()
else:
graphics, xSpec, ySpec = self.last_draw
self.Draw(graphics,xSpec,ySpec)
def OnLeave(self, event):
"""Used to erase pointLabel when mouse outside window"""
if self.last_PointLabel != None:
self._drawPointLabel(self.last_PointLabel) #erase old
self.last_PointLabel = None
# Private Methods **************************************************
def _setSize(self, width=None, height=None):
"""DC width and height."""
if width == None:
(self.width,self.height) = self.GetClientSize()
else:
self.width, self.height= width,height
self.plotbox_size = 0.97*numpy.array([self.width, self.height])
xo = 0.5*(self.width-self.plotbox_size[0])
yo = self.height-0.5*(self.height-self.plotbox_size[1])
self.plotbox_origin = numpy.array([xo, yo])
def _setPrinterScale(self, scale):
"""Used to thicken lines and increase marker size for print out."""
# line thickness on printer is very thin at 600 dot/in. Markers small
self.printerScale= scale
def _printDraw(self, printDC):
"""Used for printing."""
if self.last_draw != None:
graphics, xSpec, ySpec= self.last_draw
self.Draw(graphics,xSpec,ySpec,printDC)
def _drawPointLabel(self, mDataDict):
"""Draws and erases pointLabels"""
width = self._Buffer.GetWidth()
height = self._Buffer.GetHeight()
tmp_Buffer = wx.Bitmap(width,height)
dcs = wx.MemoryDC()
dcs.SelectObject(tmp_Buffer)
dcs.Clear()
# dcs.BeginDrawing()
self._pointLabelFunc(dcs,mDataDict) #custom user pointLabel function
# dcs.EndDrawing()
dc = wx.ClientDC( self )
#this will erase if called twice
dc.Blit(0, 0, width, height, dcs, 0, 0, wx.EQUIV) #(NOT src) XOR dst
def _drawLegend(self,dc,graphics,rhsW,topH,legendBoxWH, legendSymExt, legendTextExt):
"""Draws legend symbols and text"""
# top right hand corner of graph box is ref corner
trhc= self.plotbox_origin+ (self.plotbox_size-[rhsW,topH])*[1,-1]
legendLHS= .091* legendBoxWH[0] # border space between legend sym and graph box
lineHeight= max(legendSymExt[1], legendTextExt[1]) * 1.1 #1.1 used as space between lines
dc.SetFont(self._getFont(self._fontSizeLegend))
for i in range(len(graphics)):
o = graphics[i]
s= i*lineHeight
if isinstance(o,PolyMarker):
# draw marker with legend
pnt= (trhc[0]+legendLHS+legendSymExt[0]/2., trhc[1]+s+lineHeight/2.)
o.draw(dc, self.printerScale, coord= numpy.array([pnt]))
elif isinstance(o,PolyLine):
# draw line with legend
pnt1= (trhc[0]+legendLHS, trhc[1]+s+lineHeight/2.)
pnt2= (trhc[0]+legendLHS+legendSymExt[0], trhc[1]+s+lineHeight/2.)
o.draw(dc, self.printerScale, coord= numpy.array([pnt1,pnt2]))
else:
raise TypeError, "object is neither PolyMarker or PolyLine instance"
# draw legend txt
pnt= (trhc[0]+legendLHS+legendSymExt[0], trhc[1]+s+lineHeight/2.-legendTextExt[1]/2)
dc.DrawText(o.getLegend(),pnt[0],pnt[1])
dc.SetFont(self._getFont(self._fontSizeAxis)) # reset
def _titleLablesWH(self, dc, graphics):
"""Draws Title and labels and returns width and height for each"""
# TextExtents for Title and Axis Labels
dc.SetFont(self._getFont(self._fontSizeTitle))
title= graphics.getTitle()
titleWH= dc.GetTextExtent(title)
dc.SetFont(self._getFont(self._fontSizeAxis))
xLabel, yLabel= graphics.getXLabel(),graphics.getYLabel()
xLabelWH= dc.GetTextExtent(xLabel)
yLabelWH= dc.GetTextExtent(yLabel)
return titleWH, xLabelWH, yLabelWH
def _legendWH(self, dc, graphics):
"""Returns the size in screen units for legend box"""
if self._legendEnabled != True:
legendBoxWH= symExt= txtExt= (0,0)
else:
# find max symbol size
symExt= graphics.getSymExtent(self.printerScale)
# find max legend text extent
dc.SetFont(self._getFont(self._fontSizeLegend))
txtList= graphics.getLegendNames()
txtExt= dc.GetTextExtent(txtList[0])
for txt in graphics.getLegendNames()[1:]:
txtExt= numpy.maximum(txtExt,dc.GetTextExtent(txt))
maxW= symExt[0]+txtExt[0]
maxH= max(symExt[1],txtExt[1])
# padding .1 for lhs of legend box and space between lines
maxW= maxW* 1.1
maxH= maxH* 1.1 * len(txtList)
dc.SetFont(self._getFont(self._fontSizeAxis))
legendBoxWH= (maxW,maxH)
return (legendBoxWH, symExt, txtExt)
def _drawRubberBand(self, corner1, corner2):
"""Draws/erases rect box from corner1 to corner2"""
ptx,pty,rectWidth,rectHeight= self._point2ClientCoord(corner1, corner2)
# draw rectangle
dc = wx.ClientDC( self )
# dc.BeginDrawing()
dc.SetPen(wx.Pen(wx.BLACK))
dc.SetBrush(wx.Brush( wx.WHITE, wx.TRANSPARENT ) )
dc.SetLogicalFunction(wx.INVERT)
dc.DrawRectangle( ptx,pty, rectWidth,rectHeight)
dc.SetLogicalFunction(wx.COPY)
# dc.EndDrawing()
def _getFont(self,size):
"""Take font size, adjusts if printing and returns wx.Font"""
s = size*self.printerScale
of = self.GetFont()
# Linux speed up to get font from cache rather than X font server
key = (int(s), of.GetFamily (), of.GetStyle (), of.GetWeight ())
font = self._fontCache.get (key, None)
if font:
return font # yeah! cache hit
else:
font = wx.Font(int(s), of.GetFamily(), of.GetStyle(), of.GetWeight())
self._fontCache[key] = font
return font
def _point2ClientCoord(self, corner1, corner2):
"""Converts user point coords to client screen int coords x,y,width,height"""
c1= numpy.array(corner1)
c2= numpy.array(corner2)
# convert to screen coords
pt1= c1*self._pointScale+self._pointShift
pt2= c2*self._pointScale+self._pointShift
# make height and width positive
pul= numpy.minimum(pt1,pt2) # Upper left corner
plr= numpy.maximum(pt1,pt2) # Lower right corner
rectWidth, rectHeight= plr-pul
ptx,pty= pul
return ptx, pty, rectWidth, rectHeight
def _axisInterval(self, spec, lower, upper):
"""Returns sensible axis range for given spec"""
if spec == 'none' or spec == 'min':
if lower == upper:
return lower-0.5, upper+0.5
else:
return lower, upper
elif spec == 'auto':
range = upper-lower
if range == 0.:
return lower-0.5, upper+0.5
log = numpy.log10(range)
power = numpy.floor(log)
fraction = log-power
if fraction <= 0.05:
power = power-1
grid = 10.**power
lower = lower - lower % grid
mod = upper % grid
if mod != 0:
upper = upper - mod + grid
return lower, upper
elif type(spec) == type(()):
lower, upper = spec
if lower <= upper:
return lower, upper
else:
return upper, lower
else:
raise ValueError, str(spec) + ': illegal axis specification'
def _drawAxes(self, dc, p1, p2, scale, shift, xticks, yticks):
penWidth= self.printerScale # increases thickness for printing only
dc.SetPen(wx.Pen(wx.NamedColour('BLACK'), penWidth))
# set length of tick marks--long ones make grid
if self._gridEnabled:
x,y,width,height= self._point2ClientCoord(p1,p2)
if self._gridEnabled == 'Horizontal':
yTickLength= width/2.0 +1
xTickLength= 3 * self.printerScale
elif self._gridEnabled == 'Vertical':
yTickLength= 3 * self.printerScale
xTickLength= height/2.0 +1
else:
yTickLength= width/2.0 +1
xTickLength= height/2.0 +1
else:
yTickLength= 3 * self.printerScale # lengthens lines for printing
xTickLength= 3 * self.printerScale
        if self._xSpec != 'none':
lower, upper = p1[0],p2[0]
text = 1
for y, d in [(p1[1], -xTickLength), (p2[1], xTickLength)]: # miny, maxy and tick lengths
a1 = scale*numpy.array([lower, y])+shift
a2 = scale*numpy.array([upper, y])+shift
dc.DrawLine(a1[0],a1[1],a2[0],a2[1]) # draws upper and lower axis line
for x, label in xticks:
pt = scale*numpy.array([x, y])+shift
dc.DrawLine(pt[0],pt[1],pt[0],pt[1] + d) # draws tick mark d units
if text:
dc.DrawText(label,pt[0],pt[1])
text = 0 # axis values not drawn on top side
        if self._ySpec != 'none':
lower, upper = p1[1],p2[1]
text = 1
h = dc.GetCharHeight()
for x, d in [(p1[0], -yTickLength), (p2[0], yTickLength)]:
a1 = scale*numpy.array([x, lower])+shift
a2 = scale*numpy.array([x, upper])+shift
dc.DrawLine(a1[0],a1[1],a2[0],a2[1])
for y, label in yticks:
pt = scale*numpy.array([x, y])+shift
dc.DrawLine(pt[0],pt[1],pt[0]-d,pt[1])
if text:
dc.DrawText(label,pt[0]-dc.GetTextExtent(label)[0],
pt[1]-0.5*h)
text = 0 # axis values not drawn on right side
def _ticks(self, lower, upper):
ideal = (upper-lower)/7.
log = numpy.log10(ideal)
power = numpy.floor(log)
fraction = log-power
factor = 1.
error = fraction
for f, lf in self._multiples:
e = numpy.fabs(fraction-lf)
if e < error:
error = e
factor = f
grid = factor * 10.**power
if power > 4 or power < -4:
format = '%+7.1e'
elif power >= 0:
digits = max(1, int(power))
format = '%' + `digits`+'.0f'
else:
digits = -int(power)
format = '%'+`digits+2`+'.'+`digits`+'f'
ticks = []
t = -grid*numpy.floor(-lower/grid)
while t <= upper:
ticks.append( (t, format % (t,)) )
t = t + grid
return ticks
_multiples = [(2., numpy.log10(2.)), (5., numpy.log10(5.))]
#-------------------------------------------------------------------------------
# Used to layout the printer page
class PlotPrintout(wx.Printout):
"""Controls how the plot is made in printing and previewing"""
# Do not change method names in this class,
# we have to override wx.Printout methods here!
def __init__(self, graph):
"""graph is instance of plotCanvas to be printed or previewed"""
wx.Printout.__init__(self)
self.graph = graph
def HasPage(self, page):
if page == 1:
return True
else:
return False
def GetPageInfo(self):
return (1, 1, 1, 1) # disable page numbers
def OnPrintPage(self, page):
dc = self.GetDC() # allows using floats for certain functions
## print "PPI Printer",self.GetPPIPrinter()
## print "PPI Screen", self.GetPPIScreen()
## print "DC GetSize", dc.GetSize()
## print "GetPageSizePixels", self.GetPageSizePixels()
# Note PPIScreen does not give the correct number
        # Calculate everything for printer and then scale for preview
PPIPrinter= self.GetPPIPrinter() # printer dots/inch (w,h)
#PPIScreen= self.GetPPIScreen() # screen dots/inch (w,h)
dcSize= dc.GetSize() # DC size
        pageSize= self.GetPageSizePixels() # page size in terms of pixels
clientDcSize= self.graph.GetClientSize()
# find what the margins are (mm)
margLeftSize,margTopSize= self.graph.pageSetupData.GetMarginTopLeft()
margRightSize, margBottomSize= self.graph.pageSetupData.GetMarginBottomRight()
# calculate offset and scale for dc
pixLeft= margLeftSize*PPIPrinter[0]/25.4 # mm*(dots/in)/(mm/in)
pixRight= margRightSize*PPIPrinter[0]/25.4
pixTop= margTopSize*PPIPrinter[1]/25.4
pixBottom= margBottomSize*PPIPrinter[1]/25.4
plotAreaW= pageSize[0]-(pixLeft+pixRight)
plotAreaH= pageSize[1]-(pixTop+pixBottom)
# ratio offset and scale to screen size if preview
if self.IsPreview():
ratioW= float(dcSize[0])/pageSize[0]
ratioH= float(dcSize[1])/pageSize[1]
pixLeft *= ratioW
pixTop *= ratioH
plotAreaW *= ratioW
plotAreaH *= ratioH
# rescale plot to page or preview plot area
self.graph._setSize(plotAreaW,plotAreaH)
# Set offset and scale
dc.SetDeviceOrigin(pixLeft,pixTop)
# Thicken up pens and increase marker size for printing
ratioW= float(plotAreaW)/clientDcSize[0]
ratioH= float(plotAreaH)/clientDcSize[1]
aveScale= (ratioW+ratioH)/2
        self.graph._setPrinterScale(aveScale) # thickens up pens for printing
self.graph._printDraw(dc)
# rescale back to original
self.graph._setSize()
self.graph._setPrinterScale(1)
self.graph.Redraw() #to get point label scale and shift correct
return True
#---------------------------------------------------------------------------
# if running standalone...
#
# ...a sample implementation using the above
#
def _draw1Objects():
# 100 points sin function, plotted as green circles
data1 = 2.*numpy.pi*numpy.arange(200)/200.
data1.shape = (100, 2)
data1[:,1] = numpy.sin(data1[:,0])
markers1 = PolyMarker(data1, legend='Green Markers', colour='green', marker='circle',size=1)
# 50 points cos function, plotted as red line
data1 = 2.*numpy.pi*numpy.arange(100)/100.
data1.shape = (50,2)
data1[:,1] = numpy.cos(data1[:,0])
lines = PolyLine(data1, legend= 'Red Line', colour='red')
# A few more points...
pi = numpy.pi
markers2 = PolyMarker([(0., 0.), (pi/4., 1.), (pi/2, 0.),
(3.*pi/4., -1)], legend='Cross Legend', colour='blue',
marker='cross')
return PlotGraphics([markers1, lines, markers2],"Graph Title", "X Axis", "Y Axis")
def _draw2Objects():
# 100 points sin function, plotted as green dots
data1 = 2.*numpy.pi*numpy.arange(200)/200.
data1.shape = (100, 2)
data1[:,1] = numpy.sin(data1[:,0])
line1 = PolyLine(data1, legend='Green Line', colour='green', width=6, style=wx.DOT)
# 50 points cos function, plotted as red dot-dash
data1 = 2.*numpy.pi*numpy.arange(100)/100.
data1.shape = (50,2)
data1[:,1] = numpy.cos(data1[:,0])
line2 = PolyLine(data1, legend='Red Line', colour='red', width=3, style= wx.DOT_DASH)
# A few more points...
pi = numpy.pi
markers1 = PolyMarker([(0., 0.), (pi/4., 1.), (pi/2, 0.),
(3.*pi/4., -1)], legend='Cross Hatch Square', colour='blue', width= 3, size= 6,
fillcolour= 'red', fillstyle= wx.CROSSDIAG_HATCH,
marker='square')
return PlotGraphics([markers1, line1, line2], "Big Markers with Different Line Styles")
def _draw3Objects():
markerList= ['circle', 'dot', 'square', 'triangle', 'triangle_down',
'cross', 'plus', 'circle']
m=[]
for i in range(len(markerList)):
m.append(PolyMarker([(2*i+.5,i+.5)], legend=markerList[i], colour='blue',
marker=markerList[i]))
return PlotGraphics(m, "Selection of Markers", "Minimal Axis", "No Axis")
def _draw4Objects():
# 25,000 point line
data1 = numpy.arange(5e5,1e6,10)
data1.shape = (25000, 2)
line1 = PolyLine(data1, legend='Wide Line', colour='green', width=5)
# A few more points...
markers2 = PolyMarker(data1, legend='Square', colour='blue',
marker='square')
return PlotGraphics([line1, markers2], "25,000 Points", "Value X", "")
def _draw5Objects():
# Empty graph with axis defined but no points/lines
points=[]
line1 = PolyLine(points, legend='Wide Line', colour='green', width=5)
return PlotGraphics([line1], "Empty Plot With Just Axes", "Value X", "Value Y")
def _draw6Objects():
# Bar graph
points1=[(1,0), (1,10)]
line1 = PolyLine(points1, colour='green', legend='Feb.', width=10)
points1g=[(2,0), (2,4)]
line1g = PolyLine(points1g, colour='red', legend='Mar.', width=10)
points1b=[(3,0), (3,6)]
line1b = PolyLine(points1b, colour='blue', legend='Apr.', width=10)
points2=[(4,0), (4,12)]
line2 = PolyLine(points2, colour='Yellow', legend='May', width=10)
points2g=[(5,0), (5,8)]
line2g = PolyLine(points2g, colour='orange', legend='June', width=10)
points2b=[(6,0), (6,4)]
line2b = PolyLine(points2b, colour='brown', legend='July', width=10)
return PlotGraphics([line1, line1g, line1b, line2, line2g, line2b],
"Bar Graph - (Turn on Grid, Legend)", "Months", "Number of Students")
class TestFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title,
wx.DefaultPosition, (600, 400))
# Now Create the menu bar and items
self.mainmenu = wx.MenuBar()
menu = wx.Menu()
menu.Append(200, 'Page Setup...', 'Setup the printer page')
self.Bind(wx.EVT_MENU, self.OnFilePageSetup, id=200)
menu.Append(201, 'Print Preview...', 'Show the current plot on page')
self.Bind(wx.EVT_MENU, self.OnFilePrintPreview, id=201)
menu.Append(202, 'Print...', 'Print the current plot')
self.Bind(wx.EVT_MENU, self.OnFilePrint, id=202)
menu.Append(203, 'Save Plot...', 'Save current plot')
self.Bind(wx.EVT_MENU, self.OnSaveFile, id=203)
menu.Append(205, 'E&xit', 'Enough of this already!')
self.Bind(wx.EVT_MENU, self.OnFileExit, id=205)
self.mainmenu.Append(menu, '&File')
menu = wx.Menu()
menu.Append(206, 'Draw1', 'Draw plots1')
self.Bind(wx.EVT_MENU,self.OnPlotDraw1, id=206)
menu.Append(207, 'Draw2', 'Draw plots2')
self.Bind(wx.EVT_MENU,self.OnPlotDraw2, id=207)
menu.Append(208, 'Draw3', 'Draw plots3')
self.Bind(wx.EVT_MENU,self.OnPlotDraw3, id=208)
menu.Append(209, 'Draw4', 'Draw plots4')
self.Bind(wx.EVT_MENU,self.OnPlotDraw4, id=209)
menu.Append(210, 'Draw5', 'Draw plots5')
self.Bind(wx.EVT_MENU,self.OnPlotDraw5, id=210)
menu.Append(260, 'Draw6', 'Draw plots6')
self.Bind(wx.EVT_MENU,self.OnPlotDraw6, id=260)
menu.Append(211, '&Redraw', 'Redraw plots')
self.Bind(wx.EVT_MENU,self.OnPlotRedraw, id=211)
menu.Append(212, '&Clear', 'Clear canvas')
self.Bind(wx.EVT_MENU,self.OnPlotClear, id=212)
menu.Append(213, '&Scale', 'Scale canvas')
self.Bind(wx.EVT_MENU,self.OnPlotScale, id=213)
menu.Append(214, 'Enable &Zoom', 'Enable Mouse Zoom', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableZoom, id=214)
menu.Append(215, 'Enable &Grid', 'Turn on Grid', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableGrid, id=215)
menu.Append(220, 'Enable &Legend', 'Turn on Legend', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnableLegend, id=220)
menu.Append(222, 'Enable &Point Label', 'Show Closest Point', kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU,self.OnEnablePointLabel, id=222)
menu.Append(225, 'Scroll Up 1', 'Move View Up 1 Unit')
self.Bind(wx.EVT_MENU,self.OnScrUp, id=225)
menu.Append(230, 'Scroll Rt 2', 'Move View Right 2 Units')
self.Bind(wx.EVT_MENU,self.OnScrRt, id=230)
menu.Append(235, '&Plot Reset', 'Reset to original plot')
self.Bind(wx.EVT_MENU,self.OnReset, id=235)
self.mainmenu.Append(menu, '&Plot')
menu = wx.Menu()
menu.Append(300, '&About', 'About this thing...')
self.Bind(wx.EVT_MENU, self.OnHelpAbout, id=300)
self.mainmenu.Append(menu, '&Help')
self.SetMenuBar(self.mainmenu)
# A status bar to tell people what's happening
self.CreateStatusBar(1)
self.client = PlotCanvas(self)
#define the function for drawing pointLabels
self.client.SetPointLabelFunc(self.DrawPointLabel)
# Create mouse event for showing cursor coords in status bar
self.client.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
# Show closest point when enabled
self.client.Bind(wx.EVT_MOTION, self.OnMotion)
self.Show(True)
def DrawPointLabel(self, dc, mDataDict):
"""This is the fuction that defines how the pointLabels are plotted
dc - DC that will be passed
mDataDict - Dictionary of data that you want to use for the pointLabel
As an example I have decided I want a box at the curve point
with some text information about the curve plotted below.
Any wxDC method can be used.
"""
# ----------
dc.SetPen(wx.Pen(wx.BLACK))
dc.SetBrush(wx.Brush( wx.BLACK, wx.SOLID ) )
sx, sy = mDataDict["scaledXY"] #scaled x,y of closest point
dc.DrawRectangle( sx-5,sy-5, 10, 10) #10by10 square centered on point
px,py = mDataDict["pointXY"]
cNum = mDataDict["curveNum"]
pntIn = mDataDict["pIndex"]
legend = mDataDict["legend"]
#make a string to display
s = "Crv# %i, '%s', Pt. (%.2f,%.2f), PtInd %i" %(cNum, legend, px, py, pntIn)
dc.DrawText(s, sx , sy+1)
# -----------
def OnMouseLeftDown(self,event):
s= "Left Mouse Down at Point: (%.4f, %.4f)" % self.client.GetXY(event)
self.SetStatusText(s)
event.Skip() #allows plotCanvas OnMouseLeftDown to be called
def OnMotion(self, event):
        #show closest point (when enabled)
if self.client.GetEnablePointLabel() == True:
#make up dict with info for the pointLabel
#I've decided to mark the closest point on the closest curve
dlst= self.client.GetClosetPoint( self.client.GetXY(event), pointScaled= True)
if dlst != []: #returns [] if none
curveNum, legend, pIndex, pointXY, scaledXY, distance = dlst
#make up dictionary to pass to my user function (see DrawPointLabel)
mDataDict= {"curveNum":curveNum, "legend":legend, "pIndex":pIndex,\
"pointXY":pointXY, "scaledXY":scaledXY}
#pass dict to update the pointLabel
self.client.UpdatePointLabel(mDataDict)
event.Skip() #go to next handler
def OnFilePageSetup(self, event):
self.client.PageSetup()
def OnFilePrintPreview(self, event):
self.client.PrintPreview()
def OnFilePrint(self, event):
self.client.Printout()
def OnSaveFile(self, event):
self.client.SaveFile()
def OnFileExit(self, event):
self.Close()
def OnPlotDraw1(self, event):
self.resetDefaults()
self.client.Draw(_draw1Objects())
def OnPlotDraw2(self, event):
self.resetDefaults()
self.client.Draw(_draw2Objects())
def OnPlotDraw3(self, event):
self.resetDefaults()
self.client.SetFont(wx.Font(10,wx.SCRIPT,wx.NORMAL,wx.NORMAL))
self.client.SetFontSizeAxis(20)
self.client.SetFontSizeLegend(12)
self.client.SetXSpec('min')
self.client.SetYSpec('none')
self.client.Draw(_draw3Objects())
def OnPlotDraw4(self, event):
self.resetDefaults()
drawObj= _draw4Objects()
self.client.Draw(drawObj)
## # profile
## start = _time.clock()
## for x in range(10):
## self.client.Draw(drawObj)
## print "10 plots of Draw4 took: %f sec."%(_time.clock() - start)
## # profile end
def OnPlotDraw5(self, event):
# Empty plot with just axes
self.resetDefaults()
drawObj= _draw5Objects()
# make the axis X= (0,5), Y=(0,10)
# (default with None is X= (-1,1), Y= (-1,1))
self.client.Draw(drawObj, xAxis= (0,5), yAxis= (0,10))
def OnPlotDraw6(self, event):
#Bar Graph Example
self.resetDefaults()
#self.client.SetEnableLegend(True) #turn on Legend
#self.client.SetEnableGrid(True) #turn on Grid
self.client.SetXSpec('none') #turns off x-axis scale
self.client.SetYSpec('auto')
self.client.Draw(_draw6Objects(), xAxis= (0,7))
def OnPlotRedraw(self,event):
self.client.Redraw()
def OnPlotClear(self,event):
self.client.Clear()
def OnPlotScale(self, event):
if self.client.last_draw != None:
graphics, xAxis, yAxis= self.client.last_draw
self.client.Draw(graphics,(1,3.05),(0,1))
def OnEnableZoom(self, event):
self.client.SetEnableZoom(event.IsChecked())
def OnEnableGrid(self, event):
self.client.SetEnableGrid(event.IsChecked())
def OnEnableLegend(self, event):
self.client.SetEnableLegend(event.IsChecked())
def OnEnablePointLabel(self, event):
self.client.SetEnablePointLabel(event.IsChecked())
def OnScrUp(self, event):
self.client.ScrollUp(1)
def OnScrRt(self,event):
self.client.ScrollRight(2)
def OnReset(self,event):
self.client.Reset()
def OnHelpAbout(self, event):
from wx.lib.dialogs import ScrolledMessageDialog
about = ScrolledMessageDialog(self, __doc__, "About...")
about.ShowModal()
def resetDefaults(self):
"""Just to reset the fonts back to the PlotCanvas defaults"""
self.client.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.NORMAL))
self.client.SetFontSizeAxis(10)
self.client.SetFontSizeLegend(7)
self.client.SetXSpec('auto')
self.client.SetYSpec('auto')
def __test():
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame = TestFrame(None, -1, "PlotCanvas")
#frame.Show(True)
self.SetTopWindow(frame)
return True
app = MyApp(0)
app.MainLoop()
if __name__ == '__main__':
__test()
| mit | 579,583,106,644,383,400 | 38.30317 | 141 | 0.574416 | false | 3.732607 | false | false | false |
khchine5/lino-welfare | lino_welfare/modlib/pcsw/fixtures/std.py | 1 | 4085 | # -*- coding: UTF-8 -*-
# Copyright 2011,2013 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""Adds default data for `PersonGroup` and `DispenseReason`.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from lino.utils.instantiator import Instantiator, i2d
from lino.api.dd import babelkw
from lino.api import dd, rt
from lino.modlib.uploads.choicelists import Shortcuts
UPLOADTYPE_RESIDENCE_PERMIT = 1
UPLOADTYPE_WORK_PERMIT = 2
UPLOADTYPE_DRIVING_LICENSE = 3
def uploads_objects():
Recurrencies = rt.models.cal.Recurrencies
UploadType = rt.models.uploads.UploadType
kw = dict(
warn_expiry_unit=Recurrencies.monthly,
warn_expiry_value=2)
kw.update(max_number=1, wanted=True)
kw.update(dd.str2kw('name', _("Residence permit")))
# 'name', de=u"Aufenthaltserlaubnis",
# fr=u"Permis de séjour", en="Residence permit"))
yield UploadType(id=UPLOADTYPE_RESIDENCE_PERMIT, **kw)
kw.update(dd.str2kw('name', _("Work permit")))
# 'name', de=u"Arbeitserlaubnis",
# fr=u"Permis de travail", en="Work permit"))
yield UploadType(id=UPLOADTYPE_WORK_PERMIT, **kw)
kw.update(warn_expiry_value=1)
kw.update(dd.str2kw('name', _("Driving licence")))
yield UploadType(id=UPLOADTYPE_DRIVING_LICENSE, **kw)
kw.update(dd.str2kw('name', _("Identifying document")))
yield UploadType(shortcut=Shortcuts.id_document, **kw)
kw.update(max_number=-1, wanted=False)
kw.update(warn_expiry_unit='')
kw.update(dd.str2kw('name', _("Contract")))
yield UploadType(**kw)
kw.update(dd.str2kw('name', _("Medical certificate")))
# de="Ärztliche Bescheinigung",
# fr="Certificat médical",
yield UploadType(**kw)
kw.update(dd.str2kw('name', _("Handicap certificate")))
# de="Behindertenausweis",
# fr="Certificat de handicap",
yield UploadType(**kw)
kw.update(wanted=True)
kw.update(dd.str2kw('name', _("Diploma")))
yield UploadType(**kw)
kw.update(wanted=False)
kw.update(dd.str2kw('name', _("Identity card")))
# fr=u"Carte d'identité", en="Identity card"))
yield UploadType(**kw)
def objects():
from lino.api import dd, rt
pcsw = dd.resolve_app('pcsw')
#~ persongroup = Instantiator('pcsw.PersonGroup','name').build
# Auswertung / Bilan
yield pcsw.PersonGroup(ref_name='1', name=_("Evaluation"))
# Formation / Ausbildung
yield pcsw.PersonGroup(ref_name='2', name=_("Formation"))
yield pcsw.PersonGroup(ref_name='4', name=_("Search")) # Suche / Recherche
yield pcsw.PersonGroup(ref_name='4bis', name=_("Work")) # Arbeit / Travail
yield pcsw.PersonGroup(ref_name='9', name=_("Standby"))
#~ yield persongroup(u"Bilan",ref_name='1')
#~ yield persongroup(u"Formation",ref_name='2')
#~ yield persongroup(u"Recherche",ref_name='4')
#~ yield persongroup(u"Travail",ref_name='4bis')
#~ yield persongroup(u"Standby",ref_name='9',active=False)
yield pcsw.DispenseReason(**babelkw('name', de="Gesundheitlich", fr="Santé", en="Health"))
yield pcsw.DispenseReason(**babelkw('name', de="Studium/Ausbildung", fr="Etude/Formation", en="Studies"))
yield pcsw.DispenseReason(**babelkw('name', de="Familiär", fr="Cause familiale", en="Familiar"))
yield pcsw.DispenseReason(**babelkw('name', de="Sonstige", fr="Autre", en="Other"))
yield uploads_objects()
| agpl-3.0 | -564,761,152,002,856,900 | 35.097345 | 109 | 0.685217 | false | 3.106626 | false | false | false |
roessland/PRST | prst/utils/__init__.py | 1 | 22249 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import copy
__all__ = ["rldecode", "rlencode", "units", "mcolon", "recursive_diff", "gridtools"]
import prst.utils.gridtools
import numpy as np
import scipy.sparse
from scipy.sparse import csr_matrix
import scipy.sparse as sps
class Struct(dict):
"""
MATLAB-struct-like object.
Source: http://stackoverflow.com/questions/35988/
"""
def __init__(self, **kwargs):
super(Struct, self).__init__(**kwargs)
self.__dict__ = self
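# Example (sketch, not part of the original module): Struct entries can be
# reached both as keys and as attributes.
#
#   s = Struct(a=1, b=2)
#   s.c = 3
#   assert s.a == s["a"] and s["c"] == 3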
def rlencode(A, axis=0):
"""
Compute run length encoding of array A along axis.
Synopsis:
A, n = rlencode(A)
A, n = rlencode(A, axis)
Arguments:
A (np.ndarray): Array to be encoded.
axis (Optional[int]): Axis of A where run length encoding is done.
Default value: axis=0
Example (default axis):
>>> A = np.array([
... [1, 2, 3, 4],
... [1, 2, 3, 4],
... [3, 4, 5, 6],
... [3, 3, 3, 3],
... [3, 3, 4, 5],
... [3, 3, 4, 5]])
>>> A, n = rlencode(A, 0)
>>> print(A)
[[1 2 3 4]
[3 4 5 6]
[3 3 3 3]
[3 3 4 5]]
>>> print(n)
[2 1 1 2]
Example (j-axis):
>>> A = np.array([
... [1,1,3,3,3,3],
... [2,2,4,3,3,3],
... [3,3,5,3,4,4],
... [4,4,6,3,5,5]])
>>> A, n = rlencode(A, 1)
>>> print(A)
[[1 3 3 3]
[2 4 3 3]
[3 5 3 4]
[4 6 3 5]]
>>> print(n)
[2 1 1 2]
"""
# Let the relevant axis be the first axis
B = np.swapaxes(A, 0, axis)
# Flatten axes that are normal to the encoding axis
B = B.reshape([B.shape[0],-1])
# Pick indices where the next index is different
i = np.append(np.where(np.any(B[:-1] != B[1:], axis=1)), B.shape[0]-1)
# Find the number of repetitions
n = np.diff(np.insert(i, 0, -1))
# Pick necessary slices of the encoding axis
return A.take(i, axis=axis), n
def rldecode(A, n, axis=0):
"""
Decompresses run length encoding of array A along axis.
Synopsis:
B = rldecode(A, n, axis)
B = rldecode(A, n) # axis assumed to be 0
Arguments:
A (np.ndarray): Encoded array
n (np.ndarray): Repetition of each layer along an axis.
axis (Optional[int]): Axis of A where run length decoding is done.
Returns:
Uncompressed matrix
Example (1D-array) along default axis:
>>> A = np.array([1,4,5])
>>> n = np.array([4,2,1])
>>> print(rldecode(A, n))
[1 1 1 1 4 4 5]
Example (2D-array) along j-axis:
>>> A = np.array([
... [1,3,3,3],
... [2,4,3,3],
... [3,5,3,4],
... [4,6,3,5]])
>>> n = np.array([2,1,1,2])
>>> print(rldecode(A, n, axis=1))
[[1 1 3 3 3 3]
[2 2 4 3 3 3]
[3 3 5 3 4 4]
[4 4 6 3 5 5]]
"""
assert n.size > 0, "Length array was empty."
# repeat functions take 1d array
if n.ndim != 1:
assert n.ndim <= 2
assert n.shape[0] == 1 or n.shape[1] == 1
n = n.ravel()
return A.repeat(n, axis=axis)
def mcolon(lo, hi, s=None):
"""
Compute concatenated ranges.
Synopsis:
mcolon(lo, hi)
mcolon(lo, hi, stride)
Arguments:
lo (ndarray):
1d array of lower bounds
hi (ndarray):
1d array of upper bounds
s (Optional[ndarray]):
1d array of strides. Default = np.ones(lo.shape) (unit strides).
Returns:
np.r_[lo[0]:hi[0], ..., lo[-1]:hi[-1]]
np.r_[lo[0]:hi[0]:s[0], ..., lo[-1]:hi[-1]:s[-1]]
(The NumPy r_ index trick builds a concatenated array of ranges.)
Example:
>>> lo = np.array([0,0,0,0])
>>> hi = np.array([2,3,4,5])
>>> ind = mcolon(lo, hi)
>>> np.array_equal(ind, np.array([0,1,0,1,2,0,1,2,3,0,1,2,3,4]))
True
"""
if s is None:
ranges = [range(l,h) for (l,h) in zip(lo,hi)]
else:
ranges = [range(l,h,st) for (l,h,st) in zip(lo,hi,s)]
return np.concatenate(ranges)
def recursive_diff(A, B, indent=0):
"""
Shows which attributes differ between two objects. Recursive.
Synopsis:
recursive_diff(A, B)
Example:
>> from prst.gridprocessing import cartGrid
>> G, V = cartGrid([3,3,3]), cartGrid([3,3,4])
>> recursive_diff(G, V)
====== Recursive comparison ======
gridType
Equal, (list,list)
cells
facePos
NOT EQUAL, (ndarray,ndarray)
num
NOT EQUAL, (int,int)
indexMap
NOT EQUAL, (ndarray,ndarray)
...
"""
def pprint(*args, **kwargs):
print(" "*indent, *args, **kwargs)
if indent == 0:
print()
print("====== Recursive comparison ======")
# For classes, try to get dict attribute
try:
A = A.__dict__
except:
pass
try:
B = B.__dict__
except:
pass
if isinstance(A, dict) and isinstance(B, dict):
# Descend into attributes which exist in both and are dicts. Print them first.
pass
inA = set(A.keys())
inB = set(B.keys())
notInA = inB - inA
notInB = inA - inB
inBoth = inA & inB
# Print attributes only in A
if notInA:
pprint("A MISSING ATTRIBUTES:", notInA)
# Print attributes only in B
if notInB:
pprint("B MISSING ATTRIBUTES:", notInB)
# Recursively do the same with common attributes
for attr in inBoth:
pprint(attr)
recursive_diff(A[attr], B[attr], indent+2)
else:
# Compare A, B for equality
equal = False
try:
equal = None
close = None
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
equal = np.array_equal(A, B)
close = np.allclose(A, B)
else:
equal = A == B
if equal:
pprint("Equal, ", end="")
else:
pprint("NOT EQUAL, ", end="")
if close:
print("(BUT APPROXIMATELY EQUAL)", end="")
except:
pprint("NOT COMPARABLE, ", end="")
print("("+A.__class__.__name__+","+B.__class__.__name__+")")
class ADI(object):
"""ADI: Automatic DIfferentiation
Simple implementation of automatic differentiation for easy construction
of Jacobian matrices.
Synopsis:
x = ADI(value, jacobian)
Arguments:
value(np.ndarray):
The numerical value of the object. Must be a NumPy column array.
Not compatible with matrices (neither np.matrix nor
scipy.sparse.spmatrix).
jacobian(list[scipy.sparse.csr_matrix]):
The Jacobian of the object. Split into parts to improve
performance.
Comment:
This class is typically instantiated for a set of variables using
initVariablesADI, not by itself.
Many methods found in `np.ndarray` are also implemented by ADI. Example:
x, = initVariablesADI(np.array([[2, 3, 4]]).T)
y = x.log()
z = x.sum()
Using "np." methods is not supported yet, e.g., `np.dot(A, x)` where x
is an ADI object will not work as expected, and is not recommended. A
    compatibility layer, `prst.utils.npad` is provided. `npad.dot(A, x)`
will work correctly for any number of AD arguments, and uses `np.dot(A,
x)` if neither arguments are AD objects. Future versions of NumPy
(>0.12) will most likely deprecate `npad` with the __numpy_ufunc__
functionality.
See also:
initVariablesADI
"""
# Requires __numpy_ufunc__ for syntactical sugar. Hopefully will be added to NumPy 1.12...
# https://github.com/numpy/numpy/issues/7519
__array_priority__ = 10000
ndim = 2
def __init__(self, val, jac):
self.val = val
self.jac = jac
if not isinstance(self.jac, list):
self.jac = [self.jac,]
def __repr__(self):
jacstring = str([block.shape for block in self.jac])
return "(val: {0}.T, jac block sizes: {1})".format(self.val.T, jacstring)
def pprint(self, name=None):
"""
Pretty-print full matrices with limited decimals.
Example:
import numpy as np
from prst.utils import initVariablesADI
x0 = np.array([[1,2,3,2,3]]).T
x, = initVariablesADI(x0)
y = x**2
y.pprint()
Output:
ADI properties
val: [[1 4 9 4 9]].T
jac[0] [[ 2. 0. 0. 0. 0.]
[ 0. 4. 0. 0. 0.]
[ 0. 0. 6. 0. 0.]
[ 0. 0. 0. 4. 0.]
[ 0. 0. 0. 0. 6.]]
"""
namestr = ""
if name:
namestr = name + " "
lines = [
namestr + "ADI properties",
"\tval: " + str(self.val.T) + ".T",
]
for i, j in enumerate(self.jac):
lines.append("\n\tjac[" + str(i) + "]" + "\t" + str(j.toarray()).replace("\n", "\n\t\t"))
lines.append("")
print("\n".join(lines))
def copy(self):
return copy.deepcopy(self)
#def __len__(self):
#raise NotImplementedError("Use shape[0]. See http://stackoverflow.com/questions/37529715/")
@property
def shape(self):
return self.val.shape
def __ge__(u, v):
try:
return u.val >= v.val
except AttributeError:
return u.val >= v
def __gt__(u, v):
try:
return u.val > v.val
except AttributeError:
return u.val > v
def __le__(u, v):
try:
return u.val <= v.val
except AttributeError:
return u.val <= v
def __lt__(u, v):
try:
return u.val < v.val
except AttributeError:
return u.val < v
def __pos__(u): # +u
return u.copy()
def __neg__(u): # -u
return ADI(-u.val, [-j for j in u.jac])
def __add__(u, v): # u + v
if isinstance(v, ADI):
if u.val.shape[0] == v.val.shape[0]:
return ADI(u.val + v.val, [ju+jv for (ju,jv) in zip(u.jac, v.jac)])
if v.val.shape[0] == 1:
# Tile v.jac to same length as u.jac since sparse matrices
# don't broadcast properly.
# https://github.com/scipy/scipy/issues/2128
vjac = [sps.bmat([[j]]*len(u.val)) for j in v.jac]
retjac = [ju+jv for (ju,jv) in zip(u.jac, vjac)]
return ADI(u.val+v.val, retjac)
if u.val.shape[0] == 1:
# Vice versa, this time tile u instead
ujac = [sps.bmat([[j]]*len(v.val)) for j in u.jac]
retjac = [ju+jv for (ju,jv) in zip(ujac, v.jac)]
return ADI(u.val+v.val, retjac)
raise ValueError("Dimension mismatch")
# v isn't AD object
v = np.atleast_2d(v)
return ADI(u.val + v, copy.deepcopy(u.jac))
def __radd__(v, u): # u + v
return v.__add__(u)
def __sub__(u, v):
return u.__add__(-v)
def __rsub__(v, u): # u - v
return (-v).__add__(u)
# mul
def __mul__(u, v):
"""Hadamard product u*v."""
if isinstance(v, ADI):
if len(u.val) == len(v.val):
# Note: scipy.sparse.diags has changed parameters between
# versions 0.16x and 0.17x. This code is only tested on 0.16x.
# TODO test code in SciPy 0.17x
uJv = [sps.diags([u.val.flat],[0])*jv for jv in v.jac] # MATRIX multiplication
vJu = [sps.diags([v.val.flat],[0])*ju for ju in u.jac] # MATRIX multiplication
jac = [a+b for (a,b) in zip(uJv, vJu)]
return ADI(u.val*v.val, jac)
if len(v.val) == 1:
# Fix dimensions and recurse
vval = np.tile(v.val, (u.val.shape[0],1) )
vjac = [sps.bmat([[j]]*len(u.val)) for j in v.jac]
return u.__mul__(ADI(vval, vjac))
if len(u.val) == 1:
# Fix dimensions and recurse
uval = np.tile(u.val, (v.val.shape[0],1) )
ujac = [sps.bmat([[j]]*len(v.val)) for j in u.jac]
return ADI(uval, ujac).__mul__(v)
raise ValueError("Dimension mismatch")
else:
v = np.atleast_2d(v)
if len(u.val) == 1:
val = u.val * v
jac = [sps.diags(v.flat,0)*sps.bmat([[j]]*len(v)) for j in u.jac]
return ADI(val, jac)
if len(v) == 1:
return ADI(u.val*v, [v.flat[0]*ju for ju in u.jac])
if len(u.val) == len(v):
vJu = [sps.diags(v.flat, 0)*ju for ju in u.jac] # MATRIX multiplication
return ADI(u.val*v, vJu)
raise ValueError("Dimension mismatch")
def __rmul__(v, u):
# u * v = v * u
return v.__mul__(u)
def dot(u, A): # u x A
return _dot(u, A)
def __pow__(u, v):
return u._pow(u, v)
# This method is static so that it can be called with non-ADI u
# E.g. when calculating 2**u, where u is ADI.
@staticmethod
def _pow(u, v):
"""Elementwise power, u**v."""
if not isinstance(v, ADI): # u is AD, v is a scalar or vector
v = np.atleast_2d(v)
tmp = v*u.val**(v-1)
uvJac = [_spdiag(tmp)*ju for ju in u.jac]
return ADI(u.val**v, uvJac)
elif not isinstance(u, ADI): # u is a scalar, v is AD
u = np.atleast_2d(u)
tmp = u**v.val*np.log(u)
uvJac = [sps.diags(tmp.flat, 0)*jv for jv in v.jac]
return ADI(u**v.val, uvJac)
else: # u and v are ADI objects of same length
if len(u.val) != len(v.val):
raise ValueError("Must be same length")
# d(u^v)/dx = diag(u^v o (v / u))*
# + diag(u^v o log(u))*J
tmp1 = u.val**v.val * v.val/u.val
tmp2 = u.val**v.val * np.log(u.val)
uvJacPart1 = [sps.diags(tmp1.flat, 0)*ju for ju in u.jac]
uvJacPart2 = [sps.diags(tmp2.flat, 0)*jv for jv in v.jac]
uvJac = [a+b for (a,b) in zip(uvJacPart1, uvJacPart2)]
return ADI(u.val**v.val, uvJac)
def __rpow__(v, u):
"""u**v where u is not ADI."""
return v._pow(u, v)
def __div__(u, v):
raise DeprecationWarning("Add 'from __future__ import division'.")
def __truediv__(u, v):
return u * v**(-1.0)
def __rdiv__(v, u):
raise DeprecationWarning("Add 'from __future__ import division'.")
def __rtruediv__(v, u):
return u * v**(-1.0)
def __getitem__(u, s):
"""
Slices the column array using NumPy syntax.
Examples: (x is ADI object)
x[(2,1),:]
x[1]
x[1,:]
x[np.array([True,True,False])]
x[np.array([False,False,False]),:]
x[np.array([2,1,0]),:]
x[np.array([2]),:]
x[::-1]
"""
val = np.atleast_2d(u.val[s])
if val.shape[0] != 1 and val.shape[1] != 1:
raise ValueError("Slice type not supported")
if val.shape[1] != 1:
val = val.T
try:
s = s[0]
except TypeError:
pass
jac = [j[s,:] for j in u.jac]
return ADI(val, jac)
def __setitem__(u, s, v):
"""
Sets values in ADI vector.
If the right side is non-ADI, the corresponding Jacobian rows are set to zero.
If the right side is ADI, the corresponding Jacobian rows are overwritten.
"""
if isinstance(v, ADI):
# This part is not so pretty, and could probably
# be improved.
if u.val[s].ndim <= 1:
u.val[s] = v.val.ravel()
elif u.val[s].ndim == 2:
u.val[s] = v.val
else:
raise ValueError("This should never happen.")
try:
s = s[0]
except TypeError:
pass
for i in range(len(u.jac)):
u.jac[i][s] = v.jac[i]
else:
u.val[s] = v
try:
s = s[0]
except TypeError:
pass
for i in range(len(u.jac)):
u.jac[i][s] = 0
def max(u):
"""Return the maximum element in the array."""
i = np.argmax(u.val)
return ADI(np.atleast_2d(u.val[i,:]), [j[i,:] for j in u.jac])
def min(u):
"""Return the minimum element in the array."""
i = np.argmin(u.val)
return ADI(np.atleast_2d(u.val[i,:]), [j[i,:] for j in u.jac])
def sum(u):
"""Return the sum of the array elements."""
val = u.val.sum(keepdims=True)
jac = [sps.csr_matrix(j.sum(axis=0)) for j in u.jac]
return ADI(val, jac)
def sin(u):
"""Return element-wise sine of array."""
val = np.sin(u.val)
cosval = np.cos(u.val)
jac = [sps.diags(cosval.flat, 0)*j for j in u.jac]
return ADI(val, jac)
def cos(u):
"""Return element-wise cosine of array."""
val = np.cos(u.val)
msinval = -np.sin(u.val)
jac = [sps.diags(msinval.flat, 0)*j for j in u.jac]
return ADI(val, jac)
def exp(u):
val = np.exp(u.val)
jac = [sps.diags(val.flat, 0)*j for j in u.jac]
return ADI(val, jac)
def log(u):
val = np.log(u.val)
m = sps.diags((1/u.val).flat, 0)
jac = [m*j for j in u.jac]
return ADI(val, jac)
def sign(u):
return np.sign(u.val)
def abs(u):
val = np.abs(u.val)
sgn = np.sign(u.val)
jac = [sps.diags(sgn.flat, 0)*j for j in u.jac]
return ADI(val, jac)
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
"""Placeholder method for future NumPy versions."""
raise NotImplementedError("NumPy has finally added __numpy_ufunc__ support, but "
"PRST has not added support yet.")
# NumPy binary ufunc wrappers
def _dot(u, v):
"""Matrix multiplication."""
if isinstance(u, ADI) and isinstance(v, ADI):
# u_ad, v_ad
assert u.val.shape[0] == v.val.shape[0] == 1, "dot(ad,ad) only valid for 1x1 arguments"
return u * v
elif isinstance(u, ADI) and not isinstance(v, ADI):
# u_ad, v
v = np.atleast_2d(v)
assert v.shape[0] == 1, "dot(ad,vec) only valid for 1x1 vec."
return u*v
elif not isinstance(u, ADI) and isinstance(v, ADI):
# u, v_ad
if not hasattr(u, "dot"):
u = np.atleast_2d(u)
u_sp = sps.csr_matrix(u)
return ADI(u.dot(v.val), [u_sp*j for j in v.jac])
else:
# u, v
if hasattr(u, "dot"):
return u.dot(v)
return np.dot(u, v)
def _tile(A, reps):
if isinstance(A, ADI):
if len(reps) != 2 or reps[1] != 1:
raise TypeError("AD vectors can only be tiled vertically.")
val = np.tile(A.val, reps)
jac = [sps.bmat([[j]]*reps[0]) for j in A.jac]
return ADI(val, jac)
else:
return np.tile(A, reps)
# Numpy unary ufunc wrappers
# The unary wrappers are all following the same formula, and can possibly be
# removed entirely by making `npad` more magic with __getattr__.
def _sign(u):
if isinstance(u, ADI):
return u.sign()
else:
return np.sign(u)
def _abs(u):
"""np.abs for AD array."""
if isinstance(u, ADI):
return u.abs()
else:
return np.abs(u)
def _exp(u):
"""np.exp for AD array."""
if isinstance(u, ADI):
return u.exp()
else:
        return np.exp(u)
# NumPy n-ary functions
def _vstack(tup):
"""np.vstack for AD array."""
vals = np.vstack((u.val for u in tup))
jacs = []
num_jacs = len(tup[0].jac)
for j in range(num_jacs):
jacs.append(sps.bmat([[u.jac[j]] for u in tup]))
return ADI(vals, jacs)
def _concatenate(tup, axis):
"""np.concatenate for AD array."""
if axis != 0:
raise TypeError("ADI objects can only be concatenated vertically.")
return _vstack(tup)
# Register ufunc wrappers so they can be easily imported.
npad = Struct()
# n-ary
npad.vstack = _vstack
npad.concatenate = _concatenate
# binary
npad.dot = _dot
npad.tile = _tile
# unary
npad.sign = _sign
npad.abs = _abs
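# Example (illustrative sketch, not part of the original module): npad.dot
# dispatches on its argument types, so the same call works for plain NumPy
# arrays and for ADI objects.
#
#   A = np.array([[1., 2.]])
#   x, = initVariablesADI(np.array([[3., 4.]]).T)
#   y = npad.dot(A, x)   # y.val -> [[11.]], y.jac[0].toarray() -> [[1., 2.]]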
def initVariablesADI(*variables):
"""
Returns AD (automatic differentiation) variables.
See `help(prst.utils.ADI)` for documentation.
"""
# Convert all inputs to column arrays
vals = list(variables)
for i in range(len(vals)):
vals[i] = np.atleast_2d(vals[i])
if vals[i].shape[1] == 0:
vals[i] = vals[i].reshape(-1,1)
elif vals[i].shape[1] != 1:
raise ValueError("AD variables must be column vectors.")
numvals = np.array([len(val) for val in vals])
n = len(vals)
ret = [None]*n
for i in range(n):
nrows = numvals[i]
# Set Jacobians wrt other variables to zero-matrices
jac = [None]*n
for j in np.r_[0:i, (i+1):n]:
ncols = numvals[j]
jac[j] = scipy.sparse.csr_matrix((nrows, ncols))
# Set Jacobian of current variable wrt itself to the identity matrix.
jac[i] = scipy.sparse.identity(nrows, format="csr")
ret[i] = ADI(vals[i], jac)
return ret
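# Example (illustrative sketch, not part of the original module):
#
#   x, y = initVariablesADI(np.array([[1., 2.]]).T, np.array([[3.]]))
#   z = x * y + x.exp()
#   # z.val is a 2x1 column; z.jac[0] holds dz/dx (a 2x2 diagonal block) and
#   # z.jac[1] holds dz/dy (a 2x1 block), both as SciPy sparse matrices.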
def _spdiag(val_column):
"""Improved version of scipy.sparse.diags."""
if val_column.shape[0] == 0:
return sps.csr_matrix((1,0))
return sps.diags(val_column.flat, 0, format="csr")
| gpl-3.0 | 1,775,324,393,358,209,500 | 29.311989 | 101 | 0.499573 | false | 3.29566 | false | false | false |
dondamage/SPyTS | tmp/TsPacket.py | 1 | 2806 | import TsPacketError
class TsPacket(object):
"""
A wrapper class to represent an MPEG2-TS packet.
"""
TS_PKT_LEN_188 = 188
TS_PKT_LEN_204 = 204
def __init__(self, pkt):
if isinstance(pkt, bytes):
if len(pkt) in (TsPacket.TS_PKT_LEN_188, TsPacket.TS_PKT_LEN_204):
self._content = pkt
self._header = self._content[:4]
self._payload = self._content[4:]
else:
raise TsPacketError.TsPacketError("Invalid length of bytes object.")
else:
raise TypeError("Argument must be a bytes object.")
def _bytes_to_int(self, b, endianness="big"):
return int.from_bytes(b, endianness)
def get_header(self):
"""Return only the header."""
return self._header
def get_payload(self):
"""Return only the payload."""
return self._payload
def get_sync(self):
"""Return sync byte."""
sync = self._header[0]
return sync
def get_tei(self):
"""Return TEI flag."""
tei = (self._bytes_to_int(self._header[1:1+1]) & 0x80) >> 7
return tei
def get_pusi(self):
"""Return PUSI flag."""
pusi = (self._bytes_to_int(self._header[1:1+1]) & 0x40) >> 6
return pusi
def get_tp(self):
"""Return TP flag."""
tp = (self._bytes_to_int(self._header[1:1+1]) & 0x20) >> 5
return tp
def get_pid(self):
"""Return PID."""
pid = (self._bytes_to_int(self._header[1:2+1]) & 0x1FFF) >> 0
return pid
def get_tsc(self):
"""Return TSC."""
tsc = (self._bytes_to_int(self._header[3:3+1]) & 0xC0) >> 6
return tsc
def get_afc(self):
"""Return AFC."""
afc = (self._bytes_to_int(self._header[3:3+1]) & 0x30) >> 4
return afc
def get_cc(self):
"""Return CC."""
cc = (self._bytes_to_int(self._header[3:3+1]) & 0x0F)
return cc
def get_af(self):
"""Return the adaptation field as an immutable bytes object, if present."""
if self.get_afc() >= 2:
af_len = self._payload[0]
if af_len > 0:
                return bytes(self._payload[:af_len + 1])  # keep the length byte: af[0]=length, af[1]=flags
else:
                return bytes(self._payload[:1])  # zero-length AF: just the length byte
else:
return None
def get_pcr_flag(self):
"""Return value of the PCR flag of TS packet pkt, if present."""
pcr_flag = None
if self.get_afc() >= 2:
af = self.get_af()
af_length = af[0]
if af_length > 0:
pcr_flag = (af[1] & 0x10) >> 4
return pcr_flag
def get_pcr(self):
"""Return value of the PCR field of TS packet pkt, if present."""
pcr = None
        if self.get_pcr_flag() == 1:
            # PCR: 33-bit base, 6 reserved bits, 9-bit extension, stored in the
            # six bytes that follow the adaptation field flags byte.
            pcr_field = int.from_bytes(self._content[6:12], "big")
            pcr_base = pcr_field >> 15
            pcr_extension = pcr_field & 0x1FF
            pcr = pcr_base*300 + pcr_extension
return pcr
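# Example usage (illustrative sketch; assumes a minimal, well-formed packet):
#
#   raw = bytes([0x47, 0x40, 0x11, 0x10]) + bytes(184)  # sync, PID 0x0011, AFC=1
#   pkt = TsPacket(raw)
#   print(pkt.get_pid(), pkt.get_pusi(), pkt.get_afc(), pkt.get_cc())  # -> 17 1 1 0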
| gpl-2.0 | 1,291,673,668,577,762,300 | 26.792079 | 80 | 0.572701 | false | 2.988285 | false | false | false |
WilliamDASILVA/TheMysteryOfSchweitzer | interfaces/DialogInterface.py | 1 | 1998 | from engine import Global;
from engine.Interface import Interface;
from engine.render.image import Image;
from engine.render.text import Text;
from engine.render.sprite import Sprite;
# --------------------------------------------------- *\
# [class] DialogInterface()
#
# * The dialog interface *
#
# --------------------------------------------------- */
class DialogInterface(Interface):
# --------------------------------------------------- *\
# [function] __init__()
#
# --------------------------------------------------- */
def __init__(self):
super().__init__();
self.define();
# --------------------------------------------------- *\
# [function] define()
#
# * Define the elements *
# Return : nil
# --------------------------------------------------- */
def define(self):
sX = Global.screenSize[0];
sY = Global.screenSize[1];
self.elements['background'] = Image("assets/darker.png");
self.elements['background'].setPosition(0, sY/2);
self.elements['background'].setSize(sX, sY/2);
self.elements['characterFace'] = Image("assets/characters/test/face.png");
self.elements['characterFace'].setPosition(0.05*sX, 0.78 * sY);
self.elements['characterFace'].setSize(0.1*sX, 0.1*sX);
self.elements['characterFace'].setDepth(5);
self.elements['author'] = Text("None", "arial");
self.elements['author'].setPosition(0.2*sX,0.82*sY);
self.elements['author'].setColor((255,255,255));
self.elements['author'].setFontSize(20);
self.elements['author'].setDepth(5);
self.elements['text'] = Text("None", "arial");
self.elements['text'].setPosition(0.2*sX,0.87*sY);
self.elements['text'].setColor((255,255,255));
self.elements['text'].setFontSize(16);
self.elements['text'].setDepth(5);
self.elements['help'] = Text("( Press 'Enter' to skip )", "arial");
self.elements['help'].setPosition(0.2*sX,0.92*sY);
self.elements['help'].setColor((200,200,200));
self.elements['help'].setFontSize(12);
self.elements['help'].setDepth(5);
| mit | 1,338,667,157,344,302,000 | 32.864407 | 76 | 0.560561 | false | 3.25938 | false | false | false |
madscatt/sasmol | src/python/test_sasmol/test_sassubset/test_intg_sassubset_Mask_get_dihedral_subset_mask.py | 1 | 15098 | '''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from sasmol.test_sasmol.util import env
"""
Integration test for sasio.sassubset.Mask.get_dihedral_subset_mask
contract:
null test by reading a nonexisting pdb, mask none
3 residues, mask none due to empty list
3 residues, mask none due to wrong flexible_residues list
3 residues, mask none due to illegal flexible_residues list
3 residues, mask 1st
3 residues, mask 2nd
3 residues, mask 3rd
3 residues, mask 1st and 2nd
3 residues, mask 1st and 3rd
3 residues, mask 3rd and 2nd
3 residues, mask all
3 residues with duplicate resid in 2 chains, mask 1st (both chains will be masked, what about if we only need to mask one?)
small protein (crambin with 46aa), randomly mask [12, 36, 46, 18, 8]
small protein (crambin with 46aa), mask all
large protein complex (groel with 526*14 residues), randomly mask [12, 36, 46, 18, 8] (Skipped as SASSIE_LARGETEST)
large protein complex (groel with 526*14 residues), mask all (Skipped as SASSIE_LARGETEST)
rna molecule, mask randomly 3 residue dihedrals
rna molecule, mask all residues
    problematic pdb (1PSI with unpaired MODEL/ENDMDL)
"""
from unittest import main,skipIf
from mocker import Mocker, MockerTestCase, ARGS
import sasmol.sasmol as sasmol
import sasmol.sassubset as sassubset
import numpy
import os
PdbDataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','pdb_common')+os.path.sep
class Test_intg_sassubset_Mask_get_dihedral_subset_mask(MockerTestCase):
def setUp(self):
self.o=sasmol.SasMol(0)
def assert_list_almost_equal(self,a,b,places=5):
if (len(a)!=len(b)):
raise TypeError
else:
for i in range(len(a)):
if isinstance(a[i],(int,float,numpy.generic)):
if (numpy.isnan(a[i]) and numpy.isnan(b[i])): continue
self.assertAlmostEqual(a[i],b[i],places)
else:
self.assert_list_almost_equal(a[i],b[i],places)
def hard_wired_get_single_dihedral_subset_mask(self, o, nfr, mtype):
farray = [0]*len(o.name())
for i in range(len(o.name())):
name = o.name()[i]
resid = o.resid()[i]
if mtype==0:
if (resid==nfr-1 and name=='C') or (resid == nfr and name in ['N','CA','C']) or (resid==nfr+1 and name=='N'):
farray[i]=1
elif mtype==1:
if (resid==nfr-1 and name=="O3'") or (resid == nfr and name in ["P","O5'","C5'","C4'","C3'","O3'"]) or (resid==nfr+1 and name=='P') or (resid==nfr+1 and name=="O5'"):
farray[i]=1
return farray
def hard_wired_get_all_dihedral_subset_mask(self, o, flexible_residues, mtype):
farray = []
for nfr in flexible_residues:
farray.append(self.hard_wired_get_single_dihedral_subset_mask(o, nfr, mtype))
return farray
def test_null(self):
'''
null test
read a nonexisting pdb
'''
#
try:
self.o.read_pdb(PdbDataPath+'XXX.pdb')
except Exception:
pass
#
#
flexible_residues = []
mtype=0
with self.assertRaises(AttributeError):
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
def test_3AAD_mask_none_empty_flexible_residues(self):
'''
test a pdb file with 3 residue
mask none of the residue dihedral due to empty flexible residue list
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = []
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_none_wrong_flexible_residues(self):
'''
test a pdb file with 3 residue
mask none of the residue dihedral due to wrong flexible residue list
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [1,2]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_none_illegal_flexible_residues(self):
'''
test a pdb file with 3 residue
mask none of the residue dihedral due to wrong flexible residue list
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [1,'a']
mtype=0
with self.assertRaises(ValueError):
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
def test_3AAD_mask_1st(self):
'''
test a pdb file with 3 residue
mask the 1st residue dihedral
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [515]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_2nd(self):
'''
test a pdb file with 3 residue
mask the 2nd residue dihedral
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [516]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_1st_2nd(self):
'''
test a pdb file with 3 residue
mask the 1st and 2nd residue dihedral
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [515,516]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_1st_3nd(self):
'''
test a pdb file with 3 residue
mask the 1st and 3rd residue dihedral
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [515,517]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_3rd_2nd(self):
'''
test a pdb file with 3 residue
mask the 3rd and 2nd residue dihedral
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
        flexible_residues = [517,516]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_mask_all(self):
'''
test a pdb file with 3 residue
mask the all residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'3AAD.pdb')
#
flexible_residues = [515,516,517]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_3AAD_2chains_mask_1st(self):
'''
test a pdb file with 3 residues with duplicating resid in 2 chains
mask the 1st residue dihedral
'''
#
self.o.read_pdb(PdbDataPath+'3AAD-2chain.pdb')
#
flexible_residues = [515]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_1CRN_mask_random_5(self):
'''
test a small protein (crambin)
mask randomly 5 residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'1CRN.pdb')
#
flexible_residues = [12, 36, 46, 18, 8]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_1CRN_mask_all(self):
'''
test a small protein (crambin)
mask the all residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'1CRN.pdb')
#
flexible_residues = range(1,46)
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
@skipIf(os.environ['SASSIE_LARGETEST']=='n',"I am not testing huge files")
def test_1KP8_mask_random_5(self):
'''
test a large protein complex (groel)
mask randomly 5 residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'1KP8.pdb')
#
flexible_residues = [12, 36, 46, 18, 8]
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
@skipIf(os.environ['SASSIE_LARGETEST']=='n',"I am not testing huge files. It will take 5 min")
def test_1KP8_mask_all(self):
'''
test a large protein complex (groel)
mask the all residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'1KP8.pdb')
#
flexible_residues = range(2,527)
mtype=0
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_rna_mask_random(self):
'''
test a rna molecule
mask randomly 3 residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'rna.pdb')
#
flexible_residues = [1, 7, 13]
mtype=1
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
@skipIf(os.environ['SASSIE_LARGETEST']=='n',"I am not testing huge files. It will take 5 min")
def test_rna_mask_all(self):
'''
test a rna molecule
mask all residue dihedrals
'''
#
self.o.read_pdb(PdbDataPath+'rna.pdb')
#
flexible_residues = range(1,25)
mtype=1
result_farray = self.o.get_dihedral_subset_mask(flexible_residues,mtype)
#
expected_farray = self.hard_wired_get_all_dihedral_subset_mask(self.o, flexible_residues, mtype)
#
print 'result_mask:\n', result_farray, '\nexpected_mask:\n',expected_farray
self.assertTrue(isinstance(result_farray, numpy.ndarray))
self.assert_list_almost_equal(result_farray, expected_farray)
def test_1PSI(self):
'''
test a pdb which will not be read successfully by read_pdb
assertRaises
'''
#
try:
self.o.read_pdb(PdbDataPath+'1PSI.pdb')
except Exception:
pass
#
flexible_residues = range(2,527)
mtype=0
#
with self.assertRaises(Exception):
self.o.get_dihedral_subset_mask(flexible_residues,mtype)
def tearDown(self):
pass
if __name__ == '__main__':
main()
| gpl-3.0 | -5,796,164,659,144,685,000 | 33.158371 | 178 | 0.647437 | false | 3.20552 | true | false | false |
barjacks/pythonrecherche | 07 Selenium, more beautifulsoup/zh_wie_neu.py | 1 | 1387 |
# coding: utf-8
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import progressbar
bar = progressbar.ProgressBar()
lst = []
lst_pass = []
for elem,i in zip(range(1697,13000), bar((range(1697,13000)))):
url = "https://www.zueriwieneu.ch/report/" + str(elem)
response = requests.get(url)
züri_soup = BeautifulSoup(response.text, 'html.parser')
if züri_soup.find('h1').text != 'Melden Sie Schäden an der Infrastruktur von Zürich':
Mini_dict = {
'Kategorie' : züri_soup.find('h1').text,
'Meldedatum' : züri_soup.find('div', {'class':'problem-header clearfix'}).find('p').text.strip(),
'Meldung' : züri_soup.find('div', {'class':'problem-header clearfix'}).find_all('p')[1],
'Antwortdatum' : züri_soup.find('ul', {'class':'item-list item-list--updates'}).find_all('p')[0].text,
'Antwort' : züri_soup.find('ul', {'class':'item-list item-list--updates'}).find_all('p')[1].text,
'URL' : url,
'Lat' : float(züri_soup.find('div', {'id':'js-map-data'}).get('data-latitude')),
'Long': float(züri_soup.find('div', {'id':'js-map-data'}).get('data-longitude'))
}
lst.append(Mini_dict)
else:
lst_pass.append(url)
date = time.strftime("%Y-%m-%d%H:%M:%S")
pd.DataFrame(lst).to_csv(date+'züriwieneu.csv')
| mit | -5,546,033,929,979,838,000 | 37.194444 | 114 | 0.599273 | false | 2.852697 | false | false | false |
ActiveState/code | recipes/Python/223610_Yet_another_way_use_fields/recipe-223610.py | 1 | 2458 | import types
class FieldNameError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return `"""Field name '%s' does not exist""" % self.value`
class fRow(tuple):
# class for each row
def __getattr__(self,i):
return tuple.__getitem__(self,self.__Field2Index__(i))
def __call__(self,i):
return tuple.__getitem__(self,self.__Field2Index__(i))
def __getitem__(self,i):
if type(i) != types.SliceType:
return tuple.__getitem__(self,self.__Field2Index__(i))
else:
if i.start is not None and i.stop is not None:
return self[self.__Field2Index__(i.start):self.__Field2Index__(i.stop)]
elif i.start is None:
return self[:self.__Field2Index__(i.stop)]
elif i.stop is None:
return self[self.__Field2Index__(i.start):]
else:
return self[:]
def __Field2Index__():
return None
class fRowset(list):
# list to hold the rows
def __init__(self,rowset,description):
# save the description as is
self.description = fRow(description)
self.description.__Field2Index__ = self.__fieldToIndex
# Create the list and dict of fields
self.fields = []
self.__fieldDict = {}
for f in range(len(description)):
if type(description[f]) == types.TupleType or type(description[f]) == types.ListType:
self.__fieldDict[description[f][0].lower()] = f
self.fields.append( description[f][0].lower())
else:
self.__fieldDict[description[f].lower()] = f
self.fields.append( description[f].lower())
# Add all the rows
for r in rowset:
self.append(r)
def append(self,new):
# Create a new record
fR = fRow(new)
# Pass it the function that looks up the index
fR.__Field2Index__ = self.__fieldToIndex
list.append(self,fR)
return
# Look up the field and return the index
def __fieldToIndex(self,field):
if type(field) == int:
return field
try:
return self.__fieldDict[field.lower()]
except:
raise FieldNameError, field
def ffetchall(cursor):
# Nice wrapper for fetchall
return fRowset(cursor.fetchall(),cursor.description)
def ffetchmany(cursor):
# Nice wrapper for fetchmany
return fRowset(cursor.fetchmany(),cursor.description)
def fquery(connection,query):
curs = connection.cursor()
curs.execute(query)
rows = fRowset(curs.fetchall(),curs.description)
curs.close()
return rows
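# Example usage (sketch; assumes a DB-API connection `conn` and a table
# "people" with columns id and name):
#
#   curs = conn.cursor()
#   curs.execute("SELECT id, name FROM people")
#   rows = ffetchall(curs)
#   for row in rows:
#       print row.id, row['name'], row('NAME') # attribute, key and call access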
| mit | 9,081,782,035,038,960,000 | 26.931818 | 91 | 0.635883 | false | 3.588321 | false | false | false |
geduldig/TweetBase | TweetBase/TweetCouch.py | 1 | 5890 | import couchdb
from couchdb.design import ViewDefinition
class TweetCouch(object):
def __init__(self, dbname, url=None):
try:
self.server = couchdb.Server(url=url)
self.db = self.server.create(dbname)
self._create_views()
except couchdb.http.PreconditionFailed:
self.db = self.server[dbname]
def _create_views(self):
# twitter/count_type
count_type_map = 'function(doc) { emit([doc.type, doc.id], 1); }'
count_type_reduce = 'function(keys, values) { return sum(values); }'
view = ViewDefinition('twitter', 'count_type', count_type_map, reduce_fun=count_type_reduce)
view.sync(self.db)
# The unique key for each tweet is doc.id. The key (a 64-bit long) is represented as a string because
# the largest JavaScript long is 2^53.
# A problem arises because keys are sorted as strings and a doc.id may have fewer digits but a larger
# leading digit. So, it is sorted in the wrong order.
        # The solution is to zero-pad the doc.id to fill 19 digits. (The max 64-bit long - 2^63 - has 19 digits.)
# That is why we emit the doc.id key as ("0000000000000000000"+doc.id).slice(-19).
# twitter/get_tweets
get_tweets = 'function(doc) { if (doc.type == "TWITTER_STATUS") emit(("0000000000000000000"+doc.id).slice(-19), doc); }'
view = ViewDefinition('twitter', 'get_tweets', get_tweets)
view.sync(self.db)
# twitter/get_tweets_by_date (sort by date and tweet id)
get_tweets_by_date = 'function(doc) { if (doc.type == "TWITTER_STATUS") emit((new Date(doc.created_at).getTime())+"-"+("0000000000000000000"+doc.id).slice(-19), doc); }'
view = ViewDefinition('twitter', 'get_tweets_by_date', get_tweets_by_date)
view.sync(self.db)
# twitter/get_users
get_users = 'function(doc) { if (doc.type == "TWITTER_USER") emit(doc.id, doc); }'
view = ViewDefinition('twitter', 'get_users', get_users)
view.sync(self.db)
def tweet_count(self):
for row in self.db.view('twitter/count_type', group=True, group_level=1,
startkey=['TWITTER_STATUS'], endkey=['TWITTER_STATUS',{}]):
return row['value']
return -1
def user_count(self):
for row in self.db.view('twitter/count_type', group=True, group_level=1,
startkey=['TWITTER_USER'], endkey=['TWITTER_USER',{}]):
return row['value']
return -1
def prune_tweets(self, count):
for row in self.db.view('twitter/get_tweets', limit=count, descending=False):
self.db.delete(self.db[row.id])
def compact(self):
self.db.compact()
self.db.cleanup()
def delete(self):
self.server.delete(self.db.name)
def _new_tweet_doc(self, tw, id_time):
return {
'_id': tw['id_str'],
'type': 'TWITTER_STATUS',
'coordinates': tw['coordinates']['coordinates'] if tw['coordinates'] else None,
'created_at': tw['created_at'],
'entities': tw['entities'],
'favorite_count': tw['favorite_count'],
'id': tw['id_str'],
'in_reply_to_screen_name': tw['in_reply_to_screen_name'],
'in_reply_to_status_id': tw['in_reply_to_status_id'],
'in_reply_to_user_id': tw['in_reply_to_user_id'],
'lang': tw['lang'],
'place': tw['place'],
'retweet_count': tw['retweet_count'],
'retweeted_status_id': tw['retweeted_status']['id_str'] if 'retweeted_status' in tw else None, # PARENT
'retweeted_by_list': [], # extra field containing id's of CHILD tweets
'source': tw['source'],
'text': tw['text'],
'truncated': tw['truncated'],
'user_id': tw['user']['id_str']
}
def _new_user_doc(self, user):
return {
'_id': user['id_str'],
'type': 'TWITTER_USER',
'created_at': user['created_at'],
'description': user['description'],
'entities': user['entities'] if 'entities' in user else None,
'favourites_count': user['favourites_count'],
'followers_count': user['followers_count'],
'friends_count': user['friends_count'],
'geo_enabled': user['geo_enabled'],
'id': user['id_str'],
'lang': user['lang'],
'location': user['location'],
'name': user['name'],
'profile_image_url': user['profile_image_url'],
'screen_name': user['screen_name'],
'statuses_count': user['statuses_count'],
'url': user['url'],
'utc_offset': user['utc_offset'],
'verified': user['verified']
}
# def save_tweet(self, tw, retweeted_by_id=None, save_retweeted_status=True, id_time=False):
# doc = self.db.get(tw['id_str'])
# if not doc:
# if save_retweeted_status and 'retweeted_status' in tw:
# self.save_tweet(tw['retweeted_status'], tw['id_str'])
# # NEED TO UPDATE retweet_count OF tw['retweeted_status'] ???
# self.save_user(tw['user'])
# doc = self._new_tweet_doc(tw, id_time)
# if retweeted_by_id:
# doc['retweeted_by_list'].append(retweeted_by_id)
# self.db.save(doc)
def save_tweet(self, tw, retweeted_by_id=None, save_retweeted_status=True, raw=False):
if raw:
tw['_id'] = tw['id_str']
tw['type'] = 'TWITTER_STATUS'
self.db.save(tw)
else:
# SAVE TWEET W/O USER FIELD, AND SAVE USER AS A SEPARATE RECORD
if save_retweeted_status and 'retweeted_status' in tw:
self.save_tweet(tw['retweeted_status'], tw['id_str'])
self.save_user(tw['user'])
doc = self._new_tweet_doc(tw)
if retweeted_by_id:
doc['retweeted_by_list'].append(retweeted_by_id)
self.db.save(doc)
def save_user(self, user):
if not self.db.get(user['id_str']):
doc = self._new_user_doc(user)
self.db.save(doc)
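# Minimal usage sketch (the database name, server URL and the `status` dict --
# a tweet as returned by the Twitter API -- are assumptions, not part of this module):
#
#   tc = TweetCouch('tweets', url='http://localhost:5984/')
#   tc.save_tweet(status)
#   print(tc.tweet_count(), tc.user_count())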
| mit | -2,338,989,167,631,813,600 | 40.478873 | 171 | 0.589643 | false | 2.967254 | false | false | false |
itucsdb1612/itucsdb1612 | Authors.py | 1 | 3560 | import psycopg2 as dbapi2
class Author:
def __init__(self,id, name, lastname, birthdate, nationality, penname,description,picture):
self.id= id
self.name= name
self.lastname=lastname
self.birthdate=birthdate
self.nationality=nationality
self.penname = penname
self.description = description
self.picture=picture
author1 = Author(None,"Ernest","Hemingway",1899,"American",None,None,"https://raw.githubusercontent.com/itucsdb1612/itucsdb1612/master/wiki_screenshots/sahalemre/Pic/hemingway.jpg")
author2 = Author(None,"Samuel","Clemens",1835,"American","Mark Twain",None,"https://raw.githubusercontent.com/itucsdb1612/itucsdb1612/master/wiki_screenshots/sahalemre/Pic/marktwain.jpg")
author3 = Author(None,"Metehan","Gültekin",1994,"Turkish",None,None,"https://raw.githubusercontent.com/itucsdb1612/itucsdb1612/master/wiki_screenshots/sahalemre/Pic/mete.jpg")
author4 = Author(None,"Ilay","Köksal",1995,"Turkish",None,None,"https://raw.githubusercontent.com/itucsdb1612/itucsdb1612/master/wiki_screenshots/sahalemre/Pic/ilay.jpg")
def insertAuthor(dsn,author):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """ INSERT INTO AUTHORS (NAME, LASTNAME, BIRTHDATE, NATIONALITY, PENNAME, DESCRIPTION,PICTURE) VALUES (%s,%s,%s,%s,%s,%s,%s)"""
cursor.execute(statement,(author.name,author.lastname,author.birthdate,author.nationality,author.penname,author.description,author.picture))
cursor.close()
def selectAuthor(dsn):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM AUTHORS ORDER BY ID ASC"""
cursor.execute(statement)
authors = cursor.fetchall()
return authors
cursor.close()
def selectAuthorbyLastName(dsn,lastname):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM AUTHORS WHERE LASTNAME = %s"""
        cursor.execute(statement, (lastname,))
authors = cursor.fetchall()
return authors
cursor.close()
def selectAuthorbyId(dsn,selectid):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM AUTHORS WHERE ID = %s"""
cursor.execute(statement,[selectid])
authors = cursor.fetchall()
return authors
cursor.close()
def deleteAuthor(dsn,deleteid):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """ DELETE FROM AUTHORS WHERE ID = %s"""
        cursor.execute(statement, (deleteid,))
cursor.close()
def updateAuthor(dsn,updateid,newauthor):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """ UPDATE AUTHORS SET NAME = %s, LASTNAME = %s, BIRTHDATE = %s, NATIONALITY = %s, PENNAME = %s DESCRIPTION = %s PICTURE = %s WHERE ID = %s"""
cursor.execute(statement,(newauthor.name,newauthor.lastname,newauthor.birthdate,newauthor.nationality,newauthor.penname,newauthor.description,newauthor.picture,updateid))
cursor.close()
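# Minimal usage sketch (the dsn connection string below is an assumption, not part of this module):
#
#   dsn = "dbname='itucsdb' user='vagrant' password='vagrant' host='localhost' port=5432"
#   insertAuthor(dsn, author1)
#   print(selectAuthorbyLastName(dsn, 'Hemingway'))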
def getBooksofAuthor(dsn,authorid):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
statement = """ SELECT TITLE FROM BOOKS WHERE AUTHORID = %s"""
cursor.execute(statement,(authorid,))
books = cursor.fetchall()
cursor.close()
return books | gpl-3.0 | 7,379,727,859,289,970,000 | 44.628205 | 187 | 0.676785 | false | 3.590313 | false | false | false |
yokiwhh/bookLab7 | bookapp/models.py | 1 | 1024 | from django.db import models
from django.contrib import admin
# Create your models here.
# -*- coding: utf-8 -*-
def decode(info):
return info.decode('utf-8')
class Book(models.Model):
class Meta:
verbose_name = 'book'
verbose_name_plural = verbose_name
ISBN = models.CharField('ISBN',max_length=500)
title = models.CharField('title',max_length=200)
author = models.CharField('author',max_length=60)
price = models.CharField('price',max_length=60,blank=True)
publisher = models.CharField('publisher',max_length=200,blank=True)
pubdate = models.CharField('PublishDate',max_length=60,blank=True)
def __unicode__(self):
return str(self.title)
class Author(models.Model):
class Meta:
verbose_name = 'author'
verbose_name_plural = verbose_name
Name = models.CharField('Name',max_length=60)
Age = models.IntegerField('Age')
Country = models.CharField('Country',max_length=60)
def __unicode__(self):
return str(self.Name)
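# The `admin` import above is otherwise unused; a minimal registration sketch
# (an assumption, not something the original app does) would be:
#
#   admin.site.register(Book)
#   admin.site.register(Author)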
| mit | -702,074,715,538,148,600 | 33.133333 | 71 | 0.669922 | false | 3.618375 | false | false | false |
vladimirdolzhenko/gflogger | scripts/calc-report.py | 1 | 5526 | #!/usr/bin/python
import sys;
def sortedKeys(dict):
keys = dict.keys()
keys.sort()
return keys
if __name__ == '__main__':
i = 0
width = 1200
height = 600
reader = open(sys.argv[1], "r")
jsname = '%s.js' % sys.argv[1]
jswriter = open(jsname, "w")
htmlwriter = open(sys.argv[1] + '.html', "w")
charts = []
colors = ["'#F00'", "'#080'", "'#00009c'", "'orange'", "'purple'", "'grey'", "'cyan'", "'black'"]
name = ''
threads = ''
messages = ''
loggerMap = {}
threadsMap = {}
messagesMap = {}
timeMap = {}
gcMap = {}
for line in reader:
#print i , " " , line
line = line.strip()
idx = i % 3
a = line.split()
if idx == 0 :
name = a[0]
threads = a[4]
messages = a[6]
loggerMap[name] = True
threadsMap[int(threads)] = True
messagesMap[int(messages)] = True
elif idx == 1 :
time = float(a[3])
key = name + '_' + threads + '_' + messages
if key not in timeMap:
timeMap[key] = []
timeMap[key].append(time)
elif idx == 2 :
if len(line) == 0:
gc = 0.0
else :
gc = float(line)
key = name + '_' + threads + '_' + messages
if key not in gcMap:
gcMap[key] = []
gcMap[key].append(gc)
i = i + 1
v = []
v.append('threads')
v.append('messages')
for name in sortedKeys(loggerMap):
v.append(name + " throughput")
v.append(name + " gc")
jswriter.write('function drawChart(){ \n')
for threads in sortedKeys(threadsMap):
v = []
v.append(str(threads))
for messages in sortedKeys(messagesMap):
v2 = list(v)
v2.append(str(messages))
for name in sortedKeys(loggerMap):
key = name + '_' + str(threads) + '_' + str(messages)
avg = sum(timeMap[key])/len(timeMap[key])
v2.append('%.2f' % (float(messages) / avg))
# into ms
avg = sum(gcMap[key])/len(gcMap[key])
v2.append('%.2f' % (1000 * avg))
for threads in sortedKeys(threadsMap):
t = str(threads)
ts = str(threads)
tname = "threads"
if threads == 1:
tname = "thread"
ts = 'single'
v = []
for messages in sortedKeys(messagesMap):
v2 = []
v2.append("'" + str(messages) + "'")
vc = ['Messages']
for name in sortedKeys(loggerMap):
key = name + '_' + str(threads) + '_' + str(messages)
avg = sum(timeMap[key])/len(timeMap[key])
v2.append('%.2f' % (float(messages) / avg))
# into ms
#v2.append('%.2f' % (1000 * gcMap[key]))
vc.append(name)
v.append('[' + ( ','.join(v2)) + ']')
logger_names = ','.join(vc)
logger_names = '';
for i in range(0, len(vc)):
type = 'number'
if i == 0:
type = 'string'
logger_names = "%s\n\
data.addColumn('%s', '%s');" % (logger_names, type, vc[i])
chartname = 'throughput_%s_chart' % t
charts.append(chartname)
jswriter.write("\t/********* %s %s **************/ \n\
//throughput \n\
data = new google.visualization.DataTable(); \n\
%s \n\
data.addRows([ \n\
%s\n\
]); \n\
\n\
chart = new google.visualization.LineChart(document.getElementById('%s')); \n\
chart.draw(data, \n\
{ \n\
width: %d, height: %d, \n\
title: 'Throughput, %s %s', \n\
hAxis: {title: 'number of messages', titleTextStyle: {color: '#000'}, logScale: true}, \n\
vAxis: {title: 'messages / ms', gridlines: {color: '#ccc', count: 8}}, \n\
legend: {position: 'right', textStyle: {color: 'black', fontSize: 10}}, \n\
colors: [%s]\n\
});\n\
" % (ts, tname, logger_names, ',\n\t\t\t'.join(v), chartname, width, height, ts, tname, ','.join(colors)))
v = []
for messages in sortedKeys(messagesMap):
v2 = []
v2.append("'" + str(messages) + "'")
vc = ['Messages']
for name in sortedKeys(loggerMap):
key = name + '_' + str(threads) + '_' + str(messages)
#v2.append('%.2f' % (float(messages) / timeMap[key]))
# into ms
avg = sum(gcMap[key])/len(gcMap[key])
v2.append('%.2f' % (1000 * avg))
vc.append(name)
v.append('[' + ( ','.join(v2)) + ']')
logger_names = '';
for i in range(0, len(vc)):
type = 'number'
if i == 0:
type = 'string'
logger_names = "%s\n\
data.addColumn('%s', '%s');" % (logger_names, type, vc[i])
chartname = 'gc_%s_chart' % t
charts.append(chartname)
jswriter.write("//gc \n\
data = new google.visualization.DataTable(); \n\
%s\n\
data.addRows([ \n\
%s \n\
]); \n\
\n\
chart = new google.visualization.LineChart(document.getElementById('%s'));\n\
chart.draw(data, \n\
{\n\
width: %d, height: %d,\n\
title: 'Total stop the world, %s %s',\n\
hAxis: {title: 'number of messages', titleTextStyle: {color: '#000'}, logScale: true},\n\
vAxis: {title: 'ms', gridlines: {color: '#ccc', count: 8}},\n\
legend: {position: 'right', textStyle: {color: 'black', fontSize: 10}},\n\
colors: [%s]\n\
});\n" % (logger_names, ',\n\t\t\t'.join(v), chartname, width, height, ts, tname, ','.join(colors)))
jswriter.write('}\n')
htmlwriter.write('<html>\n\
<body>\n\
<div id="chart"></div>\n\
%s\n\
<script type="text/javascript" src="https://www.google.com/jsapi"></script>\n\
<script type="text/javascript" src="%s"></script>\n\
<script type="text/javascript">\n\
google.load("visualization", "1", {packages:["imagelinechart", "imagechart", "corechart"]});\n\
google.setOnLoadCallback(drawChart);\n\
</script>\n\
</body> \n\
</html>\n' % ( "\n".join(map(lambda c: '<div id="%s"></div>' % c, charts)), jsname ) )
jswriter.close()
htmlwriter.close()
| apache-2.0 | -6,747,957,376,518,885,000 | 24.702326 | 108 | 0.549946 | false | 2.66185 | false | false | false |
jtraver/dev | python/hex/hex2.py | 1 | 1148 | #!/usr/bin/python
def main():
# hex1()
hex2()
def hex1():
# ba1 = bytearray.fromhex("0x69")
ba1 = bytearray.fromhex("69")
print "ba1 = %s" % str(ba1)
def hex2():
strings = []
strings.append("asclient.connection.recv 1037 BYTE = 0x21 33 '!'")
strings.append("asclient.connection.recv 1038 BYTE = 0x69 105 'i'")
strings.append("asclient.connection.recv 1039 BYTE = 0x6f 111 'o'")
strings.append("asclient.connection.recv 1040 BYTE = 0x34 52 '4'")
strings.append("asclient.connection.recv 1041 BYTE = 0x54 84 'T'")
strings.append("asclient.connection.recv 1042 BYTE = 0xcf 207 '?'")
strings.append("asclient.connection.recv 1043 BYTE = 0x29 41 ')'")
strings.append("asclient.connection.recv 1044 BYTE = 0x7a 122 'z'")
strings.append("asclient.connection.recv 1045 BYTE = 0xd2 210 '?'")
strings.append("asclient.connection.recv 1046 BYTE = 0x51 81 'Q'")
for str1 in strings:
print "str1 = %s" % str(str1)
fields1 = str1.split(" ")
for i1 in xrange(len(fields1)):
field1 = fields1[i1]
print " %s %s" % (str(i1), str(field1))
main()
| mit | 4,119,470,077,678,726,000 | 36.032258 | 71 | 0.627178 | false | 2.997389 | false | false | false |
latticelabs/Mitty | mitty/benchmarking/misalignment_plot.py | 1 | 9184 | """Prepare a binned matrix of misalignments and plot it in different ways"""
import click
import pysam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
import numpy as np
def we_have_too_many_bins(bins):
return sum([len(bb) for bb in bins]) > 5000 # This is our threshold for too many bins to compute
def autoscale_bin_size(chrom_lens, bin_cnt=100.0):
return int(sum(chrom_lens) / bin_cnt)
def compute_misalignment_matrix_from_bam(bam_fp, bin_size=None, i_know_what_i_am_doing=False):
"""Create a matrix of binned mis-alignments
:param bam_fp: input BAM
:param bin_size: size of bin in mega bases
:param i_know_what_i_am_doing: Set this to override the runtime warning of too many bins
"""
def binnify(_pos, _bins):
for n in range(1, len(_bins)):
if _pos < _bins[n]:
return n - 1
return len(_bins) - 1 # Should not get here
chrom_lens = [hdr['LN'] for hdr in bam_fp.header['SQ']]
bin_size = bin_size * 1e6 if bin_size is not None else autoscale_bin_size(chrom_lens)
bins = [np.array(range(0, hdr['LN'], bin_size) + [hdr['LN']], dtype=int) for hdr in bam_fp.header['SQ']]
if not i_know_what_i_am_doing and we_have_too_many_bins(bins):
raise RuntimeWarning('The number of bins will be very large. '
'If you are sure you want to do this, '
'use the --i-know-what-i-am-doing flag.')
bin_centers = [(bb[:-1] + bb[1:]) / 2.0 for bb in bins]
# Rows = source (correct pos) Cols = destination (aligned pos)
matrices = [[np.zeros(shape=(len(bins[j]) - 1, len(bins[i]) - 1), dtype='uint32') for i in range(len(bins))] for j in range(len(bins))]
# TAG TYPE VALUE
# XR i Aligned chromosome
# XP i Aligned pos
for r in bam_fp:
c_chrom, c_pos, a_chrom, a_pos = r.reference_id, r.pos, r.get_tag('XR'), r.get_tag('XP')
c_pos_binned, a_pos_binned = binnify(c_pos, bins[c_chrom]), binnify(a_pos, bins[a_chrom])
matrices[c_chrom][a_chrom][c_pos_binned, a_pos_binned] += 1
return chrom_lens, bins, bin_centers, matrices
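# Usage sketch (the BAM filename is an assumption): after
#   chrom_lens, bins, bin_centers, matrices = \
#       compute_misalignment_matrix_from_bam(pysam.AlignmentFile('bad.bam', 'rb'))
# matrices[i][j][r, c] counts reads whose correct position lies in bin r of
# chromosome i but whose aligned position lies in bin c of chromosome j.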
def plot_genome_as_a_circle(ax, chrom_lens, chrom_gap=np.pi / 50, chrom_radius=1.0, chrom_thick=5, r_max=1.05):
"""Plot the chromosomes on a circle."""
total_len = sum(chrom_lens)
radians_per_base = (2.0 * np.pi - len(chrom_lens) * chrom_gap) / total_len # With allowance for chrom gaps
theta_stops, x_ticks, x_tick_labels = [], [], []
delta_radian = 0.01
start_radian = 0
for ch_no, l in enumerate(chrom_lens):
end_radian = start_radian + l * radians_per_base
theta = np.arange(start_radian, end_radian, delta_radian)
theta_stops.append((start_radian, end_radian))
ax.plot(theta, [chrom_radius * 1.01] * theta.size, lw=chrom_thick, zorder=-1) # , color=[.3, .3, .3])
x_ticks.append((start_radian + end_radian)/2)
x_tick_labels.append(str(ch_no + 1))
start_radian = end_radian + chrom_gap
plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels)
ax.set_rmax(r_max)
return theta_stops
def plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops,
chrom_radius=1.0, scaling_factor=0.01):
scaling_factor *= 0.01
# http://matplotlib.org/users/path_tutorial.html
codes = [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_origin, range_bp_dest = float(chrom_lens[i]), float(chrom_lens[j])
offset_origin, offset_dest = theta_stops[i][0], theta_stops[j][0]
range_origin, range_dest = theta_stops[i][1] - theta_stops[i][0], theta_stops[j][1] - theta_stops[j][0]
scale_origin, scale_dest = range_origin / range_bp_origin, range_dest / range_bp_dest
c_origin, c_dest = offset_origin + bin_centers[i] * scale_origin, offset_dest + bin_centers[j] * scale_dest
this_origin, this_dest = np.tile(c_origin, c_dest.shape[0]), np.repeat(c_dest, c_origin.shape[0])
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
for ii in idx:
t0, t1 = this_origin[ii], this_dest[ii]
this_radius = max(min(1.0, abs(t1 - t0) / np.pi), 0.05) * chrom_radius
vertices = [
(t0, chrom_radius), # P0
(t0, chrom_radius - this_radius), # P1
(t1, chrom_radius - this_radius), # P2
(t1, chrom_radius), # P3
]
path = Path(vertices, codes)
patch = patches.PathPatch(path, facecolor='none', lw=scaling_factor * mat_flat[ii])
ax.add_patch(patch)
def circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor):
"""Plot the confusion matrix as a circle plot."""
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
theta_stops = plot_genome_as_a_circle(ax, chrom_lens)
plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=scaling_factor)
def plot_genome_as_a_square(ax, bins, chrom_gap=1000, chrom_thick=5):
"""Plot the chromosomes on a matrix."""
start_pos, linear_stops, x_ticks, x_tick_labels = chrom_gap, [], [], []
for ch_no, b in enumerate(bins):
linear_stops.append([start_pos, start_pos + b[-1]])
ax.plot([x + start_pos for x in b], [0 for _ in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
ax.plot([0 for _ in b], [x + start_pos for x in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
x_ticks.append((start_pos + start_pos + b[-1]) / 2)
x_tick_labels.append(str(ch_no + 1))
start_pos += b[-1] + chrom_gap
#plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels, yticks=x_ticks, yticklabels=x_tick_labels)
return linear_stops
def plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=1.0, plot_grid=True):
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_x, range_bp_y = float(chrom_lens[i]), float(chrom_lens[j])
offset_x, offset_y = linear_stops[i][0], linear_stops[j][0]
range_x, range_y = linear_stops[i][1] - linear_stops[i][0], linear_stops[j][1] - linear_stops[j][0]
scale_x, scale_y = range_x / range_bp_x, range_y / range_bp_y
cx, cy = offset_x + bin_centers[i] * scale_x, offset_y + bin_centers[j] * scale_y
this_x, this_y = np.tile(cx, cy.shape[0]), np.repeat(cy, cx.shape[0])
if plot_grid: ax.plot(this_x, this_y, '.', color=(0.8, 0.8, 0.8), ms=2, zorder=-1)
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
if idx.size > 0:
ax.scatter(this_x[idx], this_y[idx], mat_flat[idx] * scaling_factor, facecolors='none')
def matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=True):
"""Plot the confusion matrix as a ... matrix."""
fig = plt.figure()
ax = fig.add_subplot(111)
linear_stops = plot_genome_as_a_square(ax, bins, chrom_gap=max(chrom_lens) * 0.1)
plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=scaling_factor, plot_grid=plot_grid)
plt.setp(ax, aspect=1, xlabel='Correct', ylabel='Aligned')
def is_grid_too_dense(bins):
return sum([len(bb) for bb in bins]) > 100 # This is our threshold for too dense a grid to show
def auto_scale_scaling_factor(matrices, scale=1000.0):
return scale / max([matrices[i][j].max() for i in range(len(matrices)) for j in range(len(matrices[i]))])
@click.command()
@click.argument('badbam', type=click.Path(exists=True))
@click.option('--circle', type=click.Path(), help='Name of figure file for circle plot')
@click.option('--matrix', type=click.Path(), help='Name of figure file for matrix plot')
@click.option('--bin-size', type=float, default=None, help='Bin size in Mb. Omit to auto-scale')
@click.option('--scaling-factor', type=float, default=None, help='Scale size of disks/lines in plot. Omit to auto-scale')
@click.option('--i-know-what-i-am-doing', is_flag=True, help='Override bin density safety')
def cli(badbam, circle, matrix, bin_size, scaling_factor, i_know_what_i_am_doing):
"""Prepare a binned matrix of mis-alignments and plot it in different ways"""
chrom_lens, bins, bin_centers, matrices = \
compute_misalignment_matrix_from_bam(pysam.AlignmentFile(badbam, 'rb'),
bin_size=bin_size, i_know_what_i_am_doing=i_know_what_i_am_doing)
scaling_factor = scaling_factor or auto_scale_scaling_factor(matrices)
if circle is not None:
circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor)
plt.savefig(circle)
if matrix is not None:
matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor,
plot_grid=not is_grid_too_dense(bins))
plt.savefig(matrix)
if __name__ == '__main__':
cli() | gpl-2.0 | 1,032,870,527,477,394,600 | 44.470297 | 145 | 0.643728 | false | 2.902655 | false | false | false |
idf/scipy_util | scipy_util/image/color_kmeans.py | 1 | 2314 | # USAGE
# python color_kmeans.py --image images/jp.png --clusters 3
# Author: Adrian Rosebrock
# Website: www.pyimagesearch.com
# import the necessary packages
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
def centroid_histogram(clt):
# grab the number of different clusters and create a histogram
# based on the number of pixels assigned to each cluster
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins = numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
# return the histogram
return hist
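# For example, with --clusters 3 the returned histogram might look like
# array([0.52, 0.31, 0.17]) -- the fraction of pixels assigned to each centroid
# (illustrative values only; they always sum to one).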
def plot_colors(hist, centroids):
# initialize the bar chart representing the relative frequency
# of each of the colors
bar = np.zeros((50, 300, 3), dtype = "uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for (percent, color) in zip(hist, centroids):
# plot the relative percentage of each cluster
endX = startX + (percent * 300)
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
color.astype("uint8").tolist(), -1)
startX = endX
# return the bar chart
return bar
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
ap.add_argument("-c", "--clusters", required=True, type=int,
help="# of clusters")
args = vars(ap.parse_args())
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image)
# reshape the image to be a list of pixels
image = image.reshape((image.shape[0]*image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters=args["clusters"])
clt.fit(image)
# build a histogram of clusters and then create a figure
# representing the number of pixels labeled to each color
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bar chart
plt.figure()
plt.axis("off")
plt.imshow(bar)
plt.show() | bsd-3-clause | -5,846,669,147,645,297,000 | 27.9375 | 73 | 0.687554 | false | 3.495468 | false | false | false |
amigocloud/amigocloud_samples | python/recordhistory.py | 1 | 3281 | # Script to query record_history across all AmigoCloud projects and export results to a CSV
# Must have AmigoCloud Account
# All projects must have a record_history dataset (no projects older than 2017)
from amigocloud import AmigoCloud
import csv
# AmigoCloud variables - change based on user
# token found at app.amigocloud.com/accounts/tokens
# project owner = user id found in /api/v1/me
amigocloud = AmigoCloud(token='<>')
projectOwner = <>
projectNum = []
recordNum = []
# Project variables used to parse through all projects the user has
projects = amigocloud.get('users/%s/projects/' % (projectOwner))
projectsNext = projects['next']
# Project list is offset by 20. While loop is setup so if user has more than 20 projects it will grab the next set of 20.
while True:
for project in projects['results']: #parse through projects
projectNum.append(project['id'])
projID = project['id']
datasets = amigocloud.get('users/%s/projects/%s/datasets' % (projectOwner,projID))
for rh in datasets['results']: #parse throughh datasets
if rh['name'] == 'record_history': #if the dataset is called record_history append to recordNum list
recordNum.append(rh['id'])
projectsNext = projects['next']
if projectsNext is None:
break
projects = amigocloud.get(projectsNext)
# temp list and final list
rows = []
export = []
# for each record_history dataset in each project, set variables, run query, add results to rows, extend export list
for p, d in zip(range(len(projectNum)),range(len(recordNum))):
# query variables
rows[:] = []
offset = 0
limit = 1000
sqlURL = '/users/%s/projects/%s/sql' % (projectOwner, projectNum[p])
datasetURL = '/users/%s/projects/%s/datasets/%s' % (projectOwner, projectNum[p], recordNum[d])
dataset = amigocloud.get(datasetURL)
tblName = dataset['table_name']
query = "SELECT dataset_id, change_date, change_type, who_changed_custom_id, %s AS project_num, %s AS record_history FROM %s WHERE change_date > '2018-9-12' AND change_type ='inserted'" % (projectNum[p], recordNum[d], tblName)
response = amigocloud.get(dataset['master'])
master = response['master']
# While loop count variables
responseCt = amigocloud.get(sqlURL, {'query': query, 'offset': offset, 'limit': limit, 'state': master,'dataset_id': recordNum[d]})
rowCt = len(responseCt['data'])
print('Project: ' + str(projectNum[p]) + ' History: ' + str(recordNum[d]) + ' Row Count: ' + str(rowCt))
# query data for each record_history dataset and extend to export list
while len(rows) < rowCt:
response = amigocloud.get(sqlURL, {'query': query, 'offset': offset, 'limit': limit, 'state': master,'dataset_id': recordNum[d]})
dataRows = len(response['data'])
offset += dataRows
rows += response['data']
export.extend(rows)
# write export list to CSV, use full path
with open('', 'w') as myFile:
fieldnames = ['dataset_id', 'change_date', 'change_type', 'who_changed_custom_id', 'project_num', 'record_history']
writer = csv.DictWriter(myFile, fieldnames=fieldnames, lineterminator = '\n')
writer.writeheader()
writer.writerows(export)
| mit | 5,019,500,417,981,999,000 | 46.550725 | 230 | 0.673575 | false | 3.53556 | false | false | false |
bugsduggan/locust | locust/stats.py | 1 | 26840 | import hashlib
import time
from collections import namedtuple, OrderedDict
from copy import copy
from itertools import chain
import gevent
import six
from six.moves import xrange
from . import events
from .exception import StopLocust
from .log import console_logger
STATS_NAME_WIDTH = 60
"""Default interval for how frequently the CSV file is written if this option
is configured."""
CSV_STATS_INTERVAL_SEC = 2
"""Default interval for how frequently results are written to console."""
CONSOLE_STATS_INTERVAL_SEC = 2
"""
Default window size/resolution - in seconds - when calculating the current
response time percentile
"""
CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW = 10
CachedResponseTimes = namedtuple("CachedResponseTimes", ["response_times", "num_requests"])
class RequestStatsAdditionError(Exception):
pass
def calculate_response_time_percentile(response_times, num_requests, percent):
"""
Get the response time that a certain number of percent of the requests
finished within. Arguments:
response_times: A StatsEntry.response_times dict
num_requests: Number of request made (could be derived from response_times,
but we save some CPU cycles by using the value which we already store)
percent: The percentile we want to calculate. Specified in range: 0.0 - 1.0
"""
num_of_request = int((num_requests * percent))
processed_count = 0
for response_time in sorted(six.iterkeys(response_times), reverse=True):
processed_count += response_times[response_time]
if(num_requests - processed_count <= num_of_request):
return response_time
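# Worked example (illustrative numbers only): with
#   response_times = {100: 8, 200: 1, 500: 1} and num_requests = 10,
# a call with percent=0.95 walks the buckets from slowest to fastest and returns
# 500, while percent=0.5 returns 100 (the median bucket).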
def diff_response_time_dicts(latest, old):
"""
Returns the delta between two {response_times:request_count} dicts.
Used together with the response_times cache to get the response times for the
last X seconds, which in turn is used to calculate the current response time
percentiles.
"""
new = {}
for time in latest:
diff = latest[time] - old.get(time, 0)
if diff:
new[time] = diff
return new
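# e.g. diff_response_time_dicts({100: 5, 200: 2}, {100: 3}) returns {100: 2, 200: 2},
# i.e. only the requests observed since the cached snapshot was taken.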
class RequestStats(object):
def __init__(self):
self.entries = {}
self.errors = {}
self.total = StatsEntry(self, "Total", None, use_response_times_cache=True)
self.start_time = None
@property
def num_requests(self):
return self.total.num_requests
@property
def num_failures(self):
return self.total.num_failures
@property
def last_request_timestamp(self):
return self.total.last_request_timestamp
def log_request(self, method, name, response_time, content_length):
self.total.log(response_time, content_length)
self.get(name, method).log(response_time, content_length)
def log_error(self, method, name, error):
self.total.log_error(error)
self.get(name, method).log_error(error)
# store error in errors dict
key = StatsError.create_key(method, name, error)
entry = self.errors.get(key)
if not entry:
entry = StatsError(method, name, error)
self.errors[key] = entry
entry.occured()
def get(self, name, method):
"""
Retrieve a StatsEntry instance by name and method
"""
entry = self.entries.get((name, method))
if not entry:
entry = StatsEntry(self, name, method)
self.entries[(name, method)] = entry
return entry
def reset_all(self):
"""
Go through all stats entries and reset them to zero
"""
self.start_time = time.time()
self.total.reset()
for r in six.itervalues(self.entries):
r.reset()
def clear_all(self):
"""
Remove all stats entries and errors
"""
self.total = StatsEntry(self, "Total", None, use_response_times_cache=True)
self.entries = {}
self.errors = {}
self.start_time = None
def serialize_stats(self):
return [self.entries[key].get_stripped_report() for key in six.iterkeys(self.entries) if not (self.entries[key].num_requests == 0 and self.entries[key].num_failures == 0)]
def serialize_errors(self):
return dict([(k, e.to_dict()) for k, e in six.iteritems(self.errors)])
class StatsEntry(object):
"""
Represents a single stats entry (name and method)
"""
name = None
""" Name (URL) of this stats entry """
method = None
""" Method (GET, POST, PUT, etc.) """
num_requests = None
""" The number of requests made """
num_failures = None
""" Number of failed request """
total_response_time = None
""" Total sum of the response times """
min_response_time = None
""" Minimum response time """
max_response_time = None
""" Maximum response time """
num_reqs_per_sec = None
""" A {second => request_count} dict that holds the number of requests made per second """
response_times = None
"""
A {response_time => count} dict that holds the response time distribution of all
the requests.
The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90,
100, 200 .. 900, 1000, 2000 ... 9000, in order to save memory.
This dict is used to calculate the median and percentile response times.
"""
use_response_times_cache = False
"""
    If set to True, a copy of the response_times dict will be stored in response_times_cache
every second, and kept for 20 seconds (by default, will be CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10).
We can use this dict to calculate the *current* median response time, as well as other response
time percentiles.
"""
response_times_cache = None
"""
If use_response_times_cache is set to True, this will be a {timestamp => CachedResponseTimes()}
OrderedDict that holds a copy of the response_times dict for each of the last 20 seconds.
"""
total_content_length = None
""" The sum of the content length of all the requests for this entry """
start_time = None
""" Time of the first request for this entry """
last_request_timestamp = None
""" Time of the last request for this entry """
def __init__(self, stats, name, method, use_response_times_cache=False):
self.stats = stats
self.name = name
self.method = method
self.use_response_times_cache = use_response_times_cache
self.reset()
def reset(self):
self.start_time = time.time()
self.num_requests = 0
self.num_failures = 0
self.total_response_time = 0
self.response_times = {}
self.min_response_time = None
self.max_response_time = 0
self.last_request_timestamp = int(time.time())
self.num_reqs_per_sec = {}
self.total_content_length = 0
if self.use_response_times_cache:
self.response_times_cache = OrderedDict()
self._cache_response_times(int(time.time()))
def log(self, response_time, content_length):
# get the time
t = int(time.time())
if self.use_response_times_cache and self.last_request_timestamp and t > self.last_request_timestamp:
            # see if we should make a copy of the response_times dict and store it in the cache
self._cache_response_times(t-1)
self.num_requests += 1
self._log_time_of_request(t)
self._log_response_time(response_time)
# increase total content-length
self.total_content_length += content_length
def _log_time_of_request(self, t):
self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1
self.last_request_timestamp = t
def _log_response_time(self, response_time):
self.total_response_time += response_time
if self.min_response_time is None:
self.min_response_time = response_time
self.min_response_time = min(self.min_response_time, response_time)
self.max_response_time = max(self.max_response_time, response_time)
        # to avoid too much data that has to be transferred to the master node when
# running in distributed mode, we save the response time rounded in a dict
# so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000
if response_time < 100:
rounded_response_time = response_time
elif response_time < 1000:
rounded_response_time = int(round(response_time, -1))
elif response_time < 10000:
rounded_response_time = int(round(response_time, -2))
else:
rounded_response_time = int(round(response_time, -3))
# increase request count for the rounded key in response time dict
self.response_times.setdefault(rounded_response_time, 0)
self.response_times[rounded_response_time] += 1
def log_error(self, error):
self.num_failures += 1
@property
def fail_ratio(self):
try:
return float(self.num_failures) / (self.num_requests + self.num_failures)
except ZeroDivisionError:
if self.num_failures > 0:
return 1.0
else:
return 0.0
@property
def avg_response_time(self):
try:
return float(self.total_response_time) / self.num_requests
except ZeroDivisionError:
return 0
@property
def median_response_time(self):
if not self.response_times:
return 0
return median_from_dict(self.num_requests, self.response_times)
@property
def current_rps(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(self.stats.last_request_timestamp - 12, int(self.stats.start_time or 0))
reqs = [self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, self.stats.last_request_timestamp-2)]
return avg(reqs)
@property
def total_rps(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
return self.num_requests / max(self.stats.last_request_timestamp - self.stats.start_time, 1)
@property
def avg_content_length(self):
try:
return self.total_content_length / self.num_requests
except ZeroDivisionError:
return 0
def extend(self, other):
"""
Extend the data from the current StatsEntry with the stats from another
StatsEntry instance.
"""
self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)
self.start_time = min(self.start_time, other.start_time)
self.num_requests = self.num_requests + other.num_requests
self.num_failures = self.num_failures + other.num_failures
self.total_response_time = self.total_response_time + other.total_response_time
self.max_response_time = max(self.max_response_time, other.max_response_time)
self.min_response_time = min(self.min_response_time or 0, other.min_response_time or 0) or other.min_response_time
self.total_content_length = self.total_content_length + other.total_content_length
for key in other.response_times:
self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key]
for key in other.num_reqs_per_sec:
self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
def serialize(self):
return {
"name": self.name,
"method": self.method,
"last_request_timestamp": self.last_request_timestamp,
"start_time": self.start_time,
"num_requests": self.num_requests,
"num_failures": self.num_failures,
"total_response_time": self.total_response_time,
"max_response_time": self.max_response_time,
"min_response_time": self.min_response_time,
"total_content_length": self.total_content_length,
"response_times": self.response_times,
"num_reqs_per_sec": self.num_reqs_per_sec,
}
@classmethod
def unserialize(cls, data):
obj = cls(None, data["name"], data["method"])
for key in [
"last_request_timestamp",
"start_time",
"num_requests",
"num_failures",
"total_response_time",
"max_response_time",
"min_response_time",
"total_content_length",
"response_times",
"num_reqs_per_sec",
]:
setattr(obj, key, data[key])
return obj
def get_stripped_report(self):
"""
Return the serialized version of this StatsEntry, and then clear the current stats.
"""
report = self.serialize()
self.reset()
return report
def __str__(self):
try:
fail_percent = (self.num_failures/float(self.num_requests + self.num_failures))*100
except ZeroDivisionError:
fail_percent = 0
return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %7d %7d %7d | %7d %7.2f") % (
(self.method and self.method + " " or "") + self.name,
self.num_requests,
"%d(%.2f%%)" % (self.num_failures, fail_percent),
self.avg_response_time,
self.min_response_time or 0,
self.max_response_time,
self.median_response_time or 0,
self.current_rps or 0
)
def get_response_time_percentile(self, percent):
"""
Get the response time that a certain number of percent of the requests
finished within.
Percent specified in range: 0.0 - 1.0
"""
return calculate_response_time_percentile(self.response_times, self.num_requests, percent)
def get_current_response_time_percentile(self, percent):
"""
Calculate the *current* response time for a certain percentile. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
# Since we can't be sure that the cache contains an entry for every second.
# We'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
for i in xrange(9):
acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i)
acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
            # If we found an acceptable cached response_times dict, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate a response time percentile
# for that timeframe
return calculate_response_time_percentile(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
percent,
)
def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
if not self.num_requests:
raise ValueError("Can't calculate percentile on url with no successful requests")
return tpl % (
(self.method and self.method + " " or "") + self.name,
self.num_requests,
self.get_response_time_percentile(0.5),
self.get_response_time_percentile(0.66),
self.get_response_time_percentile(0.75),
self.get_response_time_percentile(0.80),
self.get_response_time_percentile(0.90),
self.get_response_time_percentile(0.95),
self.get_response_time_percentile(0.98),
self.get_response_time_percentile(0.99),
self.get_response_time_percentile(1.00)
)
def _cache_response_times(self, t):
self.response_times_cache[t] = CachedResponseTimes(
response_times=copy(self.response_times),
num_requests=self.num_requests,
)
# We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme case -
# we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10
# to calculate the current response time percentile, if we're missing cached values for the subsequent
# 20 seconds
cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10
if len(self.response_times_cache) > cache_size:
# only keep the latest 20 response_times dicts
for i in xrange(len(self.response_times_cache) - cache_size):
self.response_times_cache.popitem(last=False)
class StatsError(object):
def __init__(self, method, name, error, occurences=0):
self.method = method
self.name = name
self.error = error
self.occurences = occurences
@classmethod
def parse_error(cls, error):
string_error = repr(error)
target = "object at 0x"
target_index = string_error.find(target)
if target_index < 0:
return string_error
start = target_index + len(target) - 2
end = string_error.find(">", start)
if end < 0:
return string_error
hex_address = string_error[start:end]
return string_error.replace(hex_address, "0x....")
@classmethod
def create_key(cls, method, name, error):
key = "%s.%s.%r" % (method, name, StatsError.parse_error(error))
return hashlib.md5(key.encode('utf-8')).hexdigest()
def occured(self):
self.occurences += 1
def to_name(self):
return "%s %s: %r" % (self.method,
self.name, repr(self.error))
def to_dict(self):
return {
"method": self.method,
"name": self.name,
"error": StatsError.parse_error(self.error),
"occurences": self.occurences
}
@classmethod
def from_dict(cls, data):
return cls(
data["method"],
data["name"],
data["error"],
data["occurences"]
)
def avg(values):
return sum(values, 0.0) / max(len(values), 1)
def median_from_dict(total, count):
"""
total is the number of requests made
count is a dict {response_time: count}
"""
pos = (total - 1) / 2
for k in sorted(six.iterkeys(count)):
if pos < count[k]:
return k
pos -= count[k]
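# e.g. median_from_dict(5, {1: 2, 5: 2, 10: 1}) returns 5: the third-fastest of
# the five requests falls in the 5 ms bucket.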
global_stats = RequestStats()
"""
A global instance for holding the statistics. Should be removed eventually.
"""
def on_request_success(request_type, name, response_time, response_length):
global_stats.log_request(request_type, name, response_time, response_length)
def on_request_failure(request_type, name, response_time, exception):
global_stats.log_error(request_type, name, exception)
def on_report_to_master(client_id, data):
data["stats"] = global_stats.serialize_stats()
data["stats_total"] = global_stats.total.get_stripped_report()
data["errors"] = global_stats.serialize_errors()
global_stats.errors = {}
def on_slave_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if not request_key in global_stats.entries:
global_stats.entries[request_key] = StatsEntry(global_stats, entry.name, entry.method)
global_stats.entries[request_key].extend(entry)
for error_key, error in six.iteritems(data["errors"]):
if error_key not in global_stats.errors:
global_stats.errors[error_key] = StatsError.from_dict(error)
else:
global_stats.errors[error_key].occurences += error["occurences"]
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = global_stats.total.last_request_timestamp
# update the total StatsEntry
global_stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
if global_stats.total.last_request_timestamp > old_last_request_timestamp:
# If we've entered a new second, we'll cache the response times. Note that there
        # might still be reports from other slave nodes - that contain requests for the same
        # time periods - that haven't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
global_stats.total._cache_response_times(global_stats.total.last_request_timestamp)
events.request_success += on_request_success
events.request_failure += on_request_failure
events.report_to_master += on_report_to_master
events.slave_report += on_slave_report
def print_stats(stats):
console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s %7s %7s %7s | %7s %7s") % ('Name', '# reqs', '# fails', 'Avg', 'Min', 'Max', 'Median', 'req/s'))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
total_rps = 0
total_reqs = 0
total_failures = 0
for key in sorted(six.iterkeys(stats)):
r = stats[key]
total_rps += r.current_rps
total_reqs += r.num_requests
total_failures += r.num_failures
console_logger.info(r)
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
try:
fail_percent = (total_failures/float(total_reqs))*100
except ZeroDivisionError:
fail_percent = 0
console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %42.2f") % ('Total', total_reqs, "%d(%.2f%%)" % (total_failures, fail_percent), total_rps))
console_logger.info("")
def print_percentile_stats(stats):
console_logger.info("Percentage of the requests completed within given times")
console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %8s %6s %6s %6s %6s %6s %6s %6s %6s %6s") % ('Name', '# reqs', '50%', '66%', '75%', '80%', '90%', '95%', '98%', '99%', '100%'))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for key in sorted(six.iterkeys(stats)):
r = stats[key]
if r.response_times:
console_logger.info(r.percentile())
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
total_stats = global_stats.total
if total_stats.response_times:
console_logger.info(total_stats.percentile())
console_logger.info("")
def print_error_report():
if not len(global_stats.errors):
return
console_logger.info("Error report")
console_logger.info(" %-18s %-100s" % ("# occurences", "Error"))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for error in six.itervalues(global_stats.errors):
console_logger.info(" %-18i %-100s" % (error.occurences, error.to_name()))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info("")
def stats_printer():
from . import runners
while True:
print_stats(runners.locust_runner.request_stats)
gevent.sleep(CONSOLE_STATS_INTERVAL_SEC)
def stats_writer(base_filepath):
"""Writes the csv files for the locust run."""
while True:
write_stat_csvs(base_filepath)
gevent.sleep(CSV_STATS_INTERVAL_SEC)
def write_stat_csvs(base_filepath):
"""Writes the requests and distribution csvs."""
with open(base_filepath + '_requests.csv', "w") as f:
f.write(requests_csv())
with open(base_filepath + '_distribution.csv', 'w') as f:
f.write(distribution_csv())
def sort_stats(stats):
return [stats[key] for key in sorted(six.iterkeys(stats))]
def requests_csv():
from . import runners
"""Returns the contents of the 'requests' tab as CSV."""
rows = [
",".join([
'"Method"',
'"Name"',
'"# requests"',
'"# failures"',
'"Median response time"',
'"Average response time"',
'"Min response time"',
'"Max response time"',
'"Average Content Size"',
'"Requests/s"',
])
]
for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]):
rows.append('"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
s.method,
s.name,
s.num_requests,
s.num_failures,
s.median_response_time,
s.avg_response_time,
s.min_response_time or 0,
s.max_response_time,
s.avg_content_length,
s.total_rps,
))
return "\n".join(rows)
def distribution_csv():
"""Returns the contents of the 'distribution' tab as CSV."""
from . import runners
rows = [",".join((
'"Name"',
'"# requests"',
'"50%"',
'"66%"',
'"75%"',
'"80%"',
'"90%"',
'"95%"',
'"98%"',
'"99%"',
'"100%"',
))]
for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]):
if s.num_requests:
rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
else:
rows.append('"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"' % s.name)
return "\n".join(rows)
| mit | 5,510,569,928,374,752,000 | 35.517007 | 187 | 0.609277 | false | 3.832096 | false | false | false |
robogen/CMS-Mining | RunScripts/es_mainreduce.py | 1 | 20005 | from elasticsearch import Elasticsearch
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import numpy as np
import datetime as dt
import math
import json
with open('sites.json', 'r+') as txt:
sitesArray = json.load(txt)
with open('cms.json', 'r+') as txt:
cmsLocate = json.load(txt)
with open("config", "r+") as txt:
contents = list(map(str.rstrip, txt))
def conAtlasTime(time):
return (dt.datetime.strptime(time, '%Y-%m-%dT%X')).replace(tzinfo=dt.timezone.utc).timestamp()
def utcDate(time):
return dt.datetime.fromtimestamp(time, dt.timezone.utc)
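# e.g. conAtlasTime("2016-07-17T00:00:00") == 1468713600.0 (seconds since the epoch, UTC)
# and utcDate(1468713600.0) gives 2016-07-17 00:00:00+00:00 back as a datetime.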
esAtlas = Elasticsearch([{
'host': contents[2], 'port': contents[3]
}], timeout=50)
esHCC = Elasticsearch([{
'host': contents[0], 'port': contents[1]
}], timeout=50)
scrollPreserve="3m"
startDate = "2016-07-17T00:00:00"
endDate = "2016-07-25T00:00:00"
tenMin = np.multiply(10,60)
loc = {}
loc["location"] = np.array([])
def atlasLatency(srcSite, destSite):
queryAtlas={"query" :
{"bool": {
"must": [
{"match" :
{"_type" : "latency" }
},
{"match" :
{"srcSite" : srcSite }
},
{"match" :
{"destSite" : destSite }
},
{"range" : {
"timestamp" : {
#"gt" : int(conAtlasTime(startDate)),
#"lt" : int(conAtlasTime(endDate))
"gt" : startDate,
"lt" : endDate
}
}}
]
}
}
}
scannerAtlas = esAtlas.search(index="network_weather_2-*",
body=queryAtlas,
search_type="scan",
scroll=scrollPreserve)
scrollIdAtlas = scannerAtlas['_scroll_id']
atlasTotalRec = scannerAtlas["hits"]["total"]
arrRet = np.array([])
if atlasTotalRec == 0:
return None
else:
while atlasTotalRec > 0:
responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas,
scroll=scrollPreserve)
for hit in responseAtlas["hits"]["hits"]:
tempRay = None # Initialize
if hit["_source"]["src"] == hit["_source"]["MA"]: # means MA is the src site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["delay_mean"]),
float(hit["_source"]["delay_median"]),
float(hit["_source"]["delay_sd"]),
float(0.0)])
elif hit["_source"]["dest"] == hit["_source"]["MA"]: # means MA is the dest site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["delay_mean"]),
float(hit["_source"]["delay_median"]),
float(hit["_source"]["delay_sd"]),
float(1.0)])
else:
raise NameError('MA is not the src or dest')
if arrRet.size == 0:
arrRet = np.reshape(tempRay, (1,6))
else:
arrRet = np.vstack((arrRet, tempRay))
atlasTotalRec -= len(responseAtlas['hits']['hits'])
arrRet.view('f8,f8,f8,f8,f8,f8').sort(order=['f0'], axis=0)
return arrRet
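# Each row of the array returned by atlasLatency is
#   [timestamp, timestamp - 4h, delay_mean, delay_median, delay_sd, flag]
# where flag is 0.0 when the measurement agent (MA) is the source site and 1.0
# when it is the destination site.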
def atlasPacketLoss(srcSite, destSite):
queryAtlas={"query" :
{"bool": {
"must": [
{"match" :
{"_type" : "packet_loss_rate"}
},
{"match" :
{"srcSite" : srcSite }
},
{"match" :
{"destSite" : destSite }
},
{"range" : {
"timestamp" : {
#"gt" : int(conAtlasTime(startDate)),
#"lt" : int(conAtlasTime(endDate))
"gt" : startDate,
"lt" : endDate
}
}}
]
}
}
}
scannerAtlas = esAtlas.search(index="network_weather_2-*",
body=queryAtlas,
search_type="scan",
scroll=scrollPreserve)
scrollIdAtlas = scannerAtlas['_scroll_id']
atlasTotalRec = scannerAtlas["hits"]["total"]
arrRet = np.array([])
if atlasTotalRec == 0:
return None
else:
while atlasTotalRec > 0:
responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas,
scroll=scrollPreserve)
for hit in responseAtlas["hits"]["hits"]:
tempRay = None # Initialize
if hit["_source"]["src"] == hit["_source"]["MA"]: # means MA is the src site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["packet_loss"]),
float(0.0)])
elif hit["_source"]["dest"] == hit["_source"]["MA"]: # means MA is the dest site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["packet_loss"]),
float(1.0)])
else:
raise NameError('MA is not src or dest')
if arrRet.size == 0:
arrRet = np.reshape(tempRay, (1, 4))
else:
arrRet = np.vstack((arrRet, tempRay))
atlasTotalRec -= len(responseAtlas['hits']['hits'])
arrRet.view('f8,f8,f8,f8').sort(order=['f0'], axis=0)
return arrRet
def atlasThroughput(srcSite, destSite):
queryAtlas={"query" :
{"bool": {
"must": [
{"match" :
{"_type" : "throughput"}
},
{"match" :
{"srcSite" : srcSite }
},
{"match" :
{"destSite" : destSite }
},
{"range" : {
"timestamp" : {
#"gt" : int(conAtlasTime(startDate)),
#"lt" : int(conAtlasTime(endDate))
"gt" : startDate,
"lt" : endDate
}
}}
]
}
}
}
scannerAtlas = esAtlas.search(index="network_weather_2-*",
body=queryAtlas,
search_type="scan",
scroll=scrollPreserve)
scrollIdAtlas = scannerAtlas['_scroll_id']
atlasTotalRec = scannerAtlas["hits"]["total"]
arrRet = np.array([])
if atlasTotalRec == 0:
return None
else:
while atlasTotalRec > 0:
responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas,
scroll=scrollPreserve)
for hit in responseAtlas["hits"]["hits"]:
tempRay = None #Initialize in local context
if hit["_source"]["src"] == hit["_source"]["MA"]: # Means MA is the src site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["throughput"]),
float(0.0)])
elif hit["_source"]["dest"] == hit["_source"]["MA"]: #Means MA is the dest site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["throughput"]),
float(1.0)])
else:
raise NameError('MA is not src or dest')
if arrRet.size == 0:
arrRet = np.reshape(tempRay, (1, 4))
else:
arrRet = np.vstack((arrRet, tempRay))
atlasTotalRec -= len(responseAtlas['hits']['hits'])
arrRet.view('f8,f8,f8,f8').sort(order=['f0'], axis=0)
return arrRet
def hccQuery(site):
queryHCC={"query" :
{"bool": {
"must": [
{"match" :
{"CMS_JobType" : "Processing"}
},
{"range" :
{"EventRate" : {"gte" : "0"}}
},
#{"match" :
# {"TaskType" : "Production"}
#},
{"range" : {
"CompletionDate" : {
"gt" : int(conAtlasTime(startDate)),
"lt" : int(conAtlasTime(endDate))
}
}},
{"match" :
{"DataLocationsCount" : 1}
},
{"match" :
{"Site" : site }
},
{"match" :
{"InputData" : "Offsite"}
}
]
}
}
}
scannerHCC = esHCC.search(index="cms-*",
doc_type="job",
body=queryHCC,
search_type="scan",
scroll=scrollPreserve)
scrollIdHCC = scannerHCC['_scroll_id']
countHitsHCC = scannerHCC["hits"]["total"]
arrRet = {}
if countHitsHCC == 0:
return None
else:
while countHitsHCC > 0:
responseHCC = esHCC.scroll(scroll_id=scrollIdHCC,
scroll=scrollPreserve)
for hit in responseHCC["hits"]["hits"]:
location = hit["_source"]["DataLocations"]
if str(location[0]).lower() in cmsLocate["locations"]:
tempHolder = np.array([hit["_source"]["CpuEff"],
#hit["_source"]["EventRate"],
hit["_source"]["ChirpCMSSWEventRate"],
hit["_source"]["JobCurrentStartDate"],
hit["_source"]["JobFinishedHookDone"],
hit["_source"]["CpuTimeHr"],
hit["_source"]["WallClockHr"],
hit["_source"]["RequestCpus"],
hit["_source"]["MemoryMB"],
hit["_source"]["QueueHrs"],
hit["_source"]["RequestMemory"],
hit["_source"]["CoreHr"],
hit["_source"]["CpuBadput"],
hit["_source"]["KEvents"]])
if not str(location[0]) in loc["location"]:
loc["location"] = np.append(loc["location"],
str(location[0]))
arrRet[str(location[0])] = np.reshape(tempHolder, (1,13))
else:
arrRet[str(location[0])] = np.vstack((arrRet[str(location[0])],tempHolder))
countHitsHCC -= len(responseHCC['hits']['hits'])
for hit in arrRet:
#print(arrRet)
#tempRay = arrRet[str(hit)]
#arrRet[str(hit)] = tempRay[tempRay[:,2].argsort()]
#arrRet[str(hit)] = sorted(arrRet[str(hit)], key=lambda x : x[2])
arrRet[str(hit)].view('f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8').sort(order=['f2'], axis=0)
return arrRet
with PdfPages('CMS_Plots.pdf') as pp:
d = pp.infodict()
d['Title'] = 'CMS Grid Plots'
d['Author'] = u'Jerrod T. Dixon\xe4nen'
d['Subject'] = 'Plot of network affects on grid jobs'
d['Keywords'] = 'PdfPages matplotlib CMS grid'
d['CreationDate'] = dt.datetime.today()
d['ModDate'] = dt.datetime.today()
for hit in cmsLocate["locations"]:
loc["location"] = np.array([])
hccResult = hccQuery(hit)
for note in loc["location"]:
atlasT = None #atlasThroughput(sitesArray[hit], sitesArray[note.lower()])
atlasP = atlasPacketLoss(sitesArray[hit], sitesArray[note.lower()])
atlasL = atlasLatency(sitesArray[hit], sitesArray[note.lower()])
tempArr = hccResult[note]
            arrCpu = np.array([])
            arrEvent = np.array([])
            arrStart = np.array([])
            arrEnd = np.array([])
            for tpl in tempArr:
                arrCpu = np.append(arrCpu, tpl[0])
                arrEvent = np.append(arrEvent, tpl[1])
                arrStart = np.append(arrStart, utcDate(tpl[2]))
                arrEnd = np.append(arrEnd, utcDate(tpl[3]))
figH, axH = plt.subplots(2, sharex=True)
axH[1].xaxis.set_major_formatter(AutoDateFormatter(locator=AutoDateLocator(),
defaultfmt="%m-%d %H:%M"))
figH.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
axH[0].plot(arrStart, arrCpu, 'bs')
axH[0].hlines(arrCpu,
arrStart,
arrEnd)
axH[0].set_ylabel("CpuEff")
axH[0].set_title(str("2016 From " + hit + " To " + note))
axH[1].plot(arrStart, arrEvent, 'bs')
axH[1].hlines(arrEvent,
arrStart,
arrEnd)
axH[1].set_ylabel("EventRate")
pp.savefig(figH)
plt.close(figH)
#axA[2].xaxis.set_major_formatter(AutoDateFormatter(locator=AutoDateLocator(),
# defaultfmt="%m-%d %H:%M"))
if not type(atlasP) == type(None):
#tDate = np.array([])
#tDatef = np.array([])
#tPut = np.array([])
pDate = np.array([])
pDatef = np.array([])
pLoss = np.array([])
#for tpl in atlasT:
# tDate = np.append(tDate, tpl[0])
# tDatef = np.append(tDatef, tpl[1])
# tPut = np.append(tPut, tpl[2])
for tpl in atlasP:
pDate = np.append(pDate, tpl[0])
pDatef = np.append(pDatef, tpl[1])
pLoss = np.append(pLoss, tpl[2])
figA, axA = plt.subplots(2, sharex=True)
axA[0].set_title(str("2016 From " + \
hit + " (" + \
sitesArray[hit] + \
")" + " To " + \
note + " (" + sitesArray[note.lower()] + ")"))
figA.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
axA[0].plot(pDate, pLoss, 'bs')
axA[0].set_ylabel("Packet Loss")
axA[0].hlines(pLoss,
pDatef,
pDate)
#axA[1].set_ylabel("Throughput")
#axA[1].plot(tDate, tPut, 'bs')
#axA[1].hlines(tPut,
# tDatef,
# tDate)
pp.savefig(figA)
plt.close(figA)
if not type(atlasL) == type(None):
lDate = np.array([])
lDatef = np.array([])
lMean = np.array([])
lMedian = np.array([])
lStd = np.array([])
for tpl in atlasL:
lDate = np.append(lDate, tpl[0])
lDatef = np.append(lDatef, tpl[1])
lMean = np.append(lMean, tpl[2])
lMedian = np.append(lMedian, tpl[3])
lStd = np.append(lStd, tpl[4])
figL, axL = plt.subplots(3, sharex=True)
axL[0].set_title(str("2016 Latency From " + \
hit + " (" + \
sitesArray[hit] + \
")" + " To " + \
note + " (" + sitesArray[note.lower()] + ")"))
figL.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
axL[0].set_ylabel("Mean")
axL[0].plot(lDate, lMean, 'bs', label="delay_mean")
axL[0].hlines(lMean,
lDatef,
lDate)
axL[1].set_ylabel("Median")
axL[1].plot(lDate, lMedian, 'rs', label="delay_median")
axL[1].hlines(lMedian,
lDatef,
lDate)
axL[2].set_ylabel("Std. Dev")
axL[2].plot(lDate, lStd, 'g^', label="delay_sd")
axL[2].hlines(lStd,
lDatef,
lDate)
pp.savefig(figL)
plt.close(figL)
| mit | 398,106,286,063,834,500 | 43.85426 | 102 | 0.389803 | false | 4.524994 | false | false | false |
atumanov/ray | python/ray/rllib/optimizers/multi_gpu_optimizer.py | 1 | 9886 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import numpy as np
from collections import defaultdict
import ray
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer
from ray.rllib.optimizers.rollout import collect_samples, \
collect_samples_straggler_mitigation
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
class LocalMultiGPUOptimizer(PolicyOptimizer):
"""A synchronous optimizer that uses multiple local GPUs.
Samples are pulled synchronously from multiple remote workers,
concatenated, and then split across the memory of multiple local GPUs.
A number of SGD passes are then taken over the in-memory data. For more
details, see `multi_gpu_impl.LocalSyncParallelOptimizer`.
    This optimizer is TensorFlow-specific and requires the underlying
    Policy to be a TFPolicy instance that supports `.copy()`.
Note that all replicas of the TFPolicy will merge their
extra_compute_grad and apply_grad feed_dicts and fetches. This
may result in unexpected behavior.
"""
def __init__(self,
workers,
sgd_batch_size=128,
num_sgd_iter=10,
sample_batch_size=200,
num_envs_per_worker=1,
train_batch_size=1024,
num_gpus=0,
standardize_fields=[],
straggler_mitigation=False):
PolicyOptimizer.__init__(self, workers)
self.batch_size = sgd_batch_size
self.num_sgd_iter = num_sgd_iter
self.num_envs_per_worker = num_envs_per_worker
self.sample_batch_size = sample_batch_size
self.train_batch_size = train_batch_size
self.straggler_mitigation = straggler_mitigation
if not num_gpus:
self.devices = ["/cpu:0"]
else:
self.devices = [
"/gpu:{}".format(i) for i in range(int(math.ceil(num_gpus)))
]
self.batch_size = int(sgd_batch_size / len(self.devices)) * len(
self.devices)
assert self.batch_size % len(self.devices) == 0
assert self.batch_size >= len(self.devices), "batch size too small"
self.per_device_batch_size = int(self.batch_size / len(self.devices))
self.sample_timer = TimerStat()
self.load_timer = TimerStat()
self.grad_timer = TimerStat()
self.update_weights_timer = TimerStat()
self.standardize_fields = standardize_fields
logger.info("LocalMultiGPUOptimizer devices {}".format(self.devices))
self.policies = dict(self.workers.local_worker()
.foreach_trainable_policy(lambda p, i: (i, p)))
logger.debug("Policies to train: {}".format(self.policies))
for policy_id, policy in self.policies.items():
if not isinstance(policy, TFPolicy):
raise ValueError(
"Only TF policies are supported with multi-GPU. Try using "
"the simple optimizer instead.")
# per-GPU graph copies created below must share vars with the policy
# reuse is set to AUTO_REUSE because Adam nodes are created after
# all of the device copies are created.
self.optimizers = {}
with self.workers.local_worker().tf_sess.graph.as_default():
with self.workers.local_worker().tf_sess.as_default():
for policy_id, policy in self.policies.items():
with tf.variable_scope(policy_id, reuse=tf.AUTO_REUSE):
if policy._state_inputs:
rnn_inputs = policy._state_inputs + [
policy._seq_lens
]
else:
rnn_inputs = []
self.optimizers[policy_id] = (
LocalSyncParallelOptimizer(
policy._optimizer, self.devices,
[v
for _, v in policy._loss_inputs], rnn_inputs,
self.per_device_batch_size, policy.copy))
self.sess = self.workers.local_worker().tf_sess
self.sess.run(tf.global_variables_initializer())
@override(PolicyOptimizer)
def step(self):
with self.update_weights_timer:
if self.workers.remote_workers():
weights = ray.put(self.workers.local_worker().get_weights())
for e in self.workers.remote_workers():
e.set_weights.remote(weights)
with self.sample_timer:
if self.workers.remote_workers():
if self.straggler_mitigation:
samples = collect_samples_straggler_mitigation(
self.workers.remote_workers(), self.train_batch_size)
else:
samples = collect_samples(
self.workers.remote_workers(), self.sample_batch_size,
self.num_envs_per_worker, self.train_batch_size)
if samples.count > self.train_batch_size * 2:
logger.info(
"Collected more training samples than expected "
"(actual={}, train_batch_size={}). ".format(
samples.count, self.train_batch_size) +
"This may be because you have many workers or "
"long episodes in 'complete_episodes' batch mode.")
else:
samples = []
while sum(s.count for s in samples) < self.train_batch_size:
samples.append(self.workers.local_worker().sample())
samples = SampleBatch.concat_samples(samples)
# Handle everything as if multiagent
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({
DEFAULT_POLICY_ID: samples
}, samples.count)
for policy_id, policy in self.policies.items():
if policy_id not in samples.policy_batches:
continue
batch = samples.policy_batches[policy_id]
for field in self.standardize_fields:
value = batch[field]
standardized = (value - value.mean()) / max(1e-4, value.std())
batch[field] = standardized
# Important: don't shuffle RNN sequence elements
if not policy._state_inputs:
batch.shuffle()
num_loaded_tuples = {}
with self.load_timer:
for policy_id, batch in samples.policy_batches.items():
if policy_id not in self.policies:
continue
policy = self.policies[policy_id]
tuples = policy._get_loss_inputs_dict(batch)
data_keys = [ph for _, ph in policy._loss_inputs]
if policy._state_inputs:
state_keys = policy._state_inputs + [policy._seq_lens]
else:
state_keys = []
num_loaded_tuples[policy_id] = (
self.optimizers[policy_id].load_data(
self.sess, [tuples[k] for k in data_keys],
[tuples[k] for k in state_keys]))
fetches = {}
with self.grad_timer:
for policy_id, tuples_per_device in num_loaded_tuples.items():
optimizer = self.optimizers[policy_id]
num_batches = max(
1,
int(tuples_per_device) // int(self.per_device_batch_size))
logger.debug("== sgd epochs for {} ==".format(policy_id))
for i in range(self.num_sgd_iter):
iter_extra_fetches = defaultdict(list)
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
batch_fetches = optimizer.optimize(
self.sess, permutation[batch_index] *
self.per_device_batch_size)
for k, v in batch_fetches[LEARNER_STATS_KEY].items():
iter_extra_fetches[k].append(v)
logger.debug("{} {}".format(i,
_averaged(iter_extra_fetches)))
fetches[policy_id] = _averaged(iter_extra_fetches)
self.num_steps_sampled += samples.count
self.num_steps_trained += tuples_per_device * len(self.devices)
self.learner_stats = fetches
return fetches
@override(PolicyOptimizer)
def stats(self):
return dict(
PolicyOptimizer.stats(self), **{
"sample_time_ms": round(1000 * self.sample_timer.mean, 3),
"load_time_ms": round(1000 * self.load_timer.mean, 3),
"grad_time_ms": round(1000 * self.grad_timer.mean, 3),
"update_time_ms": round(1000 * self.update_weights_timer.mean,
3),
"learner": self.learner_stats,
})
def _averaged(kv):
out = {}
for k, v in kv.items():
if v[0] is not None and not isinstance(v[0], dict):
out[k] = np.mean(v)
return out
| apache-2.0 | -4,175,666,945,924,557,000 | 42.743363 | 79 | 0.556039 | false | 4.335965 | false | false | false |
jgosmann/psyrun | psyrun/pspace.py | 1 | 9919 | """Construction of parameter spaces."""
import collections
import itertools
from six import string_types
from psyrun.utils.doc import inherit_docs
def dict_concat(args):
"""Concatenates elements with the same key in the passed dictionaries.
Parameters
----------
args : sequenece of dict
Dictionaries with sequences to concatenate.
Returns
-------
dict
The dictionary with the union of all the keys of the dictionaries
passed in and elements with the same key concatenated. Missing elements
will be None.
Examples
--------
>>> from pprint import pprint
>>> pprint(dict_concat(({'a': 0, 'b': 0}, {'a': 1})))
{'a': [0, 1], 'b': [0, None]}
"""
keys = set()
for a in args:
keys = keys.union(a.keys())
return {k: [a.get(k, None) for a in args] for k in keys}
class ParameterSpace(collections.Sized):
"""Abstract base class for objects representing a parameter space.
Supports addition, subtraction, and multiplication operators to construct
more complicated parameter spaces.
Deriving classes are supposed to implement the `iterate` and ``__len__``
methods.
Parameters
----------
keys : sequence of strings
Parameter names.
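    Examples
    --------
    Spaces can be combined with the ``+`` (concatenation), ``*`` (product),
    and ``-`` (difference) operators:
    >>> from pprint import pprint
    >>> pprint((Param(a=[1, 2]) + Param(a=[3])).build())
    {'a': [1, 2, 3]}
    >>> pprint((Param(a=[1, 2]) * Param(b=[3])).build())
    {'a': [1, 2], 'b': [3, 3]}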
"""
def __init__(self, keys):
self._keys = list(keys)
def build(self):
"""Builds the parameter space into a dictionary of parameter lists.
Returns
-------
dict
A dictionary with the parameter names as keys and lists with the
parameter values.
Examples
--------
>>> from pprint import pprint
>>> pprint(Param(a=[1, 2], b=[1, 2]).build())
{'a': [1, 2], 'b': [1, 2]}
"""
built = dict_concat(list(self.iterate()))
for k in self.keys():
built.setdefault(k, [])
return built
def iterate(self):
"""Iterates over the parameter assignments in the parameter space."""
raise NotImplementedError()
def keys(self):
"""Returns the parameter names."""
return self._keys
def __len__(self):
raise NotImplementedError()
def __add__(self, other):
return Sum(self, other)
def __mul__(self, other):
return Product(self, other)
def __sub__(self, other):
return Difference(self, other)
def __repr__(self):
keys = sorted(self.keys())
built = self.build()
return "Param(**{{{params}}})".format(
params=", ".join(
"{k!r}: {v!r}".format(k=k, v=built[k]) for k in keys))
def __str__(self):
keys = sorted(self.keys())
built = self.build()
return "Param({params})".format(
params=", ".join(
"{k!s}={v!r}".format(k=k, v=built[k]) for k in keys))
@inherit_docs
class Param(ParameterSpace):
"""Constructs a simple parameter space from constructor arguments.
Supports addition, subtraction, and multiplication operators to construct
more complicated parameter spaces.
Parameters
----------
params :
Each keyword argument defines a parameter with a sequence of parameter
values for it. The length of all lists has to be equal. If a scalar
instead of a sequence is passed in, it will be replicated to match the
length of the other parameters. At least one keyword argument has to be
a sequence.
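    Examples
    --------
    Scalars are replicated to the length of the given sequences:
    >>> from pprint import pprint
    >>> pprint(Param(a=[1, 2], b=3).build())
    {'a': [1, 2], 'b': [3, 3]}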
"""
def __init__(self, **params):
super(Param, self).__init__(params.keys())
self._params = params
# Make sure strings etc are in a list.
for k in self._params:
if isinstance(self._params[k], string_types + (bytes,)):
self._params[k] = [self._params[k]]
self._len = None
for v in self._params.values():
try:
l = len(v)
except TypeError:
pass
else:
if self._len is None:
self._len = l
elif self._len != l:
raise ValueError("Parameter lists differ in length.")
if self._len is None:
self._len = 1 if len(self._params) > 0 else 0
def iterate(self):
for i in range(len(self)):
yield {k: self.get_param(k, i) for k in self.keys()}
def __len__(self):
return self._len
def get_param(self, key, i):
"""Return the *i*-th parameter assignment.
Parameters
----------
key : str
Parameter name of parameter to retrieve.
i : int
Index of assigned value to return.
"""
p = self._params[key]
try:
return p[i]
except TypeError:
return p
@inherit_docs
class Difference(ParameterSpace):
"""Implements the difference of two parameter spaces.
Parameters
----------
minuend : `ParameterSpace`
Minuend (left operand).
subtrahend : `ParameterSpace`
Subtrahend (right operand).
Attributes
----------
    left : `ParameterSpace`
        Minuend (left operand).
    right : `ParameterSpace`
        Subtrahend (right operand).
Examples
--------
>>> from pprint import pprint
>>> pprint(Difference(Param(a=[1, 2], b=[1, 2]), Param(a=[1])).build())
{'a': [2], 'b': [2]}
"""
def __init__(self, minuend, subtrahend):
super(Difference, self).__init__(minuend.keys())
for k in subtrahend.keys():
if k not in self._keys:
raise AmbiguousOperationError(
'Key `{0}` not existent in minuend.'.format(k))
self.left = minuend
self.right = subtrahend
if len(self.right) == 0:
self._cached = list(self.left.iterate())
else:
exclude = self.right.build()
self._cached = [item for item in self.left.iterate()
if not all(item[k] in exclude[k]
for k in exclude.keys())]
def iterate(self):
return iter(self._cached)
def __len__(self):
return sum(1 for item in self.iterate())
@inherit_docs
class Product(ParameterSpace):
"""Implements the Cartesian product of two parameter spaces.
Parameters
----------
left : `ParameterSpace`
Left operand.
right : `ParameterSpace`
Right operand.
Attributes
----------
left : `ParameterSpace`
Left operand.
right : `ParameterSpace`
Right operand.
Examples
--------
>>> from pprint import pprint
>>> pprint(Product(Param(a=[1, 2]), Param(b=[1, 2])).build())
{'a': [1, 1, 2, 2], 'b': [1, 2, 1, 2]}
"""
def __init__(self, left, right):
shared_keys = set(left.keys()).intersection(set(right.keys()))
if len(shared_keys) > 0:
raise AmbiguousOperationError(
'Duplicate param keys: {0}'.format(shared_keys))
super(Product, self).__init__(list(left.keys()) + list(right.keys()))
self.left = left
self.right = right
def iterate(self):
if len(self.left.keys()) == 0:
return self.right.iterate()
elif len(self.right.keys()) == 0:
return self.left.iterate()
else:
return (self._merge(*item) for item in itertools.product(
self.left.iterate(), self.right.iterate()))
@staticmethod
def _merge(left, right):
merged = {}
merged.update(left)
merged.update(right)
return merged
def __len__(self):
if len(self.left.keys()) == 0:
return len(self.right)
elif len(self.right.keys()) == 0:
return len(self.left)
else:
return len(self.left) * len(self.right)
@inherit_docs
class Sum(ParameterSpace):
"""Implements the concatenation of two parameter spaces.
Parameters
----------
left : `ParameterSpace`
Left operand.
right : `ParameterSpace`
Right operand.
Attributes
----------
left : `ParameterSpace`
Left operand.
right : `ParameterSpace`
Right operand.
Examples
--------
>>> from pprint import pprint
>>> pprint(Sum(Param(a=[1]), Param(a=[2])).build())
{'a': [1, 2]}
"""
def __init__(self, left, right):
super(Sum, self).__init__(set(left.keys()).union(set(right.keys())))
self.left = left
self.right = right
def iterate(self):
return ({k: item.get(k, float('nan')) for k in self.keys()}
for item in itertools.chain(
self.left.iterate(), self.right.iterate()))
def __len__(self):
return len(self.left) + len(self.right)
def missing(minuend, subtrahend):
"""Return set of parameter assignments missing from another set.
This differs from a simple subtraction by allowing additional keys in the
subtrahend, but no additional keys in the minuend.
Parameters
----------
minuend : `ParameterSpace`
Parameter space with all assignments.
subtrahend : :class:`Param`
Parameter space with assignments to remove from the parameter space.
Returns
-------
`ParameterSpace`
The reduced parameter space.
Examples
--------
>>> from pprint import pprint
>>> pprint(missing(Param(a=[1, 2, 3]), Param(a=[2])).build())
{'a': [1, 3]}
"""
if len(subtrahend) <= 0:
return minuend
for k in minuend.keys():
if k not in subtrahend.keys():
raise AmbiguousOperationError()
return minuend - Param(
**{k: v for k, v in subtrahend.build().items() if k in minuend.keys()})
class AmbiguousOperationError(RuntimeError):
"""Attempt to combine two parameter spaces in an ambiguous way."""
pass
| mit | 5,030,411,892,395,405,000 | 26.940845 | 79 | 0.556104 | false | 4.186999 | false | false | false |
lablup/backend.ai-manager | src/ai/backend/manager/models/alembic/versions/e35332f8d23d_add_modified_at_to_users_and_kernels.py | 1 | 3049 | """add_modified_at_to_users_and_kernels
Revision ID: e35332f8d23d
Revises: da24ff520049
Create Date: 2020-07-01 14:02:11.022032
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql.expression import bindparam
from ai.backend.manager.models.base import convention, IDColumn
# revision identifiers, used by Alembic.
revision = 'e35332f8d23d'
down_revision = 'da24ff520049'
branch_labels = None
depends_on = None
def upgrade():
metadata = sa.MetaData(naming_convention=convention)
# partial table to be preserved and referred
users = sa.Table(
'users', metadata,
IDColumn('uuid'),
sa.Column('created_at', sa.DateTime(timezone=True),
server_default=sa.func.now()),
sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.func.now(), onupdate=sa.func.current_timestamp()),
)
keypairs = sa.Table(
'keypairs', metadata,
sa.Column('access_key', sa.String(length=20), primary_key=True),
sa.Column('created_at', sa.DateTime(timezone=True),
server_default=sa.func.now()),
sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.func.now(), onupdate=sa.func.current_timestamp()),
)
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('keypairs', sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.text('now()'), nullable=True))
op.add_column('users', sa.Column('modified_at', sa.DateTime(timezone=True),
server_default=sa.text('now()'), nullable=True))
# ### end Alembic commands ###
conn = op.get_bind()
# Set user's modified_at with the value of created_at.
query = sa.select([users.c.uuid, users.c.created_at]).select_from(users)
updates = []
for row in conn.execute(query).fetchall():
updates.append({'b_uuid': row['uuid'], 'modified_at': row['created_at']})
if updates:
query = (sa.update(users)
.values(modified_at=bindparam('modified_at'))
.where(users.c.uuid == bindparam('b_uuid')))
conn.execute(query, updates)
# Set keypairs's modified_at with the value of created_at.
query = sa.select([keypairs.c.access_key, keypairs.c.created_at]).select_from(keypairs)
updates = []
for row in conn.execute(query).fetchall():
updates.append({'b_access_key': row['access_key'], 'modified_at': row['created_at']})
if updates:
query = (sa.update(keypairs)
.values(modified_at=bindparam('modified_at'))
.where(keypairs.c.access_key == bindparam('b_access_key')))
conn.execute(query, updates)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'modified_at')
op.drop_column('keypairs', 'modified_at')
# ### end Alembic commands ###
| lgpl-3.0 | 236,808,993,664,304,350 | 38.089744 | 93 | 0.625451 | false | 3.664663 | false | false | false |
psci2195/espresso-ffans | samples/lb_profile.py | 1 | 2856 | # Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Simulate the flow of a lattice-Boltzmann fluid past a cylinder,
obtain the velocity profile in polar coordinates and compare it
to the analytical solution.
"""
import numpy as np
import matplotlib.pyplot as plt
import espressomd
required_features = ["CUDA", "LB_BOUNDARIES_GPU"]
espressomd.assert_features(required_features)
import espressomd.lb
import espressomd.observables
import espressomd.shapes
import espressomd.lbboundaries
import espressomd.accumulators
system = espressomd.System(box_l=[10.0, 10.0, 5.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
n_steps = 500
lb_fluid = espressomd.lb.LBFluidGPU(
agrid=1.0, dens=1.0, visc=1.0, tau=0.01, ext_force_density=[0, 0, 0.15], kT=1.0, seed=32)
system.actors.add(lb_fluid)
system.thermostat.set_lb(LB_fluid=lb_fluid, seed=23)
fluid_obs = espressomd.observables.CylindricalLBVelocityProfile(
center=[5.0, 5.0, 0.0],
axis='z',
n_r_bins=100,
n_phi_bins=1,
n_z_bins=1,
min_r=0.0,
max_r=4.0,
min_phi=-np.pi,
max_phi=np.pi,
min_z=0.0,
max_z=10.0,
sampling_delta_x=0.05,
sampling_delta_y=0.05,
sampling_delta_z=1.0)
cylinder_shape = espressomd.shapes.Cylinder(
center=[5.0, 5.0, 5.0],
axis=[0, 0, 1],
direction=-1,
radius=4.0,
length=20.0)
cylinder_boundary = espressomd.lbboundaries.LBBoundary(shape=cylinder_shape)
system.lbboundaries.add(cylinder_boundary)
system.integrator.run(n_steps)
accumulator = espressomd.accumulators.MeanVarianceCalculator(obs=fluid_obs)
system.auto_update_accumulators.add(accumulator)
system.integrator.run(n_steps)
lb_fluid_profile = accumulator.get_mean()
lb_fluid_profile = np.reshape(lb_fluid_profile, (100, 1, 1, 3))
def poiseuille_flow(r, R, ext_force_density):
return ext_force_density * 1. / 4 * (R**2.0 - r**2.0)
# Please note that due to symmetry and interpolation, a plateau is seen
# near r=0.
n_bins = len(lb_fluid_profile[:, 0, 0, 2])
r_max = 4.0
r = np.linspace(0.0, r_max, n_bins)
plt.plot(r, lb_fluid_profile[:, 0, 0, 2], label='LB profile')
plt.plot(r, poiseuille_flow(r, r_max, 0.15), label='analytical solution')
plt.legend()
plt.show()
| gpl-3.0 | -3,930,206,780,037,609,000 | 30.384615 | 93 | 0.719188 | false | 2.824926 | false | false | false |
openaps/openaps | openaps/vendors/units.py | 1 | 2534 |
"""
Units - units tool for openaps
"""
from openaps.uses.use import Use
from openaps.uses.registry import Registry
from openaps.glucose.convert import Convert as GlucoseConvert
import json
import argparse
def set_config (args, device):
return device
def display_device (device):
return ''
use = Registry( )
class ConvertInput (Use):
def get_params (self, args):
return dict(input=args.input, to=args.to)
def configure_app (self, app, parser):
parser.add_argument('--to','-t', default='mg/dL', choices=['mmol/L', 'mg/dL'])
parser.add_argument('input', default='-')
def get_program (self, args):
params = self.get_params(args)
program = json.load(argparse.FileType('r')(params.get('input')))
return program
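    # Keyed by the *target* unit requested via --to; the value is the
    # GlucoseConvert helper that converts a reading into that unit
    # (picked up by set_converter() below).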
CONVERTERS = { 'mmol/L': GlucoseConvert.mg_dl_to_mmol_l
, 'mg/dL': GlucoseConvert.mmol_l_to_mg_dl }
def set_converter (self, args):
params = self.get_params(args)
converters = self.CONVERTERS
self.units = params.get('to')
self.to_unit = converters.get(self.units)
def convert (self, program):
raise NotImplementedError( )
def main (self, args, app):
self.set_converter(args)
program = self.get_program(args)
results = self.convert(program)
return results
@use( )
class bg_targets (ConvertInput):
"""
Convert bg_targets json to preferred unit.
"""
def convert (self, bg_targets):
assert bg_targets['units'] in ['mg/dL', 'mmol/L']
if bg_targets['units'] != self.units:
for target in bg_targets['targets']:
target['high'] = self.to_unit(target['high'])
target['low'] = self.to_unit(target['low'])
bg_targets['user_preferred_units'] = bg_targets['units']
bg_targets['units'] = self.units
return bg_targets
@use( )
class insulin_sensitivities (ConvertInput):
"""
Convert read_insulin_sensitivities json to preferred unit.
"""
def convert (self, insulin_sensitivities):
assert insulin_sensitivities['units'] in ['mg/dL', 'mmol/L']
if insulin_sensitivities ['units'] != self.units:
for sens in insulin_sensitivities['sensitivities']:
sens['sensitivity'] = self.to_unit(sens['sensitivity'])
insulin_sensitivities['user_preferred_units'] = insulin_sensitivities['units']
insulin_sensitivities['units'] = self.units
return insulin_sensitivities
def get_uses (device, config):
all_uses = use.get_uses(device, config)
all_uses.sort(key=lambda usage: getattr(usage, 'sortOrder', usage.__name__))
return all_uses
| mit | 3,989,318,095,668,751,400 | 27.47191 | 82 | 0.66614 | false | 3.261261 | false | false | false |
henriquegemignani/randovania | randovania/game_connection/nintendont_backend.py | 1 | 10056 | import asyncio
import dataclasses
import struct
from asyncio import StreamReader, StreamWriter
from typing import List, Optional, Dict
from randovania.game_connection.backend_choice import GameBackendChoice
from randovania.game_connection.connection_backend import ConnectionBackend, MemoryOperation, MemoryOperationException
from randovania.game_connection.connection_base import GameConnectionStatus
from randovania.game_description.world import World
@dataclasses.dataclass(frozen=True)
class SocketHolder:
reader: StreamReader
writer: StreamWriter
api_version: int
max_input: int
max_output: int
max_addresses: int
class RequestBatch:
def __init__(self):
self.data = b""
self.ops = []
self.num_read_bytes = 0
self.addresses = []
def copy(self) -> "RequestBatch":
new = RequestBatch()
new.data = self.data
new.ops = list(self.ops)
new.num_read_bytes = self.num_read_bytes
new.addresses.extend(self.addresses)
return new
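    # Assembles one request packet as sent over the socket: a 4-byte header
    # (a leading 0 -- presumably the command byte -- op count, address count,
    # and a constant 1), each referenced address as a big-endian u32, then the
    # per-op payload accumulated by add_op().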
def build_request_data(self):
header = struct.pack(f">BBBB{len(self.addresses)}I", 0, len(self.ops), len(self.addresses), 1, *self.addresses)
return header + self.data
@property
def input_bytes(self):
return len(self.data) + 4 * len(self.addresses)
@property
def num_validator_bytes(self):
return 1 + (len(self.ops) - 1) // 8 if self.ops else 0
@property
def output_bytes(self):
return self.num_read_bytes + self.num_validator_bytes
def is_compatible_with(self, holder: SocketHolder):
return (len(self.addresses) < holder.max_addresses
and self.output_bytes <= holder.max_output
and self.input_bytes <= holder.max_input)
def add_op(self, op: MemoryOperation):
if op.address not in self.addresses:
self.addresses.append(op.address)
if op.read_byte_count is not None:
self.num_read_bytes += op.read_byte_count
op_byte = self.addresses.index(op.address)
if op.read_byte_count is not None:
op_byte = op_byte | 0x80
if op.write_bytes is not None:
op_byte = op_byte | 0x40
if op.byte_count == 4:
op_byte = op_byte | 0x20
if op.offset is not None:
op_byte = op_byte | 0x10
self.data += struct.pack(">B", op_byte)
if op.byte_count != 4:
self.data += struct.pack(">B", op.byte_count)
if op.offset is not None:
self.data += struct.pack(">h", op.offset)
if op.write_bytes is not None:
self.data += op.write_bytes
self.ops.append(op)
def _was_invalid_address(response: bytes, i: int) -> bool:
try:
return not response[i // 8] & (1 << (i % 8))
except IndexError:
raise MemoryOperationException("Server response too short for validator bytes")
class NintendontBackend(ConnectionBackend):
_world: Optional[World] = None
_port = 43673
_socket: Optional[SocketHolder] = None
def __init__(self, ip: str):
super().__init__()
self._ip = ip
@property
def ip(self):
return self._ip
@property
def lock_identifier(self) -> Optional[str]:
return None
@property
def backend_choice(self) -> GameBackendChoice:
return GameBackendChoice.NINTENDONT
# Game Backend Stuff
async def _connect(self) -> bool:
if self._socket is not None:
return True
try:
self._socket_error = None
self.logger.info(f"Connecting to {self._ip}:{self._port}.")
reader, writer = await asyncio.open_connection(self._ip, self._port)
# Send API details request
self.logger.info(f"Connection open, requesting API details.")
writer.write(struct.pack(f">BBBB", 1, 0, 0, 1))
await asyncio.wait_for(writer.drain(), timeout=30)
self.logger.debug(f"Waiting for API details response.")
response = await asyncio.wait_for(reader.read(1024), timeout=15)
api_version, max_input, max_output, max_addresses = struct.unpack_from(">IIII", response, 0)
self.logger.info(f"Remote replied with API level {api_version}, connection successful.")
self._socket = SocketHolder(reader, writer, api_version, max_input, max_output, max_addresses)
return True
except (OSError, asyncio.TimeoutError, struct.error) as e:
self._socket = None
self.logger.warning(f"Unable to connect to {self._ip}:{self._port} - ({type(e).__name__}) {e}")
self._socket_error = e
def _disconnect(self):
socket = self._socket
self._socket = None
if socket is not None:
socket.writer.close()
def _prepare_requests_for(self, ops: List[MemoryOperation]) -> List[RequestBatch]:
requests: List[RequestBatch] = []
current_batch = RequestBatch()
def _new_request():
nonlocal current_batch
requests.append(current_batch)
current_batch = RequestBatch()
processes_ops = []
max_write_size = self._socket.max_input - 20
for op in ops:
if op.byte_count == 0:
continue
op.validate_byte_sizes()
if op.read_byte_count is None and (op.write_bytes is not None
and len(op.write_bytes) > max_write_size):
self.logger.debug(f"Operation had {len(op.write_bytes)} bytes, "
f"above the limit of {max_write_size}. Splitting.")
for offset in range(0, len(op.write_bytes), max_write_size):
if op.offset is None:
address = op.address + offset
op_offset = None
else:
address = op.address
op_offset = op.offset + offset
processes_ops.append(MemoryOperation(
address=address,
offset=op_offset,
write_bytes=op.write_bytes[offset:min(offset + max_write_size, len(op.write_bytes))],
))
else:
processes_ops.append(op)
for op in processes_ops:
experimental = current_batch.copy()
experimental.add_op(op)
if not experimental.is_compatible_with(self._socket):
_new_request()
current_batch.add_op(op)
if not current_batch.is_compatible_with(self._socket):
raise ValueError(f"Request {op} is not compatible with current server.")
# Finish the last batch
_new_request()
return requests
async def _send_requests_to_socket(self, requests: List[RequestBatch]) -> List[bytes]:
all_responses = []
try:
for request in requests:
data = request.build_request_data()
self._socket.writer.write(data)
await self._socket.writer.drain()
if request.output_bytes > 0:
response = await asyncio.wait_for(self._socket.reader.read(1024), timeout=15)
all_responses.append(response)
else:
all_responses.append(b"")
except (OSError, asyncio.TimeoutError) as e:
if isinstance(e, asyncio.TimeoutError):
self.logger.warning(f"Timeout when reading response from {self._ip}")
self._socket_error = MemoryOperationException(f"Timeout when reading response")
else:
self.logger.warning(f"Unable to send {len(requests)} request to {self._ip}:{self._port}: {e}")
self._socket_error = MemoryOperationException(f"Unable to send {len(requests)} requests: {e}")
self._disconnect()
raise self._socket_error from e
return all_responses
async def _perform_memory_operations(self, ops: List[MemoryOperation]) -> Dict[MemoryOperation, bytes]:
if self._socket is None:
raise MemoryOperationException("Not connected")
requests = self._prepare_requests_for(ops)
all_responses = await self._send_requests_to_socket(requests)
result = {}
for request, response in zip(requests, all_responses):
read_index = request.num_validator_bytes
for i, op in enumerate(request.ops):
if op.read_byte_count is None:
continue
if _was_invalid_address(response, i):
raise MemoryOperationException("Operation tried to read an invalid address")
split = response[read_index:read_index + op.read_byte_count]
if len(split) != op.read_byte_count:
raise MemoryOperationException(f"Received {len(split)} bytes, expected {op.read_byte_count}")
else:
assert op not in result
result[op] = split
read_index += op.read_byte_count
return result
async def update(self, dt: float):
if not self._enabled:
return
if not await self._connect():
return
if not await self._identify_game():
return
await self._interact_with_game(dt)
@property
def name(self) -> str:
return "Nintendont"
@property
def current_status(self) -> GameConnectionStatus:
if self._socket is None:
return GameConnectionStatus.Disconnected
if self.patches is None:
return GameConnectionStatus.UnknownGame
if self._world is None:
return GameConnectionStatus.TitleScreen
elif not self.checking_for_collected_index:
return GameConnectionStatus.TrackerOnly
else:
return GameConnectionStatus.InGame
| gpl-3.0 | -8,874,167,788,248,815,000 | 34.408451 | 119 | 0.584029 | false | 4.202257 | false | false | false |
iceman1989/Check_mk | web/htdocs/htmllib.py | 1 | 58082 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Notes for future rewrite:
#
# - Make clear which functions return values and which write out values
# render_*, add_*, write_* (e.g. icon() -> outputs directly,
#     render_icon() -> returns icon)
#
# - Order of arguments:
#   e.g. icon(help, icon) -> change and make help optional?
#
# - Fix names of message() show_error() show_warning()
#
# - General rules:
# 1. values of type str that are passed as arguments or
#      return values or are stored in data structures must not contain
#      non-ASCII characters! UTF-8 encoding must only be applied in
# the last few CPU cycles before outputting. Conversion from
# input to str or unicode must happen as early as possible,
# directly when reading from file or URL.
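#      An illustrative sketch of that rule (the names here are only examples):
#          text = open(path).read().decode("utf-8")  # decode at the input boundary
#          ... work with unicode internally ...
#          html.write(text)                          # write() encodes to UTF-8 on output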
import time, os, pwd, urllib, random, re
from lib import *
# Python 2.3 does not have 'set' in normal namespace.
# But it can be imported from 'sets'
try:
set()
except NameError:
from sets import Set as set
# Only parse variable adhering to the following regular expressions
varname_regex = re.compile('^[\w\d_.%+-\\\*]+$')
# Information about uri
class InvalidUserInput(Exception):
def __init__(self, varname, text):
self.varname = varname
self.text = text
class html:
def __init__(self):
self.user_errors = {}
self.focus_object = None
self.global_vars = []
self.render_headfoot = True
self.browser_reload = 0
self.browser_redirect = ''
self.events = set([]) # currently used only for sounds
self.header_sent = False
self.output_format = "html"
self.status_icons = {}
self.link_target = None
self.form_name = None
self.form_vars = []
self.var_stash = []
self.context_buttons_open = False
self.mobile = False
self.buffering = True
self.transformations = []
self.final_javascript_code = ""
self.auto_id = 0
self.have_help = False
self.plugged = False
self.keybindings = []
self.keybindings_enabled = True
self.io_error = False
self.enable_debug = False
self.screenshotmode = False
self.help_visible = False
self.treestates = {}
self.treestates_for_id = None
self.caches = {}
self.new_transids = []
self.ignore_transids = False
self.current_transid = None
self.page_context = {}
# Time measurement
self.times = {}
self.start_time = time.time()
self.last_measurement = self.start_time
RETURN = 13
SHIFT = 16
CTRL = 17
ALT = 18
BACKSPACE = 8
F1 = 112
def set_page_context(self, c):
self.page_context = c
def set_buffering(self, b):
self.buffering = b
def push_transformation(self, tf):
self.transformations.append(tf)
def pop_transformation(self):
del self.transformations[-1]
def some_id(self):
self.auto_id += 1
return "id_%d" % self.auto_id
def set_output_format(self, f):
self.output_format = f
def set_link_target(self, framename):
self.link_target = framename
def write(self, text):
for tf in self.transformations:
text = tf(text)
if type(text) == unicode:
text = text.encode("utf-8")
if self.plugged:
self.plugged_text += text
else:
self.lowlevel_write(text)
def plug(self):
self.plugged = True
self.plugged_text = ''
def flush(self):
if self.plugged:
self.lowlevel_write(self.plugged_text)
self.plugged_text = ''
def drain(self):
if self.plugged:
t = self.plugged_text
self.plugged_text = ''
return t
else:
return ''
def unplug(self):
self.flush()
self.plugged = False
def heading(self, text):
self.write("<h2>%s</h2>\n" % text)
def rule(self):
self.write("<hr/>")
def age_text(self, timedif):
timedif = int(timedif)
if timedif < 120:
return "%d sec" % timedif
minutes = timedif / 60
if minutes < 120:
return "%d min" % minutes
hours = minutes / 60
if hours < 48:
return "%d hrs" % hours
days = hours / 24
return "%d days" % days
def in_form(self):
return self.form_name != None
def begin_form(self, name, action = None, method = "GET",
onsubmit = None, add_transid = True):
self.form_vars = []
if action == None:
action = self.myfile + ".py"
self.current_form = name
if method.lower() == "post":
enctype = ' enctype="multipart/form-data"'
else:
enctype = ''
if onsubmit:
onsubmit = ' onsubmit="%s"' % self.attrencode(onsubmit)
else:
onsubmit = ''
enc_name = self.attrencode(name)
self.write('<form id="form_%s" name="%s" class="%s" action="%s" method="%s"%s%s>\n' %
(enc_name, enc_name, enc_name, self.attrencode(action), self.attrencode(method),
enctype, onsubmit))
self.hidden_field("filled_in", name)
if add_transid:
self.hidden_field("_transid", str(self.get_transid()))
self.hidden_fields(self.global_vars)
self.form_name = name
def end_form(self):
self.write("</form>\n")
self.form_name = None
def form_submitted(self, form_name=None):
if form_name:
return self.var("filled_in") == form_name
else:
return self.has_var("filled_in")
def add_user_error(self, varname, message):
if type(varname) == list:
for v in varname:
self.add_user_error(v, message)
else:
self.user_errors[varname] = message
def has_user_errors(self):
return len(self.user_errors) > 0
def show_user_errors(self):
if self.has_user_errors():
self.write('<div class=error>\n')
self.write('<br>'.join(self.user_errors.values()))
self.write('</div>\n')
def hidden_field(self, var, value, id = None, add_var = False):
if value != None:
id = id and ' id="%s"' % self.attrencode(id) or ''
self.write("<input type=\"hidden\" name=\"%s\" value=\"%s\"%s />" %
(self.attrencode(var), self.attrencode(value), id))
if add_var:
self.add_form_var(var)
# Beware: call this method just before end_form(). It will
    # add all current non-underscored HTML variables as hidden
    # fields to the form - *if* they are not used in any input
# field. (this is the reason why you must not add any further
# input fields after this method has been called).
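    # A minimal usage sketch (illustrative only; the form and variable names
    # are made up):
    #   html.begin_form("edit")
    #   html.text_input("alias")
    #   html.hidden_fields()   # keep this last, right before end_form()
    #   html.end_form()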
def hidden_fields(self, varlist = None, **args):
add_action_vars = args.get("add_action_vars", False)
if varlist != None:
for var in varlist:
value = self.vars.get(var, "")
self.hidden_field(var, value)
else: # add *all* get variables, that are not set by any input!
for var, value in self.vars.items():
if var not in self.form_vars and \
(var[0] != "_" or add_action_vars): # and var != "filled_in":
self.hidden_field(var, value)
def add_global_vars(self, varnames):
self.global_vars += varnames
# [('varname1', value1), ('varname2', value2) ]
def makeuri(self, addvars, remove_prefix = None, filename=None):
new_vars = [ nv[0] for nv in addvars ]
vars = [ (v, self.var(v))
for v in self.vars
if v[0] != "_" and v not in new_vars ]
if remove_prefix != None:
vars = [ i for i in vars if not i[0].startswith(remove_prefix) ]
vars = vars + addvars
if filename == None:
filename = self.urlencode(self.myfile) + ".py"
if vars:
return filename + "?" + self.urlencode_vars(vars)
else:
return filename
def makeactionuri(self, addvars):
return self.makeuri(addvars + [("_transid", self.get_transid())])
def makeuri_contextless(self, vars, filename=None):
if not filename:
filename = self.myfile + ".py"
if vars:
return filename + "?" + self.urlencode_vars(vars)
else:
return filename
def image_button(self, varname, title, cssclass = ''):
if not self.mobile:
self.write('<label for="%s" class="image_button">' % self.attrencode(varname))
self.raw_button(varname, title, cssclass)
if not self.mobile:
self.write('</label>')
def button(self, *args):
self.image_button(*args)
def raw_button(self, varname, title, cssclass=""):
self.write("<input onfocus=\"if (this.blur) this.blur();\" "
"type=\"submit\" name=\"%s\" id=\"%s\" value=\"%s\" "
"class=\"%s\" />\n" % \
( varname, varname, title, cssclass))
def buttonlink(self, href, text, add_transid=False, obj_id='', style='', title='', disabled=''):
if add_transid:
href += "&_transid=%s" % self.get_transid()
if not obj_id:
obj_id = self.some_id()
obj_id = ' id=%s' % obj_id
if style:
style = ' style="%s"' % style
if title:
title = ' title="%s"' % title
if disabled:
            disabled = ' disabled="%s"' % disabled
if not self.mobile:
self.write('<label for="%s" class="image_button">' % obj_id)
self.write('<input%s%s%s%s value="%s" class="buttonlink" type="button" onclick="location.href=\'%s\'" />\n' % \
(obj_id, style, title, disabled, text, href))
if not self.mobile:
self.write('</label>')
def icon(self, help, icon):
self.write(self.render_icon(icon, help))
def render_icon(self, icon, help="", middle=True):
align = middle and ' align=absmiddle' or ''
title = help and ' title="%s"' % self.attrencode(help) or ""
if "/" in icon:
src = "images/" + icon
else:
src = "images/icon_%s.png" % icon
return '<img src="%s" class=icon%s%s />' % (src, align, title)
def empty_icon(self):
self.write('<img class=icon src="images/trans.png" />')
def icon_button(self, url, help, icon, id="", onclick="", style="", target="", cssclass=""):
if id:
id = "id='%s' " % id
if onclick:
onclick = 'onclick="%s" ' % onclick
url = "javascript:void(0)"
if style:
style = 'style="%s" ' % style
if target:
target = 'target="%s" ' % target
if cssclass:
cssclass = 'class="%s" ' % cssclass
self.write('<a %s%s%s%s%sonfocus="if (this.blur) this.blur();" href="%s">'
'<img align=absmiddle class=iconbutton title="%s" '
'src="images/button_%s_lo.png" '
'onmouseover=\"hilite_icon(this, 1)\" '
'onmouseout=\"hilite_icon(this, 0)\">'
'</a>' % (id, onclick, style, target, cssclass, url, self.attrencode(help), icon))
def empty_icon_button(self):
self.write('<img class="iconbutton trans" src="images/trans.png">')
def disabled_icon_button(self, icon):
self.write('<img class="iconbutton" align=absmiddle src="images/icon_%s.png">' % icon)
def jsbutton(self, varname, text, onclick, style=''):
if style:
style = ' style="%s"' % style
self.write("<input type=button name=%s id=%s onclick=\"%s\" "
"class=button%s value=\"%s\" />" % (varname, varname, onclick, style, text))
def begin_context_buttons(self):
if not self.context_buttons_open:
self.context_button_hidden = False
self.write("<table class=contextlinks><tr><td>\n")
self.context_buttons_open = True
def end_context_buttons(self):
if self.context_buttons_open:
if self.context_button_hidden:
self.write('<div title="%s" id=toggle class="contextlink short" '
% _("Show all buttons"))
self.context_button_hover_code("_short")
self.write("><a onclick='unhide_context_buttons(this);' href='#'>...</a></div>")
self.write("</td></tr></table>\n")
self.context_buttons_open = False
def context_button(self, title, url, icon=None, hot=False, id=None, bestof=None, hover_title='', fkey=None):
title = self.attrencode(title)
display = "block"
if bestof:
counts = self.get_button_counts()
weights = counts.items()
weights.sort(cmp = lambda a,b: cmp(a[1], b[1]))
best = dict(weights[-bestof:])
if id not in best:
display="none"
self.context_button_hidden = True
if not self.context_buttons_open:
self.begin_context_buttons()
if icon:
title = '<img src="images/icon_%s.png">%s' % (self.attrencode(icon), self.attrencode(title))
if id:
idtext = " id='%s'" % self.attrencode(id)
else:
idtext = ""
self.write('<div%s style="display:%s" class="contextlink%s%s" ' %
(idtext, display, hot and " hot" or "", (fkey and self.keybindings_enabled) and " button" or ""))
self.context_button_hover_code(hot and "_hot" or "")
self.write('>')
self.write('<a href="%s"' % self.attrencode(url))
if hover_title:
self.write(' title="%s"' % self.attrencode(hover_title))
if bestof:
self.write(' onclick="count_context_button(this); " ')
if fkey and self.keybindings_enabled:
title += '<div class=keysym>F%d</div>' % fkey
self.add_keybinding([html.F1 + (fkey - 1)], "document.location='%s';" % self.attrencode(url))
self.write('>%s</a></div>\n' % title)
def context_button_hover_code(self, what):
self.write(r'''onmouseover='this.style.backgroundImage="url(\"images/contextlink%s_hi.png\")";' ''' % what)
self.write(r'''onmouseout='this.style.backgroundImage="url(\"images/contextlink%s.png\")";' ''' % what)
def number_input(self, varname, deflt = "", size=8, style="", submit=None):
self.text_input(varname, str(deflt), "number", size=size, style=style, submit=submit)
# Needed if input elements are put into forms without the helper
# functions of us.
def add_form_var(self, varname):
self.form_vars.append(varname)
def text_input(self, varname, default_value = "", cssclass = "text", label = None, id = None,
submit = None, attrs = {}, **args):
if default_value == None:
default_value = ""
addprops = ""
add_style = ""
if "size" in args and args["size"]:
if args["size"] == "max":
add_style = "width: 100%; "
else:
addprops += " size=\"%d\"" % (args["size"] + 1)
if not args.get('omit_css_width', False) and "width:" not in args.get("style", "") and not self.mobile:
add_style = "width: %d.8ex; " % args["size"]
if "type" in args:
mytype = args["type"]
else:
mytype = "text"
if "autocomplete" in args:
addprops += " autocomplete=\"%s\"" % args["autocomplete"]
if args.get("style"):
addprops += " style=\"%s%s\"" % (add_style, args["style"])
elif add_style:
addprops += " style=\"%s\"" % add_style
if args.get("read_only"):
addprops += " readonly"
if submit != None:
if not id:
id = "ti_%s" % varname
self.final_javascript('document.getElementById("%s").onkeydown = '
'function(e) { if (!e) e = window.event; textinput_enter_submit(e, "%s"); };'
% (id, submit))
value = self.vars.get(varname, default_value)
error = self.user_errors.get(varname)
html = ""
if error:
html = "<x class=\"inputerror\">"
if label:
if not id:
id = "ti_%s" % varname
html += '<label for="%s">%s</label>' % (id, label)
if id:
addprops += ' id="%s"' % id
attributes = ' ' + ' '.join([ '%s="%s"' % (k, v) for k, v in attrs.iteritems() ])
html += "<input type=\"%s\" class=\"%s\" value=\"%s\" name=\"%s\"%s%s />\n" % \
(mytype, cssclass, self.attrencode(value), varname, addprops, attributes)
if error:
html += "</x>"
self.set_focus(varname)
self.write(html)
self.form_vars.append(varname)
def password_input(self, varname, default_value = "", size=12, **args):
self.text_input(varname, default_value, type="password", size = size, **args)
def text_area(self, varname, deflt="", rows=4, cols=30, attrs = {}):
value = self.var(varname, deflt)
error = self.user_errors.get(varname)
if error:
self.write("<x class=inputerror>")
attributes = ' ' + ' '.join([ '%s="%s"' % (k, v) for k, v in attrs.iteritems() ])
self.write("<textarea rows=%d cols=%d name=\"%s\"%s>%s</textarea>\n" % (
rows, cols, varname, attributes, self.attrencode(value)))
if error:
self.write("</x>")
self.set_focus(varname)
self.form_vars.append(varname)
def sorted_select(self, varname, choices, deflt="", onchange=None, attrs = {}):
# Sort according to display texts, not keys
sorted = choices[:]
sorted.sort(lambda a,b: cmp(a[1].lower(), b[1].lower()))
self.select(varname, sorted, deflt, onchange, attrs)
# Choices is a list pairs of (key, title)
def select(self, varname, choices, deflt="", onchange=None, attrs = {}):
current = self.var_utf8(varname, deflt)
onchange_code = onchange and " onchange=\"%s\"" % (onchange) or ""
attrs.setdefault('size', 1)
attributes = ' ' + ' '.join([ '%s="%s"' % (k, v) for k, v in attrs.iteritems() ])
self.write("<select%s name=\"%s\" id=\"%s\"%s>\n" %
(onchange_code, varname, varname, attributes))
for value, text in choices:
if value == None:
value = ""
elif type(value) == unicode:
value = value.encode('utf-8')
sel = value == current and " selected" or ""
self.write("<option value=\"%s\"%s>%s</option>\n" %
(self.attrencode(value), sel, self.attrencode(text)))
self.write("</select>\n")
if varname:
self.form_vars.append(varname)
def icon_select(self, varname, options, deflt=""):
current = self.var(varname, deflt)
self.write("<select class=icon name=\"%s\" id=\"%s\" size=\"1\">\n" %
(varname, varname))
for value, text, icon in options:
if value == None: value = ""
sel = value == current and " selected" or ""
self.write('<option style="background-image:url(images/icon_%s.png);" '
'value=\"%s\"%s>%s</option>\n' %
(icon, self.attrencode(value), sel, self.attrencode(text)))
self.write("</select>\n")
if varname:
self.form_vars.append(varname)
def begin_radio_group(self, horizontal=False):
if self.mobile:
if horizontal:
add = 'data-type="horizontal" '
else:
add = ''
self.write('<fieldset %s data-role="controlgroup">' % add)
def end_radio_group(self):
if self.mobile:
self.write('</fieldset>')
def radiobutton(self, varname, value, checked, label):
if self.has_var(varname):
checked = self.var(varname) == value
checked_text = checked and " checked" or ""
if label:
id = "rb_%s_%s" % (varname, self.attrencode(value))
idtxt = ' id="%s"' % id
else:
idtxt = ""
self.write("<input type=radio name=%s value=\"%s\"%s%s>\n" %
(varname, self.attrencode(value), checked_text, idtxt))
if label:
self.write('<label for="%s">%s</label>\n' % (id, label))
self.form_vars.append(varname)
def begin_checkbox_group(self, horizonal=False):
self.begin_radio_group(horizonal)
def end_checkbox_group(self):
self.end_radio_group()
def checkbox(self, varname, deflt=False, cssclass = '', onclick = None, label=None, id=None, add_attr = None):
if add_attr == None:
add_attr = [] # do not use [] as default element, it will be a global variable!
error = self.user_errors.get(varname)
if error:
self.write("<x class=inputerror>")
self.write("<span class=checkbox>")
# Problem with checkboxes: The browser will add the variable
# only to the URL if the box is checked. So in order to detect
        # whether we should add the default value, we need to detect
# if the form is printed for the first time. This is the
# case if "filled_in" is not set.
value = self.get_checkbox(varname)
if value == None: # form not yet filled in
value = deflt
checked = value and " CHECKED " or ""
if cssclass:
cssclass = ' class="%s"' % cssclass
onclick_code = onclick and " onclick=\"%s\"" % (onclick) or ""
if label and not id:
id = "cb_" + varname
if id:
add_attr.append('id="%s"' % id)
add_attr_code = ''
if add_attr:
add_attr_code = ' ' + ' '.join(add_attr)
self.write("<input type=checkbox name=\"%s\"%s%s%s%s>\n" %
(varname, checked, cssclass, onclick_code, add_attr_code))
self.form_vars.append(varname)
if label:
self.write('<label for="%s">%s</label>\n' % (id, label))
self.write("</span>")
if error:
self.write("</x>")
    # Check whether the current form has already been filled in (i.e. we
    # display the form a second time, showing the values typed in the first
    # time and complaining about invalid user input).
def form_filled_in(self, form_name = None):
if form_name == None:
form_name = self.form_name
return self.has_var("filled_in") and (
form_name == None or \
form_name in self.list_var("filled_in"))
    # Get the value of a checkbox. Return True, False or None. None means
    # that no form has been submitted. The problem here is the distinction
    # between False and None: the browser does not set the variables for
    # checkboxes that are not checked :-(
def get_checkbox(self, varname, form_name = None):
if self.has_var(varname):
return not not self.var(varname)
elif not self.form_filled_in(form_name):
return None
else:
# Form filled in but variable missing -> Checkbox not checked
return False
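    # Illustrative usage sketch (not part of the original code; the variable
    # name is made up): callers typically resolve the three states like this:
    #
    #   value = html.get_checkbox("use_feature")
    #   if value == None:      # form was never submitted -> fall back to a default
    #       value = True
    #   # otherwise value is True (checked) or False (submitted but unchecked)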
def datetime_input(self, varname, default_value, submit=None):
try:
t = self.get_datetime_input(varname)
except:
t = default_value
if varname in self.user_errors:
self.add_user_error(varname + "_date", self.user_errors[varname])
self.add_user_error(varname + "_time", self.user_errors[varname])
self.set_focus(varname + "_date")
br = time.localtime(t)
self.date_input(varname + "_date", br.tm_year, br.tm_mon, br.tm_mday, submit=submit)
self.write(" ")
self.time_input(varname + "_time", br.tm_hour, br.tm_min, submit=submit)
self.form_vars.append(varname + "_date")
self.form_vars.append(varname + "_time")
def time_input(self, varname, hours, mins, submit=None):
self.text_input(varname, "%02d:%02d" % (hours, mins), cssclass="time", size=5,
submit=submit, omit_css_width = True)
def date_input(self, varname, year, month, day, submit=None):
self.text_input(varname, "%04d-%02d-%02d" % (year, month, day),
cssclass="date", size=10, submit=submit, omit_css_width = True)
def get_datetime_input(self, varname):
t = self.var(varname + "_time")
d = self.var(varname + "_date")
if not t or not d:
raise MKUserError([varname + "_date", varname + "_time"],
_("Please specify a date and time."))
try:
br = time.strptime(d + " " + t, "%Y-%m-%d %H:%M")
except:
raise MKUserError([varname + "_date", varname + "_time"],
_("Please enter the date/time in the format YYYY-MM-DD HH:MM."))
return int(time.mktime(br))
def get_time_input(self, varname, what):
t = self.var(varname)
if not t:
raise MKUserError(varname, _("Please specify %s.") % what)
try:
h, m = t.split(":")
m = int(m)
h = int(h)
if m < 0 or m > 59 or h < 0:
raise Exception()
except:
raise MKUserError(varname, _("Please enter the time in the format HH:MM."))
return m * 60 + h * 3600
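    # Example (illustrative): get_time_input("alarm", _("the alarm time")) with
    # the variable set to "14:30" returns 30 * 60 + 14 * 3600 = 52200, i.e. the
    # time of day expressed in seconds.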
def upload_file(self, varname):
error = self.user_errors.get(varname)
if error:
self.write("<x class=inputerror>")
self.write('<input type="file" name="%s">' % varname)
if error:
self.write("</x>")
self.form_vars.append(varname)
def html_head(self, title, javascripts = [], stylesheets = ["pages"], force=False):
if not self.header_sent or force:
self.write(
u'''<!DOCTYPE HTML>
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\n''')
self.write('<title>')
self.write(self.attrencode(title))
self.write('</title>\n')
self.write('<meta http-equiv="X-UA-Compatible" content="IE=edge" />')
            # If the variable _link_target is set, then all links in this page
            # should be targeted to the HTML frame named by _link_target. This
            # is e.g. useful in the dashboard.
if self.link_target:
self.write('<base target="%s">\n' % self.attrencode(self.link_target))
# Load all specified style sheets and all user style sheets in htdocs/css
for css in [ "check_mk" ] + stylesheets + [ 'ie' ]:
if defaults.omd_root:
fname = '%s-%s.css' % (css, defaults.check_mk_version)
else:
fname = '%s.css' % css
if css == 'ie':
self.write('<!--[if IE]>\n')
self.write('<link rel="stylesheet" type="text/css" href="%s" />\n' % fname)
if css == 'ie':
self.write('<![endif]-->\n')
self.add_custom_style_sheet()
# Load specified Javascript files
for js in [ "checkmk", "hover" ] + javascripts:
if defaults.omd_root:
fname = 'js/%s-%s.js' % (js, defaults.check_mk_version)
else:
fname = 'js/%s.js' % js
self.write('<script type="text/javascript" src="%s"></script>\n' % fname)
if self.browser_reload != 0:
if self.browser_redirect != '':
self.write("<script type=\"text/javascript\">setReload(%s, '%s')</script>\n" %
(self.browser_reload, self.browser_redirect))
else:
self.write("<script type=\"text/javascript\">setReload(%s)</script>\n" % self.browser_reload)
self.write("</head>\n")
self.header_sent = True
def html_foot(self):
self.write("</html>\n")
def set_render_headfoot(self, render):
self.render_headfoot = render
def set_browser_reload(self, secs):
self.browser_reload = secs
def http_redirect(self, url):
raise MKGeneralException("http_redirect not implemented")
def set_browser_redirect(self, secs, url):
self.browser_reload = secs
self.browser_redirect = url
def immediate_browser_redirect(self, secs, url):
self.javascript("setReload(%s, '%s');" % (secs, url))
def body_css_classes(self):
body_classes = [ "main" ]
if self.var("_body_class"):
body_classes.append(self.var("_body_class"))
if self.screenshotmode:
body_classes.append("screenshotmode")
return " ".join(body_classes)
def body_start(self, title='', **args):
self.html_head(title, **args)
self.write('<body class="%s">' % self.body_css_classes())
def header(self, title='', **args):
if self.output_format == "html":
if not self.header_sent:
self.body_start(title, **args)
self.header_sent = True
if self.render_headfoot:
self.top_heading(title)
def top_heading_left(self, title):
self.write('<table class=header><tr><td width="*" class=heading>')
self.write('<a href="#" onfocus="if (this.blur) this.blur();" '
'onclick="this.innerHTML=\'%s\'; document.location.reload();">%s</a></td>' %
(_("Reloading..."), self.attrencode(title)))
def top_heading_right(self):
cssclass = self.help_visible and "active" or "passive"
self.write('<a id=helpbutton class=%s href="#" onclick="help_toggle();" style="display: none"></a>' %
cssclass)
self.write("%s</td></tr></table>" %
_("<a href=\"http://mathias-kettner.de\"><img src=\"images/logo_mk_small.png\"/></a>"))
self.write("<hr class=header>\n")
if self.enable_debug:
self.dump_get_vars()
def dump_get_vars(self):
self.begin_foldable_container("html", "debug_vars", True, _("GET/POST variables of this page"))
self.debug_vars(hide_with_mouse = False)
self.end_foldable_container()
def bottom_focuscode(self):
if self.focus_object:
formname, varname = self.focus_object
obj = formname + "." + varname
self.write("<script language=\"javascript\" type=\"text/javascript\">\n"
"<!--\n"
"if (document.%s) {"
" document.%s.focus();\n"
" document.%s.select();\n"
"}\n"
"// -->\n"
"</script>\n" % (obj, obj, obj))
def bottom_footer(self):
if self.header_sent:
self.bottom_focuscode()
corner_text = ""
corner_text += '<div style="display: %s" id=foot_refresh>%s</div>' % (
(self.browser_reload and "inline-block" or "none",
_("refresh: <div id=foot_refresh_time>%s</div> secs") % self.browser_reload))
if self.render_headfoot:
si = self.render_status_icons()
self.write("<table class=footer><tr>"
"<td class=left>%s</td>"
"<td class=middle></td>"
"<td class=right>%s</td></tr></table>"
% (si, corner_text))
def body_end(self):
if self.have_help:
self.javascript("help_enable();")
if self.keybindings_enabled and self.keybindings:
self.javascript("var keybindings = %r;\n"
"document.body.onkeydown = keybindings_keydown;\n"
"document.body.onkeyup = keybindings_keyup;\n"
"document.body.onfocus = keybindings_focus;\n" % self.keybindings)
if self.final_javascript_code:
self.javascript(self.final_javascript_code)
self.write("</body></html>\n")
        # Hopefully this is the correct place to perform some "finalization" tasks.
self.store_new_transids()
def footer(self):
if self.output_format == "html":
self.bottom_footer()
self.body_end()
def add_status_icon(self, img, tooltip, url = None):
if url:
self.status_icons[img] = tooltip, url
else:
self.status_icons[img] = tooltip
def render_status_icons(self):
h = '<a target="_top" href="%s"><img class=statusicon src="images/status_frameurl.png" title="%s"></a>\n' % \
(self.makeuri([]), _("URL to this frame"))
h += '<a target="_top" href="%s"><img class=statusicon src="images/status_pageurl.png" title="%s"></a>\n' % \
("index.py?" + self.urlencode_vars([("start_url", self.makeuri([]))]), _("URL to this page including sidebar"))
if self.myfile == "view" and self.var('mode') != 'availability':
h += '<a target="_top" href="%s">' \
'<img class=statusicon src="images/status_download_csv.png" title="%s"></a>\n' % \
(self.makeuri([("output_format", "csv_export")]), _("Export as CSV"))
if self.myfile == "view":
mode_name = self.var('mode') == "availability" and "availability" or "view"
encoded_vars = {}
for k, v in self.page_context.items():
if v == None:
v = ''
elif type(v) == unicode:
v = v.encode('utf-8')
encoded_vars[k] = v
h += '<div class="visualadd"><a class="visualadd" href="javascript:void(0)" ' \
'onclick="toggle_add_to_visual(event, this, \'%s\', %s, {\'name\': \'%s\'})">' \
'<img class=statusicon src="images/status_add_dashlet.png" title="%s"></a></div>\n' % \
(mode_name, self.attrencode(repr(encoded_vars)), self.var('view_name'), _("Add this view to..."))
for img, tooltip in self.status_icons.items():
if type(tooltip) == tuple:
tooltip, url = tooltip
h += '<a target="_top" href="%s"><img class=statusicon src="images/status_%s.png" title="%s"></a>\n' % \
(url, img, tooltip)
else:
h += '<img class=statusicon src="images/status_%s.png" title="%s">\n' % (img, tooltip)
if self.times:
self.measure_time('body')
h += '<div class=execution_times>'
entries = self.times.items()
entries.sort()
for name, duration in entries:
h += "<div>%s: %.1fms</div>" % (name, duration * 1000)
h += '</div>'
return h
def show_exception(self, e):
details = \
'Check_MK Version: ' + defaults.check_mk_version + '\r\n' \
+ 'Page: ' + self.myfile + '.py\r\n\r\n' \
+ 'GET/POST-Variables:\r\n' \
+ '\r\n'.join([ ' '+n+'='+v for n, v in sorted(self.vars.items()) ]) + '\r\n' \
+ '\r\n' \
+ format_exception()
mail_body = \
"Dear Check_MK Developer team,\r\n\r\n" \
+ "I hereby send you a report of a crash in the Check_MK Web GUI:\r\n\r\n" \
+ details + "\r\n" \
+ "\r\n\r\nWith best regards,\r\n\r\n"
self.begin_context_buttons()
mailto_url = self.makeuri_contextless([
("subject", "Check_MK GUI Crash Report - " + defaults.check_mk_version),
("body", mail_body)], filename="mailto:[email protected]")
self.context_button(_("Submit Report"), mailto_url, "email")
self.end_context_buttons()
self.write("<div class=error>")
self.write("<b>%s:</b>\n%s<br><br>" % (_('Internal error'), self.attrencode(e)))
self.begin_foldable_container("html", "exc_details", False, _("Details"))
self.write('<div class=log_output>')
self.write("<pre>%s</pre>" % details)
self.write('</div>')
self.end_foldable_container()
self.write("</div>")
def show_error(self, msg):
self.message(msg, 'error')
def show_warning(self, msg):
self.message(msg, 'warning')
# obj might be either a string (str or unicode) or an exception object
def message(self, obj, what='message'):
if what == 'message':
cls = 'success'
prefix = _('MESSAGE')
elif what == 'warning':
cls = 'warning'
prefix = _('WARNING')
else:
cls = 'error'
prefix = _('ERROR')
# Only strip off some tags. We allow some simple tags like
# <b>, <tt>, <i> to be part of the exception message. The tags
# are escaped first and then fixed again after attrencode.
msg = self.attrencode(obj)
        msg = re.sub(r'&lt;(/?)(b|tt|i|br|pre)&gt;', r'<\1\2>', msg)
if self.output_format == "html":
if self.mobile:
self.write('<center>')
self.write("<div class=%s>%s</div>\n" % (cls, msg))
if self.mobile:
self.write('</center>')
else:
self.write('%s: %s\n' % (prefix, self.strip_tags(msg)))
# Embed help box, whose visibility is controlled by a global
# button in the page.
def help(self, text):
if text and text.strip():
self.have_help = True
self.write('<div class=help style="display: %s">' % (
not self.help_visible and "none" or "block"))
self.write(text.strip())
self.write('</div>')
def do_actions(self):
return self.var("_do_actions") not in [ "", None, _("No") ]
def set_focus(self, varname):
self.focus_object = (self.form_name, varname)
def debug_vars(self, prefix=None, hide_with_mouse=True):
if hide_with_mouse:
hover = ' onmouseover="this.style.display=\'none\';"'
else:
hover = ""
self.write('<table %s class=debug_vars>' % hover)
self.write("<tr><th colspan=2>POST / GET Variables</th></tr>")
for name, value in sorted(self.vars.items()):
if not prefix or name.startswith(prefix):
self.write("<tr><td class=left>%s</td><td class=right>%s</td></tr>\n" %
(self.attrencode(name), self.attrencode(value)))
self.write("</table>")
def var(self, varname, deflt = None):
return self.vars.get(varname, deflt)
def has_var(self, varname):
return varname in self.vars
# Checks if a variable with a given prefix is present
def has_var_prefix(self, prefix):
for varname in self.vars:
if varname.startswith(prefix):
return True
return False
def var_utf8(self, varname, deflt = None):
val = self.vars.get(varname, deflt)
if val != None:
return val.decode("utf-8")
    # Return all values of a variable that possibly occurs more
    # than once in the URL. Note: self.listvars only contains those
    # variables that really occur more than once.
def list_var(self, varname):
if varname in self.listvars:
return self.listvars[varname]
elif varname in self.vars:
return [self.vars[varname]]
else:
return []
# Adds a variable to listvars and also set it
def add_var(self, varname, value):
self.listvars.setdefault(varname, [])
self.listvars[varname].append(value)
self.vars[varname] = value
def set_var(self, varname, value):
if value == None:
self.del_var(varname)
else:
self.vars[varname] = value
def del_var(self, varname):
if varname in self.vars:
del self.vars[varname]
if varname in self.listvars:
del self.listvars[varname]
def del_all_vars(self, prefix = None):
if not prefix:
self.vars = {}
self.listvars = {}
else:
self.vars = dict([(k,v) for (k,v) in self.vars.iteritems() if not k.startswith(prefix)])
self.listvars = dict([(k,v) for (k,v) in self.listvars.iteritems() if not k.startswith(prefix)])
def stash_vars(self):
self.var_stash.append(self.vars.copy())
def unstash_vars(self):
self.vars = self.var_stash.pop()
def javascript(self, code):
self.write("<script language=\"javascript\">\n%s\n</script>\n" % code)
def final_javascript(self, code):
self.final_javascript_code += code + "\n"
def javascript_file(self, name):
self.write('<script type="text/javascript" src="js/%s.js"></script>\n' % name)
def reload_sidebar(self):
if not self.has_var("_ajaxid"):
self.javascript("if(parent && parent.frames[0]) parent.frames[0].location.reload();");
def set_ignore_transids(self):
self.ignore_transids = True
    # Compute a (hopefully) unique transaction id. This is generated during rendering
    # of a form or an action link, stored in a user specific file for later validation,
    # sent to the user's browser via the HTML code, then submitted by the user together
    # with the action (link / form) and then validated against the stored transids. If
    # it is a known transid, it will be used and invalidated. If the id is not known,
    # the action will not be processed.
def fresh_transid(self):
transid = "%d/%d" % (int(time.time()), random.getrandbits(32))
self.new_transids.append(transid)
return transid
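    # Example (illustrative): a fresh transid looks like "1361553674/2887264074",
    # i.e. "<unix timestamp>/<32 random bits>". transaction_valid() below relies
    # on this format when it splits off the timestamp for the expiry check.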
def get_transid(self):
if not self.current_transid:
self.current_transid = self.fresh_transid()
return self.current_transid
    # Save the newly created transaction IDs of the user. This is done by
    # persisting them in the user specific settings file "transids.mk". At
    # the same time we remove all entries from that list that are older
    # than one day.
def store_new_transids(self):
if self.new_transids:
valid_ids = self.load_transids(lock = True)
cleared_ids = []
now = time.time()
for valid_id in valid_ids:
timestamp, rand = valid_id.split("/")
if now - int(timestamp) < 86400: # one day
cleared_ids.append(valid_id)
self.save_transids(cleared_ids + self.new_transids, unlock = True)
# Remove the used transid from the list of valid ones
def invalidate_transid(self, used_id):
valid_ids = self.load_transids(lock = True)
try:
valid_ids.remove(used_id)
except ValueError:
return
self.save_transids(valid_ids, unlock = True)
    # Checks whether the current transaction is valid, i.e. in case of a
    # browser reload the form submit should not be handled a second time.
    # The HTML variable _transid must be present.
    #
    # In case of automation users (authed by _secret in URL): if it is empty
    # or -1, then it's always valid (this is used for webservice calls).
    # This was also possible for normal users, but has been removed to prevent
    # security related issues.
def transaction_valid(self):
if not self.has_var("_transid"):
return False
id = self.var("_transid")
if self.ignore_transids and (not id or id == '-1'):
return True # automation
if '/' not in id:
return False
# Normal user/password auth user handling
timestamp, rand = id.split("/", 1)
        # If the id is too old (more than one week), it is always
        # invalid:
now = time.time()
if now - int(timestamp) >= 604800: # 7 * 24 hours
return False
# Now check, if this id is a valid one
return id in self.load_transids()
    # Checks whether the current page is a transaction, i.e. something
# that is secured by a transid (such as a submitted form)
def is_transaction(self):
return self.has_var("_transid")
    # Called by page functions in order to check whether this was
    # a reload or the original form submission. Invalidates the used
    # transid of the user if the latter was the case.
# There are three return codes:
# True: -> positive confirmation by the user
# False: -> not yet confirmed, question is being shown
# None: -> a browser reload or a negative confirmation
def check_transaction(self):
if self.transaction_valid():
id = self.var("_transid")
if id and id != "-1":
self.invalidate_transid(id)
return True
else:
return False
    # The confirm dialog is normally not a dialog which needs to be protected
    # by a transid itself. It is only an intermediate step to the real action.
    # But there are use cases where the confirm dialog is used while rendering
    # a normal page, for example when deleting a dashlet from a dashboard. In
    # such cases, the transid must be added by the confirm dialog.
def confirm(self, msg, method="POST", action=None, add_transid=False):
if self.var("_do_actions") == _("No"):
# User has pressed "No", now invalidate the unused transid
self.check_transaction()
return # None --> "No"
if not self.has_var("_do_confirm"):
if self.mobile:
self.write('<center>')
self.write("<div class=really>%s" % msg)
self.begin_form("confirm", method=method, action=action, add_transid=add_transid)
self.hidden_fields(add_action_vars = True)
self.button("_do_confirm", _("Yes!"), "really")
self.button("_do_actions", _("No"), "")
self.end_form()
self.write("</div>")
if self.mobile:
self.write('</center>')
return False # False --> "Dialog shown, no answer yet"
else:
# Now check the transaction
return self.check_transaction() and True or None # True: "Yes", None --> Browser reload of "yes" page
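    # Hypothetical usage sketch (not original code; the handler and helper names
    # are made up): a page can drive the three possible return values like this:
    #
    #   result = html.confirm(_("Really delete this object?"))
    #   if result is False:
    #       return                # dialog has been rendered, wait for the answer
    #   elif result:
    #       delete_the_object()   # user pressed "Yes!" within a valid transaction
    #   # result is None -> "No" was pressed or the "yes" page was reloaded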
def register_event(self, name):
self.events.add(name)
def has_event(self, name):
return name in self.events
def play_sound(self, url):
self.write('<object type="audio/x-wav" data="%s" height="0" width="0">\n'
'<param name="filename" value="%s">\n'
'<param name="src" value="%s">\n'
'<param name="autostart" value="true">\n'
'<param name="playcount" value="1">\n'
'</object>\n' % (url, url, url))
if self.enable_debug:
self.write("(playing sound %s)" % url)
def apache_user(self):
return pwd.getpwuid( os.getuid() )[ 0 ]
def debug(self, *x):
import pprint
for element in x:
self.lowlevel_write("<pre>%s</pre>\n" % pprint.pformat(element))
def has_cookie(self, varname):
return varname in self.cookies
def get_cookie_names(self):
return self.cookies.keys()
def cookie(self, varname, deflt):
try:
return self.cookies[varname].value
except:
return deflt
# Keyboard control
def add_keybinding(self, keylist, jscode):
self.keybindings.append([keylist, jscode])
def add_keybindings(self, bindings):
self.keybindings += bindings
def disable_keybindings(self):
self.keybindings_enabled = False
    # From here on: formerly non-class functions
    # Encode HTML attributes: replace " with &quot; and also replace
    # < and > with &lt; and &gt;. This code is slow. Works on str and
    # unicode without changing the type. Also works on things that can
    # be converted with %s.
def attrencode(self, value):
ty = type(value)
if ty == int:
return str(value)
elif ty not in [str, unicode]: # also possible: type Exception!
value = "%s" % value # Note: this allows Unicode. value might not have type str now
        return value.replace('"', "&quot;").replace("<", "&lt;").replace(">", "&gt;")
    # This function returns a str object, never unicode!
    # Beware: this code is crucial for the performance of Multisite!
    # Changing from the self-coded urlencode to urllib.quote
    # saves more than 90% of the total HTML generating time
    # on more complex pages!
def urlencode_vars(self, vars):
output = []
for varname, value in vars:
if type(value) == int:
value = str(value)
elif type(value) == unicode:
value = value.encode("utf-8")
try:
# urllib is not able to encode non-Ascii characters. Yurks
output.append(varname + '=' + urllib.quote(value))
except:
output.append(varname + '=' + self.urlencode(value)) # slow but working
return '&'.join(output)
def urlencode(self, value):
if type(value) == unicode:
value = value.encode("utf-8")
elif value == None:
return ""
ret = ""
for c in value:
if c == " ":
c = "+"
elif ord(c) <= 32 or ord(c) > 127 or c in [ '#', '+', '"', "'", "=", "&", ":", "%" ]:
c = "%%%02x" % ord(c)
ret += c
return ret
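    # Example (illustrative): urlencode(u"a b&c") first encodes to UTF-8 and then
    # yields "a+b%26c" - spaces become "+" and unsafe characters are %-escaped.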
    # Escape a variable name so that it only uses allowed characters for URL variables
def varencode(self, varname):
if varname == None:
return "None"
if type(varname) == int:
return varname
ret = ""
for c in varname:
if not c.isdigit() and not c.isalnum() and c != "_":
ret += "%%%02x" % ord(c)
else:
ret += c
return ret
def u8(self, c):
if ord(c) > 127:
return "&#%d;" % ord(c)
else:
return c
def utf8_to_entities(self, text):
if type(text) != unicode:
return text
else:
return text.encode("utf-8")
# remove all HTML-tags
def strip_tags(self, ht):
if type(ht) not in [str, unicode]:
return ht
while True:
x = ht.find('<')
if x == -1:
break
y = ht.find('>', x)
if y == -1:
break
ht = ht[0:x] + ht[y+1:]
return ht
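    # Example (illustrative): strip_tags("<b>Hello</b> world") returns "Hello world".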
def strip_scripts(self, ht):
while True:
x = ht.find('<script')
if x == -1:
break
y = ht.find('</script>')
if y == -1:
break
ht = ht[0:x] + ht[y+9:]
return ht
def begin_foldable_container(self, treename, id, isopen, title, indent=True, first=False, icon=None, fetch_url=None):
self.folding_indent = indent
if self.user:
isopen = self.foldable_container_is_open(treename, id, isopen)
img_num = isopen and "90" or "00"
onclick = ' onclick="toggle_foldable_container(\'%s\', \'%s\', \'%s\')"' % (
treename, id, fetch_url and fetch_url or '');
onclick += ' onmouseover="this.style.cursor=\'pointer\';" '
onclick += ' onmouseout="this.style.cursor=\'auto\';" '
if indent == "nform":
self.write('<tr class=heading><td id="nform.%s.%s" %s colspan=2>' % (treename, id, onclick))
if icon:
self.write('<img class="treeangle title" src="images/icon_%s.png">' % icon)
else:
self.write('<img align=absbottom class="treeangle nform" src="images/tree_%s.png">' % (
isopen and "90" or "00"))
self.write('%s</td></tr>' % title)
else:
if not icon:
self.write('<img align=absbottom class="treeangle" id="treeimg.%s.%s" '
'src="images/tree_%s.png" %s>' %
(treename, id, img_num, onclick))
if title.startswith('<'): # custom HTML code
self.write(title)
if indent != "form":
self.write("<br>")
else:
self.write('<b class="treeangle title" class=treeangle %s>' % onclick)
if icon:
self.write('<img class="treeangle title" src="images/icon_%s.png">' % icon)
self.write('%s</b><br>' % title)
indent_style = "padding-left: %dpx; " % (indent == True and 15 or 0)
if indent == "form":
self.write("</td></tr></table>")
indent_style += "margin: 0; "
self.write('<ul class="treeangle %s" style="%s" id="tree.%s.%s">' %
(isopen and "open" or "closed", indent_style, treename, id))
# give caller information about current toggling state (needed for nform)
return isopen
def foldable_container_is_open(self, treename, id, isopen):
# try to get persisted state of tree
tree_state = self.get_tree_states(treename)
if id in tree_state:
isopen = tree_state[id] == "on"
return isopen
def end_foldable_container(self):
if self.folding_indent != "nform":
self.write("</ul>")
def get_tree_states(self, tree):
self.load_tree_states()
return self.treestates.get(tree, {})
def set_tree_state(self, tree, key, val):
self.load_tree_states()
if tree not in self.treestates:
self.treestates[tree] = {}
self.treestates[tree][key] = val
def set_tree_states(self, tree, val):
self.load_tree_states()
self.treestates[tree] = val
def parse_field_storage(self, fields, handle_uploads_as_file_obj = False):
self.vars = {}
        self.listvars = {} # for variables with more than one occurrence
self.uploads = {}
for field in fields.list:
varname = field.name
            # To prevent various injections, we only allow a defined set
            # of characters to be used in variables
if not varname_regex.match(varname):
continue
# put uploaded file infos into separate storage
if field.filename is not None:
if handle_uploads_as_file_obj:
value = field.file
else:
value = field.value
self.uploads[varname] = (field.filename, field.type, value)
else: # normal variable
                # Multiple occurrence of a variable? Store in extra list dict
if varname in self.vars:
if varname in self.listvars:
self.listvars[varname].append(field.value)
else:
self.listvars[varname] = [ self.vars[varname], field.value ]
                # In the single-value store the last occurrence of a variable
                # has precedence. That makes appending variables to the current
                # URL simpler.
self.vars[varname] = field.value
def uploaded_file(self, varname, default = None):
return self.uploads.get(varname, default)
#
# Per request caching
#
def set_cache(self, name, value):
self.caches[name] = value
def is_cached(self, name):
return name in self.caches
def get_cached(self, name):
return self.caches.get(name)
def measure_time(self, name):
self.times.setdefault(name, 0.0)
now = time.time()
elapsed = now - self.last_measurement
self.times[name] += elapsed
self.last_measurement = now
| gpl-2.0 | -4,768,458,956,842,692,000 | 37.618351 | 124 | 0.534847 | false | 3.818672 | false | false | false |
Jajcus/pyxmpp | pyxmpp/objects.py | 1 | 6158 | #
# (C) Copyright 2003-2010 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint: disable-msg=W0232, E0201
"""General base classes for PyXMPP objects."""
__docformat__="restructuredtext en"
import libxml2
from pyxmpp.xmlextra import common_doc
class StanzaPayloadObject(object):
"""Base class for objects that may be used as XMPP stanza payload and don't keep
internal XML representation, only parsed values.
Provides `as_xml` method. Derived classes must override `xml_element_name` and
`xml_element_namespace` class attributes and the `complete_xml_element` method.
Please note that not all classes derived from `StanzaPayloadObject` should be
used directly as stanza payload. Some of them are parts of higher level objects.
:Cvariables:
- `xml_element_name`: name for the XML element provided by the class.
- `xml_element_namespace`: namespace URI for the XML element provided
by the class.
:Types:
- `xml_element_name`: `unicode`
- `xml_element_namespace`: `unicode`
"""
xml_element_name = None
xml_element_namespace = None
def as_xml(self, parent = None, doc = None):
"""Get the XML representation of `self`.
New document will be created if no `parent` and no `doc` is given.
:Parameters:
- `parent`: the parent for the XML element.
- `doc`: the document where the element should be created. If not
given and `parent` is provided then autodetection is attempted.
If that fails, then `common_doc` is used.
:Types:
- `parent`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`
:return: the new XML element or document created.
:returntype: `libxml2.xmlNode` or `libxml2.xmlDoc`"""
if parent:
if not doc:
n = parent
while n:
if n.type == "xml_document":
doc = n
break
n = n.parent
if not doc:
doc = common_doc
try:
ns = parent.searchNsByHref(doc, self.xml_element_namespace)
except libxml2.treeError:
ns = None
xmlnode = parent.newChild(ns,self.xml_element_name,None)
if not ns:
ns = xmlnode.newNs(self.xml_element_namespace,None)
xmlnode.setNs(ns)
doc1 = doc
else:
if doc:
doc1 = doc
else:
doc1 = libxml2.newDoc("1.0")
xmlnode = doc1.newChild(None,self.xml_element_name, None)
ns = xmlnode.newNs(self.xml_element_namespace, None)
xmlnode.setNs(ns)
self.complete_xml_element(xmlnode, doc1)
if doc or parent:
return xmlnode
doc1.setRootElement(xmlnode)
return doc1
def complete_xml_element(self, xmlnode, doc):
"""Complete the XML node with `self` content.
        Should be overridden in classes derived from `StanzaPayloadObject`.
:Parameters:
- `xmlnode`: XML node with the element being built. It has already
right name and namespace, but no attributes or content.
- `doc`: document to which the element belongs.
:Types:
- `xmlnode`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`"""
pass
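# A minimal subclass sketch (illustrative only, not part of pyxmpp; the element
# name, namespace and attribute are made up). A payload class producing
# <example xmlns="http://example.org/ns">...</example> elements could look like:
#
#   class ExamplePayload(StanzaPayloadObject):
#       xml_element_name = u"example"
#       xml_element_namespace = u"http://example.org/ns"
#
#       def __init__(self, text):
#           self.text = text
#
#       def complete_xml_element(self, xmlnode, doc):
#           # fill in content of the already created <example/> element
#           xmlnode.setContent(self.text.encode("utf-8"))
#
#   ExamplePayload(u"hello").as_xml(parent = some_stanza_node)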
class StanzaPayloadWrapperObject(object):
"""Base class for objects that may be used as XMPP stanza payload and maintain
an internal XML representation of self.
Provides `as_xml` method. Objects of derived classes must have the `xmlnode` attribute.
Please note that not all classes derived from `StanzaPayloadWrapperObject` should be
used directly as stanza payload. Some of them are parts of higher level objects.
:Ivariables:
- `xmlnode`: XML node of the object.
:Types:
- `xmlnode`: `libxml2.xmlNode`
"""
def as_xml(self, parent = None, doc = None):
"""Get the XML representation of `self`.
New document will be created if no `parent` and no `doc` is given.
:Parameters:
- `parent`: the parent for the XML element.
- `doc`: the document where the element should be created. If not
given and `parent` is provided then autodetection is attempted.
If that fails, then `common_doc` is used.
:Types:
- `parent`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`
:return: the new XML element (copy of `self.xmlnode`) or document
            created (containing the copy as the root element).
:returntype: `libxml2.xmlNode` or `libxml2.xmlDoc`"""
if parent:
if not doc:
n = parent
while n:
if n.type == "xml_document":
doc = n
break
n = n.parent
if not doc:
doc = common_doc
copy=self.xmlnode.docCopyNode(doc,True)
parent.addChild(copy)
return copy
else:
if not doc:
doc1=libxml2.newDoc("1.0")
else:
doc1=doc
xmlnode=doc1.addChild(self.xmlnode.docCopyNode(doc,True))
doc1.setRootElement(xmlnode)
if doc:
return xmlnode
return doc1
# vi: sts=4 et sw=4
| lgpl-2.1 | 1,634,089,275,262,912,500 | 35.874251 | 91 | 0.592563 | false | 4.127346 | false | false | false |
tere-valdivia/icy_tower_tarea | ControladorPlataforma.py | 1 | 1051 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 15:26:23 2017
@author: terevaldivia
"""
import os
from CC3501Utils_personal import *
import numpy as np
from Plataforma import *
class ControladorPlataforma:
def __init__(self):
self.lista = []
self.generar()
def generar(self):
        # 20 stone platforms
alturas = np.linspace(150, 2100, 19)
mu = 400
sigma = 100
self.lista.append(PlataformaPiedra(Vector(180 + 12, 50)))
for a in alturas:
new = PlataformaPiedra(Vector(np.random.normal(loc=mu, scale=sigma), a))
self.lista.append(new)
        # 20 vine (liana) platforms
alturas +=2050
for a in alturas:
new = PlataformaLiana(Vector(np.random.normal(loc=mu, scale=sigma), a))
self.lista.append(new)
        # and 10 wooden platforms
alturas += 2050
for a in alturas:
new = PlataformaMadera(Vector(np.random.normal(loc=mu, scale=sigma), a))
self.lista.append(new)
| mit | -6,942,142,383,207,466,000 | 27.432432 | 84 | 0.58706 | false | 3.204268 | false | false | false |
coin-or/Dip | Dip/src/dippy/examples/bpp/mdbin_pack_func.py | 1 | 2645 | from builtins import range
from builtins import object
import sys
from pulp import LpVariable, lpSum, LpBinary, LpStatusOptimal
try:
import path
except ImportError:
pass
try:
import dippy
except ImportError:
try:
import src.dippy as dippy
except ImportError:
import coinor.dippy as dippy
from math import floor, ceil
class MDBinPackProb(object):
def __init__(self, ITEMS, LIMITS, volume, capacity):
self.ITEMS = ITEMS
self.LIMITS = LIMITS
self.volume = volume
self.capacity = capacity
self.BINS = list(range(len(ITEMS))) # Create 1 bin for each item, indices
# start at 0
def formulate(bpp):
prob = dippy.DipProblem("Bin Packing",
display_mode = 'xdot',
# layout = 'bak',
display_interval = None,
)
assign_vars = LpVariable.dicts("x",
[(i, j) for i in bpp.ITEMS
for j in bpp.BINS],
cat=LpBinary)
use_vars = LpVariable.dicts("y", bpp.BINS, cat=LpBinary)
waste_vars = LpVariable.dicts("w", [(j, k) for j in bpp.BINS
for k in bpp.LIMITS], 0, None)
prob += lpSum(use_vars[j] for j in bpp.BINS), "min_bins"
for j in bpp.BINS:
for k in bpp.LIMITS:
prob += lpSum(bpp.volume[i, k] * assign_vars[i, j] for i in bpp.ITEMS) \
+ waste_vars[j, k] == bpp.capacity[k] * use_vars[j]
for i in bpp.ITEMS:
prob += lpSum(assign_vars[i, j] for j in bpp.BINS) == 1
for i in bpp.ITEMS:
for j in bpp.BINS:
prob += assign_vars[i, j] <= use_vars[j]
for n in range(0, len(bpp.BINS) - 1):
prob += use_vars[bpp.BINS[n]] >= use_vars[bpp.BINS[n + 1]]
# Attach the problem data and variable dictionaries to the DipProblem
prob.bpp = bpp
prob.assign_vars = assign_vars
prob.use_vars = use_vars
prob.waste_vars = waste_vars
return prob
def solve(prob):
dippyOpts = {
# 'doPriceCut' : '1',
'CutCGL': '1',
# 'SolveMasterAsIp': '0'
# 'generateInitVars': '1',
# 'LogDebugLevel': 5,
# 'LogDumpModel': 5,
}
status, message, primals, duals = dippy.Solve(prob, dippyOpts)
if status == LpStatusOptimal:
return dict((var, var.value()) for var in prob.variables())
else:
return None
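# Illustrative usage sketch (the data below is made up, not part of the original
# example): volumes are indexed by (item, limit) pairs and capacities by limit.
#
#   ITEMS = ["a", "b", "c"]
#   LIMITS = ["weight", "volume"]
#   volume = {("a", "weight"): 2, ("a", "volume"): 3,
#             ("b", "weight"): 4, ("b", "volume"): 1,
#             ("c", "weight"): 3, ("c", "volume"): 2}
#   capacity = {"weight": 5, "volume": 4}
#
#   bpp = MDBinPackProb(ITEMS, LIMITS, volume, capacity)
#   solution = solve(formulate(bpp))   # dict of {variable: value}, or None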
| epl-1.0 | -8,794,339,438,024,126,000 | 28.719101 | 82 | 0.517958 | false | 3.404118 | false | false | false |
fperez/sympy | sympy/series/order.py | 1 | 8346 | from sympy.core.basic import Basic, S, C, sympify
from sympy.core import oo, Rational, Pow
from sympy.core.cache import cacheit
class Order(Basic):
"""
Represents O(f(x)) at the point x = 0.
Definition
==========
g(x) = O(f(x)) as x->0 if and only if
|g(x)|<=M|f(x)| near x=0 (1)
for some positive but finite M. An equivalent way of saying (1) is:
lim_{x->0} |g(x)/f(x)| < oo
Let's illustrate it on the following example:
sin x = x - x**3/3! + O(x**5)
where in this case O(x**5) = x**5/5! - x**7/7! + .... and the definition
of O means:
|x**5/5! - x**7/7! + ....| <= M|x**5| near x=0
or equivalently:
lim_{x->0} | (x**5/5! - x**7/7! + ....) / x**5| < oo
which surely is true, because
lim_{x->0} | (x**5/5! - x**7/7! + ....) / x**5| = 1/5!
So intuitively O(x**3) means: all terms x**3, x**4 and
higher. But not x**2, x or 1.
Examples:
=========
>>> from sympy import O
>>> from sympy.abc import x
>>> O(x)
O(x)
>>> O(x)*x
O(x**2)
>>> O(x)-O(x)
O(x)
External links
--------------
U{Big O notation<http://en.wikipedia.org/wiki/Big_O_notation>}
Properties:
===========
g(x) = O(f(x)) as x->0 <-> |g(x)|<=M|f(x)| near x=0 <-> lim_{x->0} |g(x)/f(x)| < oo
g(x,y) = O(f(x,y)) <-> lim_{x,y->0} |g(x,y)/f(x,y)| < oo, we'll assume that limits commute.
Notes:
======
In O(f(x),x) the expression f(x) is assumed to have a leading term.
O(f(x),x) is automatically transformed to O(f(x).as_leading_term(x),x).
O(expr*f(x),x) is O(f(x),x)
O(expr,x) is O(1)
O(0, x) is 0.
Multivariate O is also supported:
O(f(x,y),x,y) is transformed to O(f(x,y).as_leading_term(x,y).as_leading_term(y), x, y)
    If O is used with only the expression argument then the symbols are
    all the symbols in the expression.
"""
is_Order = True
__slots__ = []
@cacheit
def __new__(cls, expr, *symbols, **assumptions):
expr = sympify(expr).expand()
if expr is S.NaN:
return S.NaN
if symbols:
symbols = map(sympify, symbols)
else:
symbols = list(expr.atoms(C.Symbol))
symbols.sort(Basic.compare)
if expr.is_Order:
new_symbols = list(expr.symbols)
for s in symbols:
if s not in new_symbols:
new_symbols.append(s)
if len(new_symbols)==len(expr.symbols):
return expr
symbols = new_symbols
elif symbols:
symbol_map = {}
new_symbols = []
for s in symbols:
if isinstance(s, C.Symbol):
new_symbols.append(s)
continue
z = C.Symbol('z',dummy=True)
x1,s1 = s.solve4linearsymbol(z)
expr = expr.subs(x1,s1)
symbol_map[z] = s
new_symbols.append(z)
if symbol_map:
r = Order(expr, *new_symbols, **assumptions)
expr = r.expr.subs(symbol_map)
symbols = []
for s in r.symbols:
if symbol_map.has_key(s):
symbols.append(symbol_map[s])
else:
symbols.append(s)
else:
if expr.is_Add:
lst = expr.extract_leading_order(*symbols)
expr = C.Add(*[f.expr for (e,f) in lst])
else:
expr = expr.as_leading_term(*symbols)
coeff, terms = expr.as_coeff_terms()
if coeff is S.Zero:
return coeff
expr = C.Mul(*[t for t in terms if t.has(*symbols)])
elif expr is not S.Zero:
expr = S.One
if expr is S.Zero:
return expr
# create Order instance:
obj = Basic.__new__(cls, expr, *symbols, **assumptions)
return obj
def _hashable_content(self):
if self.args[0].is_number:
return (self.args[0],)
return self.args
def oseries(self, order):
return self
def _eval_nseries(self, x, x0, n):
return self
@classmethod
def find_limit(cls, f, x):
"""Basically identical to:
return limit(f, x, 0, dir="+")
but first trying some easy cases (like x**2) using heuristics, to avoid
infinite recursion. This is only needed in the Order class and series
expansion (that shouldn't rely on the Gruntz algorithm too much),
that's why find_limit() is defined here.
"""
from sympy import limit, Wild, log
if f.is_Pow:
if f.args[0] == x:
if f.args[1].is_Rational:
if f.args[1] > 0:
return S.Zero
else:
return oo
if f.args[1].is_number:
if f.args[1].evalf() > 0:
return S.Zero
else:
return oo
if f == x:
return S.Zero
p, q = Wild("p"), Wild("q")
r = f.match(x**p * log(x)**q)
if r:
p, q = r[p], r[q]
if q.is_number and p.is_number:
if q > 0:
if p > 0:
return S.Zero
else:
return -oo
elif q < 0:
if p >= 0:
return S.Zero
else:
return -oo
return limit(f, x, 0, dir="+")
@property
def expr(self):
return self._args[0]
@property
def symbols(self):
return self._args[1:]
def _eval_power(b, e):
if e.is_Number:
return Order(b.expr ** e, *b.symbols)
return
def as_expr_symbols(self, order_symbols):
if order_symbols is None:
order_symbols = self.symbols
else:
for s in self.symbols:
if s not in order_symbols:
order_symbols = order_symbols + (s,)
return self.expr, order_symbols
@cacheit
def contains(self, expr):
"""
Return True if expr belongs to Order(self.expr, *self.symbols).
Return False if self belongs to expr.
Return None if the inclusion relation cannot be determined (e.g. when self and
expr have different symbols).
"""
from sympy import powsimp
if expr is S.Zero:
return True
if expr is S.NaN:
return False
if expr.is_Order:
if self.symbols and expr.symbols:
common_symbols = tuple([s for s in self.symbols if s in expr.symbols])
elif self.symbols:
common_symbols = self.symbols
else:
common_symbols = expr.symbols
if not common_symbols:
if not (self.symbols or expr.symbols): # O(1),O(1)
return True
return None
r = None
for s in common_symbols:
l = Order.find_limit(powsimp(self.expr/expr.expr, deep=True,\
combine='exp'), s) != 0
if r is None:
r = l
else:
if r != l:
return
return r
obj = Order(expr, *self.symbols)
return self.contains(obj)
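    # Illustrative examples of the behaviour implemented above (with symbols x, y):
    #   Order(x**2).contains(x**3)  -> True  (x**3 lies in O(x**2) as x -> 0)
    #   Order(x**2).contains(x)     -> False (x is not in O(x**2) as x -> 0)
    #   Order(x).contains(Order(y)) -> None  (no common symbols, undecidable)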
def _eval_subs(self, old, new):
if self==old:
return new
if isinstance(old, C.Symbol) and old in self.symbols:
i = list(self.symbols).index(old)
if isinstance(new, C.Symbol):
return Order(self.expr._eval_subs(old, new), *(self.symbols[:i]+(new,)+self.symbols[i+1:]))
return Order(self.expr._eval_subs(old, new), *(self.symbols[:i]+self.symbols[i+1:]))
return Order(self.expr._eval_subs(old, new), *self.symbols)
def _sage_(self):
#XXX: SAGE doesn't have Order yet. Let's return 0 instead.
return Rational(0)._sage_()
Basic.singleton['O'] = lambda : Order
| bsd-3-clause | 4,637,688,196,921,517,000 | 28.701068 | 107 | 0.472682 | false | 3.683142 | false | false | false |
sporto/rails_go_to_spec | rails_go_to_spec.py | 1 | 1760 | import sublime, sublime_plugin, os
import RailsGoToSpec.resolver
import re
class RailsGoToSpecCommand(sublime_plugin.WindowCommand):
def run(self):
sublime.status_message('Running Rails Go To Spec')
win = self.window
view = win.active_view()
current_file = view.file_name()
# remove the root dir
root_path = win.folders()[0]
current_file = re.sub(root_path, '', current_file)
if os.name == 'nt':
current_file = current_file.replace('\\', '/')
spec_base = view.settings().get('go_to_spec_directory') or 'spec'
related_files = RailsGoToSpec.resolver.Resolver().run(current_file, spec_base)
# add the root dir to all files
for ix, file in enumerate(related_files):
related_files[ix] = root_path + file
self.open_any(related_files)
def is_enabled(self):
return self.window.active_view() != None
def open_any(self, files):
if len(files) == 0:
sublime.status_message("Not a valid file")
return
opened = False
for file in files:
if not opened:
opened = self.open(file)
if opened:
return
first = files[0]
if sublime.ok_cancel_dialog("Create file? " + first):
self.create(first)
self.window.open_file(first)
def open(self, file):
if file == "":
sublime.status_message("Not a valid file")
return False
if os.path.exists(file):
sublime.status_message("File exists " + file)
self.window.open_file(file)
sublime.status_message("Opening " + file)
return True
else:
return False
def create(self, filename):
base, filename = os.path.split(filename)
self.create_folder(base)
def create_folder(self, base):
if not os.path.exists(base):
parent = os.path.split(base)[0]
if not os.path.exists(parent):
self.create_folder(parent)
os.mkdir(base)
| mit | -6,015,399,855,885,229,000 | 23.109589 | 80 | 0.680682 | false | 2.943144 | false | false | false |
zozo123/buildbot | master/buildbot/test/fake/fakemaster.py | 1 | 5314 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
import os.path
import weakref
from buildbot import config
from buildbot import interfaces
from buildbot.status import build
from buildbot.test.fake import bslavemanager
from buildbot.test.fake import fakedata
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemq
from buildbot.test.fake import pbmanager
from buildbot.test.fake.botmaster import FakeBotMaster
from twisted.internet import defer
from zope.interface import implements
class FakeCache(object):
"""Emulate an L{AsyncLRUCache}, but without any real caching. This
I{does} do the weakref part, to catch un-weakref-able objects."""
def __init__(self, name, miss_fn):
self.name = name
self.miss_fn = miss_fn
def get(self, key, **kwargs):
d = self.miss_fn(key, **kwargs)
def mkref(x):
if x is not None:
weakref.ref(x)
return x
d.addCallback(mkref)
return d
def put(self, key, val):
pass
class FakeCaches(object):
def get_cache(self, name, miss_fn):
return FakeCache(name, miss_fn)
class FakeStatus(object):
def __init__(self, master):
self.master = master
self.lastBuilderStatus = None
def builderAdded(self, name, basedir, tags=None, description=None):
bs = FakeBuilderStatus(self.master)
self.lastBuilderStatus = bs
return bs
def getBuilderNames(self):
return []
def getSlaveNames(self):
return []
def slaveConnected(self, name):
pass
def build_started(self, brid, buildername, build_status):
pass
def getURLForBuild(self, builder_name, build_number):
return "URLForBuild/%s/%d" % (builder_name, build_number)
def getURLForBuildrequest(self, buildrequestid):
return "URLForBuildrequest/%d" % (buildrequestid,)
class FakeBuilderStatus(object):
implements(interfaces.IBuilderStatus)
def __init__(self, master=None, buildername="Builder"):
if master:
self.master = master
self.basedir = os.path.join(master.basedir, 'bldr')
self.lastBuildStatus = None
self._tags = None
self.name = buildername
def setDescription(self, description):
self._description = description
def getDescription(self):
return self._description
def getTags(self):
return self._tags
def setTags(self, tags):
self._tags = tags
def matchesAnyTag(self, tags):
return set(self._tags) & set(tags)
def setSlavenames(self, names):
pass
def setCacheSize(self, size):
pass
def setBigState(self, state):
pass
def newBuild(self):
bld = build.BuildStatus(self, self.master, 3)
self.lastBuildStatus = bld
return bld
def buildStarted(self, builderStatus):
pass
def addPointEvent(self, text):
pass
class FakeMaster(object):
"""
Create a fake Master instance: a Mock with some convenience
implementations:
- Non-caching implementation for C{self.caches}
"""
def __init__(self, master_id=fakedb.FakeBuildRequestsComponent.MASTER_ID):
self._master_id = master_id
self.config = config.MasterConfig()
self.caches = FakeCaches()
self.pbmanager = pbmanager.FakePBManager()
self.basedir = 'basedir'
self.botmaster = FakeBotMaster(master=self)
self.botmaster.parent = self
self.status = FakeStatus(self)
self.status.master = self
self.name = 'fake:/master'
self.masterid = master_id
self.buildslaves = bslavemanager.FakeBuildslaveManager(self)
def getObjectId(self):
return defer.succeed(self._master_id)
def subscribeToBuildRequests(self, callback):
pass
# work around http://code.google.com/p/mock/issues/detail?id=105
def _get_child_mock(self, **kw):
return mock.Mock(**kw)
# Leave this alias, in case we want to add more behavior later
def make_master(wantMq=False, wantDb=False, wantData=False,
testcase=None, **kwargs):
master = FakeMaster(**kwargs)
if wantData:
wantMq = wantDb = True
if wantMq:
assert testcase is not None, "need testcase for wantMq"
master.mq = fakemq.FakeMQConnector(master, testcase)
if wantDb:
assert testcase is not None, "need testcase for wantDb"
master.db = fakedb.FakeDBConnector(master, testcase)
if wantData:
master.data = fakedata.FakeDataConnector(master, testcase)
return master
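# Sketch of typical test usage (assumes a twisted.trial TestCase instance as
# `self`; exact helper names may differ between buildbot versions):
#
#   self.master = make_master(wantDb=True, testcase=self)
#   self.master.db.insertTestData([...])   # pre-populate the fake database
#
# Passing wantData=True pulls in both the fake MQ and the fake DB connectors.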
| gpl-3.0 | -5,274,690,665,599,939,000 | 27.265957 | 79 | 0.666353 | false | 3.945063 | true | false | false |
Yarichi/Proyecto-DASI | Malmo/Python_Examples/MazeRunner.py | 1 | 9848 | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import MalmoPython
import os
import random
import sys
import time
import json
import errno
maze1 = '''
<MazeDecorator>
<SizeAndPosition length="60" width="60" yOrigin="225" zOrigin="0" height="180"/>
<GapProbability variance="0.4">0.5</GapProbability>
<Seed>random</Seed>
<MaterialSeed>random</MaterialSeed>
<AllowDiagonalMovement>false</AllowDiagonalMovement>
<StartBlock fixedToEdge="true" type="emerald_block" height="1"/>
<EndBlock fixedToEdge="true" type="redstone_block diamond_block gold_block" height="12"/>
<PathBlock type="stained_hardened_clay" colour="WHITE ORANGE MAGENTA LIGHT_BLUE YELLOW LIME PINK GRAY SILVER CYAN PURPLE BLUE BROWN GREEN RED BLACK" height="1"/>
<FloorBlock type="stone"/>
<GapBlock type="air"/>
<AddQuitProducer description="finished maze"/>
<AddNavigationObservations/>
</MazeDecorator>
'''
maze2 = '''
<MazeDecorator>
<SizeAndPosition length="19" width="19" scale="3" yOrigin="225" zOrigin="0" height="180"/>
<GapProbability variance="0.4">0.5</GapProbability>
<Seed>random</Seed>
<MaterialSeed>random</MaterialSeed>
<AllowDiagonalMovement>false</AllowDiagonalMovement>
<StartBlock fixedToEdge="true" type="emerald_block" height="1"/>
<EndBlock fixedToEdge="true" type="redstone_block lapis_block" height="12"/>
<PathBlock type="stained_glass" colour="WHITE ORANGE MAGENTA LIGHT_BLUE YELLOW LIME PINK GRAY SILVER CYAN PURPLE BLUE BROWN GREEN RED BLACK" height="1"/>
<FloorBlock type="glowstone"/>
<GapBlock type="stone" height="10"/>
<AddQuitProducer description="finished maze"/>
<AddNavigationObservations/>
</MazeDecorator>
'''
maze3 = '''
<MazeDecorator>
<SizeAndPosition length="60" width="60" yOrigin="225" zOrigin="0" height="180"/>
<GapProbability>0.2</GapProbability>
<Seed>random</Seed>
<MaterialSeed>random</MaterialSeed>
<AllowDiagonalMovement>false</AllowDiagonalMovement>
<StartBlock fixedToEdge="true" type="emerald_block" height="1"/>
<EndBlock fixedToEdge="true" type="redstone_block" height="12"/>
<PathBlock type="glowstone stained_glass dirt" colour="WHITE ORANGE MAGENTA LIGHT_BLUE YELLOW LIME PINK GRAY SILVER CYAN PURPLE BLUE BROWN GREEN RED BLACK" height="1"/>
<FloorBlock type="stone" variant="smooth_granite"/>
<SubgoalBlock type="beacon sea_lantern glowstone"/>
<GapBlock type="air"/>
<AddQuitProducer description="finished maze"/>
<AddNavigationObservations/>
</MazeDecorator>
'''
maze4 = '''
<MazeDecorator>
<SizeAndPosition length="60" width="60" yOrigin="225" zOrigin="0" height="180"/>
<GapProbability variance="0.4">0.5</GapProbability>
<Seed>random</Seed>
<MaterialSeed>random</MaterialSeed>
<AllowDiagonalMovement>false</AllowDiagonalMovement>
<StartBlock fixedToEdge="true" type="emerald_block" height="1"/>
<EndBlock fixedToEdge="true" type="redstone_block" height="12"/>
<PathBlock type="stone dirt stained_hardened_clay" colour="WHITE ORANGE MAGENTA LIGHT_BLUE YELLOW LIME PINK GRAY SILVER CYAN PURPLE BLUE BROWN GREEN RED BLACK" height="1"/>
<FloorBlock type="stone" variant="smooth_granite"/>
<SubgoalBlock type="beacon sea_lantern glowstone"/>
<OptimalPathBlock type="stone" variant="smooth_granite andesite smooth_diorite diorite"/>
<GapBlock type="lapis_ore stained_hardened_clay air" colour="WHITE ORANGE MAGENTA LIGHT_BLUE YELLOW LIME PINK GRAY SILVER CYAN PURPLE BLUE BROWN GREEN RED BLACK" height="3" heightVariance="3"/>
<AddQuitProducer description="finished maze"/>
<AddNavigationObservations/>
</MazeDecorator>
'''
def GetMissionXML( mazeblock ):
return '''<?xml version="1.0" encoding="UTF-8" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Run the maze!</Summary>
</About>
<ModSettings>
<MsPerTick>''' + str(TICK_LENGTH) + '''</MsPerTick>
</ModSettings>
<ServerSection>
<ServerInitialConditions>
<AllowSpawning>false</AllowSpawning>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,220*1,5*3,2;3;,biome_1" />
''' + mazeblock + '''
<ServerQuitFromTimeUp timeLimitMs="45000"/>
<ServerQuitWhenAnyAgentFinishes />
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>James Bond</Name>
<AgentStart>
<Placement x="-204" y="81" z="217"/>
</AgentStart>
<AgentHandlers>
<ContinuousMovementCommands turnSpeedDegs="840">
<ModifierList type="deny-list"> <!-- Example deny-list: prevent agent from strafing -->
<command>strafe</command>
</ModifierList>
</ContinuousMovementCommands>
</AgentHandlers>
</AgentSection>
</Mission>'''
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
validate = True
mazeblocks = [maze1, maze2, maze3, maze4]
agent_host = MalmoPython.AgentHost()
agent_host.addOptionalIntArgument( "speed,s", "Length of tick, in ms.", 50)
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)
if agent_host.receivedArgument("test"):
num_reps = 1
else:
num_reps = 30000
recordingsDirectory="MazeRecordings"
TICK_LENGTH = agent_host.getIntArgument("speed")
try:
os.makedirs(recordingsDirectory)
except OSError as exception:
if exception.errno != errno.EEXIST: # ignore error if already existed
raise
# Set up a recording
my_mission_record = MalmoPython.MissionRecordSpec()
my_mission_record.recordRewards()
my_mission_record.recordObservations()
for iRepeat in xrange(num_reps):
my_mission_record.setDestination(recordingsDirectory + "//" + "Mission_" + str(iRepeat) + ".tgz")
mazeblock = random.choice(mazeblocks)
my_mission = MalmoPython.MissionSpec(GetMissionXML(mazeblock),validate)
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, my_mission_record )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission:",e
exit(1)
else:
time.sleep(2)
print "Waiting for the mission to start",
world_state = agent_host.getWorldState()
while not world_state.has_mission_begun:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
if len(world_state.errors):
print
for error in world_state.errors:
print "Error:",error.text
exit()
print
# main loop:
while world_state.is_mission_running:
if world_state.number_of_observations_since_last_state > 0:
print "Got " + str(world_state.number_of_observations_since_last_state) + " observations since last state."
msg = world_state.observations[-1].text
ob = json.loads(msg)
current_yaw_delta = ob.get(u'yawDelta', 0)
current_speed = (1-abs(current_yaw_delta))
print "Got observation: " + str(current_yaw_delta)
try:
agent_host.sendCommand( "move " + str(current_speed) )
agent_host.sendCommand( "turn " + str(current_yaw_delta) )
except RuntimeError as e:
print "Failed to send command:",e
pass
world_state = agent_host.getWorldState()
print "Mission has stopped."
time.sleep(0.5) # Give mod a little time to get back to dormant state.
| gpl-2.0 | 2,658,488,039,185,188,400 | 41.768889 | 201 | 0.625 | false | 3.771735 | false | false | false |
igemsoftware2017/USTC-Software-2017 | biohub/abacus/views.py | 1 | 1421 | from rest_framework import views, permissions, parsers
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from .handlers import get_handler, query
from .result import AbacusAsyncResult
from .security import validate_signature
class StartView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
parser_classes = (parsers.MultiPartParser, parsers.FormParser,)
def post(self, request):
handler = get_handler(request)
return Response(handler.start_task(request.user))
class QueryView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, task_id):
return Response(query(task_id))
class CallbackView(views.APIView):
def fail(self, detail):
raise ValidationError(detail)
def get(self, request):
async_result = AbacusAsyncResult(request.GET.get('task_id', ''))
if not validate_signature(async_result, request.GET.get('s', '')):
self.fail('Bad signature.')
if async_result._get_field('status') is None:
self.fail('Task not exists.')
if 'error' in request.GET:
async_result.error(None)
elif 'output' in request.GET:
async_result.resolve(request.GET['output'])
else:
self.fail('Should specify either error or output.')
return Response('')
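# --- Hypothetical wiring sketch (not part of the original module) ---
# These DRF views would typically be exposed through a URLconf; the import
# style, route patterns and prefixes below are assumptions, not taken from
# the original project:
#
#     from django.conf.urls import url
#     urlpatterns = [
#         url(r'^start/$', StartView.as_view()),
#         url(r'^query/(?P<task_id>.+)/$', QueryView.as_view()),
#         url(r'^callback/$', CallbackView.as_view()),
#     ]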
| gpl-3.0 | 5,578,610,467,308,912,000 | 29.234043 | 74 | 0.679099 | false | 4.216617 | false | false | false |
edison7500/django-leancloud-sms | leancloud/sms.py | 1 | 2259 | # coding=utf-8
import re
import requests
import json
import logging
from django.conf import settings
from .exceptions import LeanCloudException, PhoneNumberException
logger = logging.getLogger(__name__)
headers = getattr(settings, 'LEANCLOUD_HEADERS')
class LeanCloudSMS(object):
    # Accepts mainland-China landline numbers (leading 0 + area code) and
    # 13x/15x/18x/147 mobile numbers; all alternatives are anchored so
    # trailing characters are rejected.
    cell_phone_match = re.compile(r'^0\d{2,3}\d{7,8}$|^1[358]\d{9}$|^147\d{8}$')
url = 'https://leancloud.cn/1.1/requestSmsCode'
# verify_url = 'https://leancloud.cn/1.1/verifySmsCode/%s?mobilePhoneNumber=%s' % (verify_code, phone_number)
verify_url = 'https://leancloud.cn/1.1/verifySmsCode/'
def __init__(self, name=None):
if name is None:
self.name = getattr(settings, 'LEANCLOUD_SMS_NAME', None)
else:
self.name = name
self.headers = getattr(settings, 'LEANCLOUD_HEADERS', None)
assert self.name is not None
assert self.headers is not None
def _check_phone_num(self, phone_number):
if self.cell_phone_match.match(phone_number):
return True
else:
return False
def send_sms(self, phone_number):
if not self._check_phone_num(phone_number):
            raise PhoneNumberException(u'Please enter a valid mobile phone number')
payload = {'mobilePhoneNumber': str(phone_number)}
payload.update({
'name': self.name,
})
        res = requests.post(self.url, json=payload, headers=self.headers)
if res.status_code == 200:
# data = res.json()
return True, 'success'
else:
msg = res.json()['error']
raise LeanCloudException(u"{msg}".format(msg=msg))
def verify_phone_code(self, phone_number, verify_code):
if not self._check_phone_num(phone_number):
            raise PhoneNumberException(u'Please enter a valid mobile phone number')
_verify_url = "{url}{code}?mobilePhoneNumber={phone_num}".format(
url=self.verify_url,
code=verify_code,
phone_num=phone_number,
)
        res = requests.post(_verify_url, headers=self.headers)
if res.status_code == 200:
return True, 'success'
else:
msg = res.json()['error']
raise LeanCloudException(u"{msg}".format(msg=msg))
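# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a configured Django project whose settings define LEANCLOUD_SMS_NAME
# and LEANCLOUD_HEADERS (the LeanCloud app id/key headers); the phone number
# and code below are placeholders:
#
#     sms = LeanCloudSMS()
#     ok, msg = sms.send_sms('13800138000')                      # request a code
#     ok, msg = sms.verify_phone_code('13800138000', '123456')   # verify it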
| gpl-3.0 | -8,270,872,587,978,459,000 | 32.179104 | 113 | 0.605488 | false | 3.484326 | false | false | false |
PaddlePaddle/models | PaddleCV/tracking/pytracking/admin/environment.py | 1 | 2051 | import importlib
import os
class EnvSettings:
def __init__(self):
pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
self.results_path = '{}/tracking_results/'.format(pytracking_path)
self.network_path = '{}/networks/'.format(pytracking_path)
self.dataset_path = '{}/benchmark_datasets/'.format(pytracking_path)
def create_default_local_file():
comment = {'results_path': 'Where to store tracking results',
'dataset_path': 'Where benchmark datasets are stored',
'network_path': 'Where tracking networks are stored.'}
path = os.path.join(os.path.dirname(__file__), 'local.py')
with open(path, 'w') as f:
settings = EnvSettings()
f.write('from pytracking.admin.environment import EnvSettings\n\n')
f.write('def local_env_settings():\n')
f.write(' settings = EnvSettings()\n\n')
f.write(' # Set your local paths here.\n\n')
for attr in dir(settings):
comment_str = None
if attr in comment:
comment_str = comment[attr]
attr_val = getattr(settings, attr)
if not attr.startswith('__') and not callable(attr_val):
if comment_str is None:
f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val))
else:
f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str))
f.write('\n return settings\n\n')
def env_settings():
env_module_name = 'pytracking.admin.local'
try:
env_module = importlib.import_module(env_module_name)
return env_module.local_env_settings()
except:
env_file = os.path.join(os.path.dirname(__file__), 'local.py')
# Create a default file
create_default_local_file()
raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. '
'Then try to run again.'.format(env_file))
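# --- Hypothetical usage sketch (not part of the original module) ---
# On first use env_settings() writes pytracking/admin/local.py with default
# paths and raises, asking the user to edit it; after that the settings can
# be read directly:
#
#     settings = env_settings()
#     print(settings.network_path)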
| apache-2.0 | 5,295,043,297,320,136,000 | 38.442308 | 110 | 0.575329 | false | 3.784133 | false | false | false |
ShaolongHu/Nitrate | tcms/testplans/tests.py | 1 | 4490 | # -*- coding: utf-8 -*-
import unittest
from django.test.client import Client
class PlanTests(unittest.TestCase):
def setUp(self):
self.c = Client()
self.plan_id = 2256
self.status_codes = [301, 302]
def test_plans(self):
response = self.c.get('/plans/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_plan_new(self):
response = self.c.get('/plan/new/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_plan_clone(self):
response = self.c.get('/plans/clone/', {'plan_id': self.plan_id})
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_details(self):
location = '/plan/%s/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_cases(self):
location = '/plan/%s/cases/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_importcase(self):
location = '/plan/%s/importcase/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_delete(self):
location = '/plan/%s/delete/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_searchcase(self):
location = '/plan/%s/searchcase/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_delcase(self):
location = '/plan/%s/delcase/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_ordercase(self):
location = '/plan/%s/ordercase/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_edit(self):
location = '/plan/%s/edit/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_printable(self):
location = '/plan/%s/printable/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_export(self):
location = '/plan/%s/export/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_attachment(self):
location = '/plan/%s/attachment/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_plan_history(self):
location = '/plan/%s/history/' % self.plan_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,678,989,214,991,336,400 | 33.015152 | 73 | 0.608018 | false | 4.063348 | true | false | false |