repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
linearb/mojo | rasppi/rasp.py | 1 | 3195 | #!/usr/bin/python
"""Sound an alarm if a raspberry pi hasn't been heard from lately
To set an alarm for pi named 'pi', create a file in mmdata/pulse.d named pi.alarm
"""
import os.path
import time
pulse="/home/mojotronadmin/mmdata/pulse.d/"
logfile="/home/mojotronadmin/mmdata/incoming.log"
maxinterval = 15*60 # how many seconds without contact before sounding first alarm
alarm_once = False # if True then only sound alarm once, then disable it
snooze = True # if True then delay before re-sounding alarm
snoozedelay = 120*60 # in seconds
should_sendsms = True # send an sms on alarm
alarm_smsnumber = "NEEDED"
should_sendemail = False # send an email on alarm
alarm_emailaddress = "[email protected]"
from twilio.rest import TwilioRestClient
def sendsms(tonumber, message):
account_sid = "NEEDED"
auth_token = "NEEDED"
client = TwilioRestClient(account_sid, auth_token)
twilio_number = "NEEDED"
reply = client.messages.create(to=tonumber, from_=twilio_number, body=message)
import commands
def sendemail(toaddress, message):
cmd = "echo '' | mail -s '{}' {}".format(message, toaddress)
(status, output) = commands.getstatusoutput(cmd)
# should catch error if status is not 0
def alarm(pi_name):
message = pi_name + " is down."
if should_sendsms:
sendsms(alarm_smsnumber, message)
if should_sendemail:
sendemail(alarm_emailaddress, message)
# If alarm file '[piname].alarm' does not exist, the alarm for that pi is disabled.
# If that file is empty, the alarm goes off once maxinterval seconds have passed since the pi was last heard from.
# If it contains an integer timestamp, snooze is in effect: the alarm goes off once maxinterval seconds
# have passed since that timestamp. If the alarm file contains anything else, the alarm is disabled.
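# For illustration (the pi name "kitchen" is made up; the directory is the pulse
# path configured above): arming monitoring for that pi is just
#   touch /home/mojotronadmin/mmdata/pulse.d/kitchen.alarm
# and deleting the file (or writing any non-numeric text into it) disables it again.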
def main():
alarmfilelist = [x for x in os.listdir(pulse) if x.endswith(".alarm")]
for filename in alarmfilelist:
# get information about last time this pi contacted us
last_timestamp = "0"
pi_filename = filename[:-6]
if os.path.exists(pulse + pi_filename):
with open(pulse + pi_filename, 'r') as f:
last_timestamp = f.readline().rstrip()
# if there is an alarm file, sound alarm if haven't heard from pi recently
with open(pulse + filename, 'r+') as f:
timestamp = f.readline().rstrip()
if timestamp == "":
timestamp = last_timestamp
if timestamp.isdigit():
now = time.time()
if now - int(timestamp) > maxinterval:
alarm(pi_filename)
if alarm_once:
# only send alarm once, so disable alarm now
f.seek(0)
f.write("disabled\n")
f.truncate()
elif snooze:
# reset alarm time to snoozedelay seconds in future
f.seek(0)
f.write(str(int(now + snoozedelay)) + "\n")
f.truncate()
if __name__ == "__main__":
main()
| mit | -2,364,741,625,820,722,000 | 37.035714 | 101 | 0.610329 | false | 3.803571 | false | false | false |
garrettr/securedrop | securedrop/journalist_app/col.py | 1 | 3264 | # -*- coding: utf-8 -*-
from flask import (Blueprint, redirect, url_for, render_template, flash,
request, abort, send_file, current_app)
from flask_babel import gettext
from sqlalchemy.orm.exc import NoResultFound
import crypto_util
import store
from db import db_session, Submission
from journalist_app.forms import ReplyForm
from journalist_app.utils import (make_star_true, make_star_false, get_source,
delete_collection, col_download_unread,
col_download_all, col_star, col_un_star,
col_delete)
def make_blueprint(config):
view = Blueprint('col', __name__)
@view.route('/add_star/<filesystem_id>', methods=('POST',))
def add_star(filesystem_id):
make_star_true(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route("/remove_star/<filesystem_id>", methods=('POST',))
def remove_star(filesystem_id):
make_star_false(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route('/<filesystem_id>')
def col(filesystem_id):
form = ReplyForm()
source = get_source(filesystem_id)
source.has_key = crypto_util.getkey(filesystem_id)
return render_template("col.html", filesystem_id=filesystem_id,
source=source, form=form)
@view.route('/delete/<filesystem_id>', methods=('POST',))
def delete_single(filesystem_id):
"""deleting a single collection from its /col page"""
source = get_source(filesystem_id)
delete_collection(filesystem_id)
flash(gettext("{source_name}'s collection deleted")
.format(source_name=source.journalist_designation),
"notification")
return redirect(url_for('main.index'))
@view.route('/process', methods=('POST',))
def process():
actions = {'download-unread': col_download_unread,
'download-all': col_download_all, 'star': col_star,
'un-star': col_un_star, 'delete': col_delete}
if 'cols_selected' not in request.form:
flash(gettext('No collections selected.'), 'error')
return redirect(url_for('main.index'))
# getlist returns every submitted value for 'cols_selected' (request.form is a MultiDict)
cols_selected = request.form.getlist('cols_selected')
action = request.form['action']
if action not in actions:
return abort(500)
method = actions[action]
return method(cols_selected)
@view.route('/<filesystem_id>/<fn>')
def download_single_submission(filesystem_id, fn):
"""Sends a client the contents of a single submission."""
if '..' in fn or fn.startswith('/'):
abort(404)
try:
Submission.query.filter(
Submission.filename == fn).one().downloaded = True
db_session.commit()
except NoResultFound as e:
current_app.logger.error(
"Could not mark " + fn + " as downloaded: %s" % (e,))
return send_file(store.path(filesystem_id, fn),
mimetype="application/pgp-encrypted")
return view
| agpl-3.0 | -8,452,638,703,044,574,000 | 36.090909 | 78 | 0.594975 | false | 4.10566 | false | false | false |
Haizs/NEU-mathe | FILEtoCSV.py | 1 | 1799 | import os.path
import csv
import re
headers = ['Id', 'KnowledgeId', 'Type', 'Src', 'Name', 'Count']
def doSubject(subject):
idCount = 0
rows = []
for root, dirs, files in os.walk('ChoiceSource/' + subject):
for name in files:
if (name != '.DS_Store'):
idCount += 1
qType = 1 if ('Easy' in root) else 2 if ('Averge' in root) else 3
rows.append([idCount, int(re.findall(r'(?<=/)\d+', root)[0]), qType, re.findall(r'/.*', root)[0] + '/',
os.path.splitext(name)[0],6])
with open(subject + '.csv', 'w') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
if __name__ == '__main__':
doSubject('高等数学_GS')
doSubject('复变函数_FB')
doSubject('概率统计_GL')
rows = []
idCount = 0
for root, dirs, files in os.walk('ChoiceSource/线性代数_XD'):
for name in files:
if (name != '.DS_Store'):
idCount += 1
if ('Easy' in root):
rows.append(
[idCount, int(re.findall(r'(?<=chapter)\d', root)[0]), 1, re.findall(r'/.*', root)[0] + '/',
os.path.splitext(name)[0], 6])
elif ('Hard' in root):
rows.append(
[idCount, int(re.findall(r'(?<=chapter)\d', root)[0]), 3, re.findall(r'/.*', root)[0] + '/',
os.path.splitext(name)[0], 6])
else:
rows.append([idCount, 8, 2, re.findall(r'/.*', root)[0] + '/', os.path.splitext(name)[0], 5])
with open('线性代数_XD.csv', 'w') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
| gpl-3.0 | -2,962,789,084,758,935,600 | 35.645833 | 119 | 0.466742 | false | 3.251386 | false | false | false |
obulpathi/poppy | poppy/manager/base/driver.py | 1 | 2204 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ManagerDriverBase(object):
"""Base class for driver manager."""
def __init__(self, conf, storage, providers, dns, distributed_task,
notification):
self._conf = conf
self._storage = storage
self._providers = providers
self._dns = dns
self._distributed_task = distributed_task
self._notification = notification
@property
def conf(self):
"""conf
:returns conf
"""
return self._conf
@property
def storage(self):
"""storage
:returns storage
"""
return self._storage
@property
def providers(self):
"""providers
:returns providers
"""
return self._providers
@property
def dns(self):
return self._dns
@property
def distributed_task(self):
return self._distributed_task
@property
def notification(self):
return self._notification
@abc.abstractproperty
def services_controller(self):
"""Returns the driver's services controller
:raises NotImplementedError
"""
raise NotImplementedError
@abc.abstractproperty
def flavors_controller(self):
"""Returns the driver's flavors controller
:raises NotImplementedError
"""
raise NotImplementedError
@abc.abstractproperty
def health_controller(self):
"""Returns the driver's health controller
:raises NotImplementedError
"""
raise NotImplementedError
| apache-2.0 | 6,152,679,558,350,396,000 | 23.21978 | 71 | 0.640653 | false | 4.886918 | false | false | false |
cheery/essence | essence3/renderer/patch9.py | 1 | 2518 | import pygame
from texture import Texture
def borders(surface):
width, height = surface.get_size()
y0 = 0
y1 = 0
x0 = 0
x1 = 0
i = 0
while i < height:
r,g,b,a = surface.get_at((0,i))
if a > 0:
y0 = i
break
i += 1
while i < height:
r,g,b,a = surface.get_at((0,i))
if a == 0:
y1 = i
break
i += 1
i = 0
while i < width:
r,g,b,a = surface.get_at((i,0))
if a > 0:
x0 = i
break
i += 1
while i < width:
r,g,b,a = surface.get_at((i,0))
if a == 0:
x1 = i
break
i += 1
return [1, x0, x1, width], [1, y0, y1, height]
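# Illustrative result (made-up 12x12 marker image): if the opaque run on the top
# edge covers pixels 4..8 and the run on the left edge covers pixels 3..9,
# borders() returns ([1, 4, 9, 12], [1, 3, 10, 12]) -- the x and y cut points
# that Patch9 uses to slice the texture into its nine regions.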
class Patch9(object):
def __init__(self, texture, (xc, yc)):
self.texture = texture
self.coords = xc, yc
self.width = texture.width - 1
self.height = texture.height - 1
self.padding = xc[1]-xc[0], yc[1]-yc[0], xc[3]-xc[2], yc[3]-yc[2]
@classmethod
def load(cls, atlas, path):
surface = pygame.image.load(path)
width, height = surface.get_size()
data = pygame.image.tostring(surface, "RGBA", 0)
texture = atlas.add_rgba_string(width, height, data)
coords = borders(surface)
return cls(texture, coords)
def __call__(self, emit, (left, top, width, height), color=None):
texture = self.texture
color = color or texture.atlas.white
# c_x = float(color.x+2) / color.atlas.width
# c_y = float(color.y+2) / color.atlas.height
s0 = float(texture.x) / texture.atlas.width
t0 = float(texture.y) / texture.atlas.height
s1 = float(texture.width) / texture.atlas.width
t1 = float(texture.height) / texture.atlas.height
sn = s1 / texture.width
tn = t1 / texture.height
x_cs, y_cs = self.coords
xs = (left, left+self.padding[0], left+width-self.padding[2], left+width)
ys = (top, top +self.padding[1], top+height-self.padding[3], top+height)
for i in range(9):
x = i % 3
y = i / 3
emit(xs[x+0], ys[y+0], x_cs[x+0]*sn + s0, y_cs[y+0]*tn + t0, color.s, color.t)
emit(xs[x+1], ys[y+0], x_cs[x+1]*sn + s0, y_cs[y+0]*tn + t0, color.s, color.t)
emit(xs[x+1], ys[y+1], x_cs[x+1]*sn + s0, y_cs[y+1]*tn + t0, color.s, color.t)
emit(xs[x+0], ys[y+1], x_cs[x+0]*sn + s0, y_cs[y+1]*tn + t0, color.s, color.t)
| gpl-3.0 | -2,208,417,184,195,690,200 | 32.573333 | 90 | 0.503971 | false | 2.867882 | false | false | false |
amitay/samba | source4/scripting/python/samba/samdb.py | 1 | 31787 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2010
# Copyright (C) Matthias Dieter Wallnoefer 2009
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <[email protected]> 2005
# Copyright (C) Giampaolo Lauria <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Convenience functions for using the SAM."""
import samba
import ldb
import time
import base64
import os
from samba import dsdb
from samba.ndr import ndr_unpack, ndr_pack
from samba.dcerpc import drsblobs, misc
from samba.common import normalise_int32
__docformat__ = "restructuredText"
class SamDB(samba.Ldb):
"""The SAM database."""
hash_oid_name = {}
def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
credentials=None, flags=0, options=None, global_schema=True,
auto_connect=True, am_rodc=None):
self.lp = lp
if not auto_connect:
url = None
elif url is None and lp is not None:
url = lp.samdb_url()
self.url = url
super(SamDB, self).__init__(url=url, lp=lp, modules_dir=modules_dir,
session_info=session_info, credentials=credentials, flags=flags,
options=options)
if global_schema:
dsdb._dsdb_set_global_schema(self)
if am_rodc is not None:
dsdb._dsdb_set_am_rodc(self, am_rodc)
def connect(self, url=None, flags=0, options=None):
'''connect to the database'''
if self.lp is not None and not os.path.exists(url):
url = self.lp.private_path(url)
self.url = url
super(SamDB, self).connect(url=url, flags=flags,
options=options)
def am_rodc(self):
'''return True if we are an RODC'''
return dsdb._am_rodc(self)
def am_pdc(self):
'''return True if we are an PDC emulator'''
return dsdb._am_pdc(self)
def domain_dn(self):
'''return the domain DN'''
return str(self.get_default_basedn())
def disable_account(self, search_filter):
"""Disables an account
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
"""
flags = samba.dsdb.UF_ACCOUNTDISABLE
self.toggle_userAccountFlags(search_filter, flags, on=True)
def enable_account(self, search_filter):
"""Enables an account
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
"""
flags = samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_PASSWD_NOTREQD
self.toggle_userAccountFlags(search_filter, flags, on=False)
def toggle_userAccountFlags(self, search_filter, flags, flags_str=None,
on=True, strict=False):
"""Toggle_userAccountFlags
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
:param flags: samba.dsdb.UF_* flags
:param on: on=True (default) => set, on=False => unset
:param strict: strict=False (default) ignore if no action is needed
strict=True raises an Exception if the flag(s) are already in the requested state
"""
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=["userAccountControl"])
if len(res) == 0:
raise Exception("Unable to find account where '%s'" % search_filter)
assert(len(res) == 1)
account_dn = res[0].dn
old_uac = int(res[0]["userAccountControl"][0])
if on:
if strict and (old_uac & flags):
error = "Account flag(s) '%s' already set" % flags_str
raise Exception(error)
new_uac = old_uac | flags
else:
if strict and not (old_uac & flags):
error = "Account flag(s) '%s' already unset" % flags_str
raise Exception(error)
new_uac = old_uac & ~flags
if old_uac == new_uac:
return
mod = """
dn: %s
changetype: modify
delete: userAccountControl
userAccountControl: %u
add: userAccountControl
userAccountControl: %u
""" % (account_dn, old_uac, new_uac)
self.modify_ldif(mod)
def force_password_change_at_next_login(self, search_filter):
"""Forces a password change at next login
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
"""
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=[])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
user_dn = res[0].dn
mod = """
dn: %s
changetype: modify
replace: pwdLastSet
pwdLastSet: 0
""" % (user_dn)
self.modify_ldif(mod)
def newgroup(self, groupname, groupou=None, grouptype=None,
description=None, mailaddress=None, notes=None, sd=None):
"""Adds a new group with additional parameters
:param groupname: Name of the new group
:param grouptype: Type of the new group
:param description: Description of the new group
:param mailaddress: Email address of the new group
:param notes: Notes of the new group
:param sd: security descriptor of the object
"""
group_dn = "CN=%s,%s,%s" % (groupname, (groupou or "CN=Users"), self.domain_dn())
# The new group record. Note the reliance on the SAMLDB module which
# fills in the default information
ldbmessage = {"dn": group_dn,
"sAMAccountName": groupname,
"objectClass": "group"}
if grouptype is not None:
ldbmessage["groupType"] = normalise_int32(grouptype)
if description is not None:
ldbmessage["description"] = description
if mailaddress is not None:
ldbmessage["mail"] = mailaddress
if notes is not None:
ldbmessage["info"] = notes
if sd is not None:
ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
self.add(ldbmessage)
def deletegroup(self, groupname):
"""Deletes a group
:param groupname: Name of the target group
"""
groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(groupname), "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
self.transaction_start()
try:
targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=groupfilter, attrs=[])
if len(targetgroup) == 0:
raise Exception('Unable to find group "%s"' % groupname)
assert(len(targetgroup) == 1)
self.delete(targetgroup[0].dn)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def add_remove_group_members(self, groupname, listofmembers,
add_members_operation=True):
"""Adds or removes group members
:param groupname: Name of the target group
:param listofmembers: Comma-separated list of group members
:param add_members_operation: Defines if its an add or remove
operation
"""
groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (
ldb.binary_encode(groupname), "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
groupmembers = listofmembers.split(',')
self.transaction_start()
try:
targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=groupfilter, attrs=['member'])
if len(targetgroup) == 0:
raise Exception('Unable to find group "%s"' % groupname)
assert(len(targetgroup) == 1)
modified = False
addtargettogroup = """
dn: %s
changetype: modify
""" % (str(targetgroup[0].dn))
for member in groupmembers:
targetmember = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression="(|(sAMAccountName=%s)(CN=%s))" % (
ldb.binary_encode(member), ldb.binary_encode(member)), attrs=[])
if len(targetmember) != 1:
continue
if add_members_operation is True and (targetgroup[0].get('member') is None or str(targetmember[0].dn) not in targetgroup[0]['member']):
modified = True
addtargettogroup += """add: member
member: %s
""" % (str(targetmember[0].dn))
elif add_members_operation is False and (targetgroup[0].get('member') is not None and str(targetmember[0].dn) in targetgroup[0]['member']):
modified = True
addtargettogroup += """delete: member
member: %s
""" % (str(targetmember[0].dn))
if modified is True:
self.modify_ldif(addtargettogroup)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def newuser(self, username, password,
force_password_change_at_next_login_req=False,
useusernameascn=False, userou=None, surname=None, givenname=None,
initials=None, profilepath=None, scriptpath=None, homedrive=None,
homedirectory=None, jobtitle=None, department=None, company=None,
description=None, mailaddress=None, internetaddress=None,
telephonenumber=None, physicaldeliveryoffice=None, sd=None,
setpassword=True):
"""Adds a new user with additional parameters
:param username: Name of the new user
:param password: Password for the new user
:param force_password_change_at_next_login_req: Force password change
:param useusernameascn: Use username as cn rather than firstname +
initials + lastname
:param userou: Object container (without domainDN postfix) for new user
:param surname: Surname of the new user
:param givenname: First name of the new user
:param initials: Initials of the new user
:param profilepath: Profile path of the new user
:param scriptpath: Logon script path of the new user
:param homedrive: Home drive of the new user
:param homedirectory: Home directory of the new user
:param jobtitle: Job title of the new user
:param department: Department of the new user
:param company: Company of the new user
:param description: Description of the new user
:param mailaddress: Email address of the new user
:param internetaddress: Home page of the new user
:param telephonenumber: Phone number of the new user
:param physicaldeliveryoffice: Office location of the new user
:param sd: security descriptor of the object
:param setpassword: optionally disable password reset
"""
displayname = ""
if givenname is not None:
displayname += givenname
if initials is not None:
displayname += ' %s.' % initials
if surname is not None:
displayname += ' %s' % surname
cn = username
if useusernameascn is None and displayname != "":
cn = displayname
user_dn = "CN=%s,%s,%s" % (cn, (userou or "CN=Users"), self.domain_dn())
dnsdomain = ldb.Dn(self, self.domain_dn()).canonical_str().replace("/", "")
user_principal_name = "%s@%s" % (username, dnsdomain)
# The new user record. Note the reliance on the SAMLDB module which
# fills in the default information
ldbmessage = {"dn": user_dn,
"sAMAccountName": username,
"userPrincipalName": user_principal_name,
"objectClass": "user"}
if surname is not None:
ldbmessage["sn"] = surname
if givenname is not None:
ldbmessage["givenName"] = givenname
if displayname != "":
ldbmessage["displayName"] = displayname
ldbmessage["name"] = displayname
if initials is not None:
ldbmessage["initials"] = '%s.' % initials
if profilepath is not None:
ldbmessage["profilePath"] = profilepath
if scriptpath is not None:
ldbmessage["scriptPath"] = scriptpath
if homedrive is not None:
ldbmessage["homeDrive"] = homedrive
if homedirectory is not None:
ldbmessage["homeDirectory"] = homedirectory
if jobtitle is not None:
ldbmessage["title"] = jobtitle
if department is not None:
ldbmessage["department"] = department
if company is not None:
ldbmessage["company"] = company
if description is not None:
ldbmessage["description"] = description
if mailaddress is not None:
ldbmessage["mail"] = mailaddress
if internetaddress is not None:
ldbmessage["wWWHomePage"] = internetaddress
if telephonenumber is not None:
ldbmessage["telephoneNumber"] = telephonenumber
if physicaldeliveryoffice is not None:
ldbmessage["physicalDeliveryOfficeName"] = physicaldeliveryoffice
if sd is not None:
ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
self.transaction_start()
try:
self.add(ldbmessage)
# Sets the password for it
if setpassword:
self.setpassword("(samAccountName=%s)" % ldb.binary_encode(username), password,
force_password_change_at_next_login_req)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def deleteuser(self, username):
"""Deletes a user
:param username: Name of the target user
"""
filter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(username), "CN=Person,CN=Schema,CN=Configuration", self.domain_dn())
self.transaction_start()
try:
target = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=filter, attrs=[])
if len(target) == 0:
raise Exception('Unable to find user "%s"' % username)
assert(len(target) == 1)
self.delete(target[0].dn)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def setpassword(self, search_filter, password,
force_change_at_next_login=False, username=None):
"""Sets the password for a user
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
:param password: Password for the user
:param force_change_at_next_login: Force password change
"""
self.transaction_start()
try:
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=[])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % (username or search_filter))
if len(res) > 1:
raise Exception('Matched %u multiple users with filter "%s"' % (len(res), search_filter))
user_dn = res[0].dn
setpw = """
dn: %s
changetype: modify
replace: unicodePwd
unicodePwd:: %s
""" % (user_dn, base64.b64encode(("\"" + password + "\"").encode('utf-16-le')))
self.modify_ldif(setpw)
if force_change_at_next_login:
self.force_password_change_at_next_login(
"(distinguishedName=" + str(user_dn) + ")")
# modify the userAccountControl to remove the disabled bit
self.enable_account(search_filter)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def setexpiry(self, search_filter, expiry_seconds, no_expiry_req=False):
"""Sets the account expiry for a user
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
:param expiry_seconds: expiry time from now in seconds
:param no_expiry_req: if set, then don't expire password
"""
self.transaction_start()
try:
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter,
attrs=["userAccountControl", "accountExpires"])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
user_dn = res[0].dn
userAccountControl = int(res[0]["userAccountControl"][0])
accountExpires = int(res[0]["accountExpires"][0])
if no_expiry_req:
userAccountControl = userAccountControl | 0x10000
accountExpires = 0
else:
userAccountControl = userAccountControl & ~0x10000
accountExpires = samba.unix2nttime(expiry_seconds + int(time.time()))
setexp = """
dn: %s
changetype: modify
replace: userAccountControl
userAccountControl: %u
replace: accountExpires
accountExpires: %u
""" % (user_dn, userAccountControl, accountExpires)
self.modify_ldif(setexp)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def set_domain_sid(self, sid):
"""Change the domain SID used by this LDB.
:param sid: The new domain sid to use.
"""
dsdb._samdb_set_domain_sid(self, sid)
def get_domain_sid(self):
"""Read the domain SID used by this LDB. """
return dsdb._samdb_get_domain_sid(self)
domain_sid = property(get_domain_sid, set_domain_sid,
"SID for the domain")
def set_invocation_id(self, invocation_id):
"""Set the invocation id for this SamDB handle.
:param invocation_id: GUID of the invocation id.
"""
dsdb._dsdb_set_ntds_invocation_id(self, invocation_id)
def get_invocation_id(self):
"""Get the invocation_id id"""
return dsdb._samdb_ntds_invocation_id(self)
invocation_id = property(get_invocation_id, set_invocation_id,
"Invocation ID GUID")
def get_oid_from_attid(self, attid):
return dsdb._dsdb_get_oid_from_attid(self, attid)
def get_attid_from_lDAPDisplayName(self, ldap_display_name,
is_schema_nc=False):
'''return the attribute ID for a LDAP attribute as an integer as found in DRSUAPI'''
return dsdb._dsdb_get_attid_from_lDAPDisplayName(self,
ldap_display_name, is_schema_nc)
def get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name):
'''return the syntax OID for a LDAP attribute as a string'''
return dsdb._dsdb_get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name)
def get_systemFlags_from_lDAPDisplayName(self, ldap_display_name):
'''return the systemFlags for a LDAP attribute as a integer'''
return dsdb._dsdb_get_systemFlags_from_lDAPDisplayName(self, ldap_display_name)
def get_linkId_from_lDAPDisplayName(self, ldap_display_name):
'''return the linkID for a LDAP attribute as a integer'''
return dsdb._dsdb_get_linkId_from_lDAPDisplayName(self, ldap_display_name)
def get_lDAPDisplayName_by_attid(self, attid):
'''return the lDAPDisplayName from an integer DRS attribute ID'''
return dsdb._dsdb_get_lDAPDisplayName_by_attid(self, attid)
def get_backlink_from_lDAPDisplayName(self, ldap_display_name):
'''return the attribute name of the corresponding backlink from the name
of a forward link attribute. If there is no backlink return None'''
return dsdb._dsdb_get_backlink_from_lDAPDisplayName(self, ldap_display_name)
def set_ntds_settings_dn(self, ntds_settings_dn):
"""Set the NTDS Settings DN, as would be returned on the dsServiceName
rootDSE attribute.
This allows the DN to be set before the database fully exists
:param ntds_settings_dn: The new DN to use
"""
dsdb._samdb_set_ntds_settings_dn(self, ntds_settings_dn)
def get_ntds_GUID(self):
"""Get the NTDS objectGUID"""
return dsdb._samdb_ntds_objectGUID(self)
def server_site_name(self):
"""Get the server site name"""
return dsdb._samdb_server_site_name(self)
def host_dns_name(self):
"""return the DNS name of this host"""
res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])
return res[0]['dNSHostName'][0]
def domain_dns_name(self):
"""return the DNS name of the domain root"""
domain_dn = self.get_default_basedn()
return domain_dn.canonical_str().split('/')[0]
def forest_dns_name(self):
"""return the DNS name of the forest root"""
forest_dn = self.get_root_basedn()
return forest_dn.canonical_str().split('/')[0]
def load_partition_usn(self, base_dn):
return dsdb._dsdb_load_partition_usn(self, base_dn)
def set_schema(self, schema):
self.set_schema_from_ldb(schema.ldb)
def set_schema_from_ldb(self, ldb_conn):
dsdb._dsdb_set_schema_from_ldb(self, ldb_conn)
def dsdb_DsReplicaAttribute(self, ldb, ldap_display_name, ldif_elements):
'''convert a list of attribute values to a DRSUAPI DsReplicaAttribute'''
return dsdb._dsdb_DsReplicaAttribute(ldb, ldap_display_name, ldif_elements)
def dsdb_normalise_attributes(self, ldb, ldap_display_name, ldif_elements):
'''normalise a list of attribute values'''
return dsdb._dsdb_normalise_attributes(ldb, ldap_display_name, ldif_elements)
def get_attribute_from_attid(self, attid):
""" Get from an attid the associated attribute
:param attid: The attribute id for searched attribute
:return: The name of the attribute associated with this id
"""
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
if self.hash_oid_name.has_key(self.get_oid_from_attid(attid)):
return self.hash_oid_name[self.get_oid_from_attid(attid)]
else:
return None
def _populate_oid_attid(self):
"""Populate the hash hash_oid_name.
This hash contains the oid of the attribute as a key and
its display name as a value
"""
self.hash_oid_name = {}
res = self.search(expression="objectClass=attributeSchema",
controls=["search_options:1:2"],
attrs=["attributeID",
"lDAPDisplayName"])
if len(res) > 0:
for e in res:
strDisplay = str(e.get("lDAPDisplayName"))
self.hash_oid_name[str(e.get("attributeID"))] = strDisplay
def get_attribute_replmetadata_version(self, dn, att):
"""Get the version field trom the replPropertyMetaData for
the given field
:param dn: The DN on which we want to get the version
:param att: The name of the attribute
:return: The value of the version field in the replPropertyMetaData
for the given attribute. None if the attribute is not replicated
"""
res = self.search(expression="distinguishedName=%s" % dn,
scope=ldb.SCOPE_SUBTREE,
controls=["search_options:1:2"],
attrs=["replPropertyMetaData"])
if len(res) == 0:
return None
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(res[0]["replPropertyMetaData"]))
ctr = repl.ctr
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
for o in ctr.array:
# Search for Description
att_oid = self.get_oid_from_attid(o.attid)
if self.hash_oid_name.has_key(att_oid) and\
att.lower() == self.hash_oid_name[att_oid].lower():
return o.version
return None
def set_attribute_replmetadata_version(self, dn, att, value,
addifnotexist=False):
res = self.search(expression="distinguishedName=%s" % dn,
scope=ldb.SCOPE_SUBTREE,
controls=["search_options:1:2"],
attrs=["replPropertyMetaData"])
if len(res) == 0:
return None
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(res[0]["replPropertyMetaData"]))
ctr = repl.ctr
now = samba.unix2nttime(int(time.time()))
found = False
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
for o in ctr.array:
# Search for Description
att_oid = self.get_oid_from_attid(o.attid)
if self.hash_oid_name.has_key(att_oid) and\
att.lower() == self.hash_oid_name[att_oid].lower():
found = True
seq = self.sequence_number(ldb.SEQ_NEXT)
o.version = value
o.originating_change_time = now
o.originating_invocation_id = misc.GUID(self.get_invocation_id())
o.originating_usn = seq
o.local_usn = seq
if not found and addifnotexist and len(ctr.array) >0:
o2 = drsblobs.replPropertyMetaData1()
o2.attid = 589914
att_oid = self.get_oid_from_attid(o2.attid)
seq = self.sequence_number(ldb.SEQ_NEXT)
o2.version = value
o2.originating_change_time = now
o2.originating_invocation_id = misc.GUID(self.get_invocation_id())
o2.originating_usn = seq
o2.local_usn = seq
found = True
tab = ctr.array
tab.append(o2)
ctr.count = ctr.count + 1
ctr.array = tab
if found :
replBlob = ndr_pack(repl)
msg = ldb.Message()
msg.dn = res[0].dn
msg["replPropertyMetaData"] = ldb.MessageElement(replBlob,
ldb.FLAG_MOD_REPLACE,
"replPropertyMetaData")
self.modify(msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])
def write_prefixes_from_schema(self):
dsdb._dsdb_write_prefixes_from_schema_to_ldb(self)
def get_partitions_dn(self):
return dsdb._dsdb_get_partitions_dn(self)
def set_minPwdAge(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["minPwdAge"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdAge")
self.modify(m)
def get_minPwdAge(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdAge"])
if len(res) == 0:
return None
elif not "minPwdAge" in res[0]:
return None
else:
return res[0]["minPwdAge"][0]
def set_minPwdLength(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["minPwdLength"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdLength")
self.modify(m)
def get_minPwdLength(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdLength"])
if len(res) == 0:
return None
elif not "minPwdLength" in res[0]:
return None
else:
return res[0]["minPwdLength"][0]
def set_pwdProperties(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["pwdProperties"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "pwdProperties")
self.modify(m)
def get_pwdProperties(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["pwdProperties"])
if len(res) == 0:
return None
elif not "pwdProperties" in res[0]:
return None
else:
return res[0]["pwdProperties"][0]
def set_dsheuristics(self, dsheuristics):
m = ldb.Message()
m.dn = ldb.Dn(self, "CN=Directory Service,CN=Windows NT,CN=Services,%s"
% self.get_config_basedn().get_linearized())
if dsheuristics is not None:
m["dSHeuristics"] = ldb.MessageElement(dsheuristics,
ldb.FLAG_MOD_REPLACE, "dSHeuristics")
else:
m["dSHeuristics"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE,
"dSHeuristics")
self.modify(m)
def get_dsheuristics(self):
res = self.search("CN=Directory Service,CN=Windows NT,CN=Services,%s"
% self.get_config_basedn().get_linearized(),
scope=ldb.SCOPE_BASE, attrs=["dSHeuristics"])
if len(res) == 0:
dsheuristics = None
elif "dSHeuristics" in res[0]:
dsheuristics = res[0]["dSHeuristics"][0]
else:
dsheuristics = None
return dsheuristics
def create_ou(self, ou_dn, description=None, name=None, sd=None):
"""Creates an organizationalUnit object
:param ou_dn: dn of the new object
:param description: description attribute
:param name: name attribute
:param sd: security descriptor of the object, can be
an SDDL string or security.descriptor type
"""
m = {"dn": ou_dn,
"objectClass": "organizationalUnit"}
if description:
m["description"] = description
if name:
m["name"] = name
if sd:
m["nTSecurityDescriptor"] = ndr_pack(sd)
self.add(m)
def sequence_number(self, seq_type):
"""Returns the value of the sequence number according to the requested type
:param seq_type: type of sequence number
"""
self.transaction_start()
try:
seq = super(SamDB, self).sequence_number(seq_type)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
return seq
def get_dsServiceName(self):
'''get the NTDS DN from the rootDSE'''
res = self.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
return res[0]["dsServiceName"][0]
def get_serverName(self):
'''get the server DN from the rootDSE'''
res = self.search(base="", scope=ldb.SCOPE_BASE, attrs=["serverName"])
return res[0]["serverName"][0]
| gpl-3.0 | -3,569,424,283,100,842,500 | 36.091015 | 158 | 0.588542 | false | 3.887367 | false | false | false |
eayunstack/python-neutronclient | neutronclient/shell.py | 1 | 41584 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Command-line interface to the Neutron APIs
"""
from __future__ import print_function
import argparse
import inspect
import itertools
import logging
import os
import sys
from keystoneauth1 import session
import os_client_config
from oslo_utils import encodeutils
from cliff import app
from cliff import command
from cliff import commandmanager
from neutronclient._i18n import _
from neutronclient.common import clientmanager
from neutronclient.common import exceptions as exc
from neutronclient.common import extension as client_extension
from neutronclient.common import utils
from neutronclient.neutron.v2_0 import address_scope
from neutronclient.neutron.v2_0 import agent
from neutronclient.neutron.v2_0 import agentscheduler
from neutronclient.neutron.v2_0 import auto_allocated_topology
from neutronclient.neutron.v2_0 import availability_zone
from neutronclient.neutron.v2_0.bgp import dragentscheduler as bgp_drsched
from neutronclient.neutron.v2_0.bgp import peer as bgp_peer
from neutronclient.neutron.v2_0.bgp import speaker as bgp_speaker
from neutronclient.neutron.v2_0 import extension
from neutronclient.neutron.v2_0.flavor import flavor
from neutronclient.neutron.v2_0.flavor import flavor_profile
from neutronclient.neutron.v2_0 import floatingip
from neutronclient.neutron.v2_0.fw import firewall
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient.neutron.v2_0.fw import firewallrule
from neutronclient.neutron.v2_0.lb import healthmonitor as lb_healthmonitor
from neutronclient.neutron.v2_0.lb import member as lb_member
from neutronclient.neutron.v2_0.lb import pool as lb_pool
from neutronclient.neutron.v2_0.lb.v2 import healthmonitor as lbaas_healthmon
from neutronclient.neutron.v2_0.lb.v2 import l7policy as lbaas_l7policy
from neutronclient.neutron.v2_0.lb.v2 import l7rule as lbaas_l7rule
from neutronclient.neutron.v2_0.lb.v2 import listener as lbaas_listener
from neutronclient.neutron.v2_0.lb.v2 import loadbalancer as lbaas_loadbalancer
from neutronclient.neutron.v2_0.lb.v2 import member as lbaas_member
from neutronclient.neutron.v2_0.lb.v2 import pool as lbaas_pool
from neutronclient.neutron.v2_0.lb import vip as lb_vip
from neutronclient.neutron.v2_0 import metering
from neutronclient.neutron.v2_0 import network
from neutronclient.neutron.v2_0 import network_ip_availability
from neutronclient.neutron.v2_0 import port
from neutronclient.neutron.v2_0 import purge
from neutronclient.neutron.v2_0.qos import bandwidth_limit_rule
from neutronclient.neutron.v2_0.qos import dscp_marking_rule
from neutronclient.neutron.v2_0.qos import policy as qos_policy
from neutronclient.neutron.v2_0.qos import rule as qos_rule
from neutronclient.neutron.v2_0 import quota
from neutronclient.neutron.v2_0 import rbac
from neutronclient.neutron.v2_0 import router
from neutronclient.neutron.v2_0 import securitygroup
from neutronclient.neutron.v2_0 import servicetype
from neutronclient.neutron.v2_0 import subnet
from neutronclient.neutron.v2_0 import subnetpool
from neutronclient.neutron.v2_0 import tag
from neutronclient.neutron.v2_0.vpn import endpoint_group
from neutronclient.neutron.v2_0.vpn import ikepolicy
from neutronclient.neutron.v2_0.vpn import ipsec_site_connection
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.neutron.v2_0.vpn import vpnservice
from neutronclient.version import __version__
VERSION = '2.0'
NEUTRON_API_VERSION = '2.0'
def run_command(cmd, cmd_parser, sub_argv):
_argv = sub_argv
index = -1
values_specs = []
if '--' in sub_argv:
index = sub_argv.index('--')
_argv = sub_argv[:index]
values_specs = sub_argv[index:]
known_args, _values_specs = cmd_parser.parse_known_args(_argv)
if(isinstance(cmd, subnet.CreateSubnet) and not known_args.cidr):
cidr = get_first_valid_cidr(_values_specs)
if cidr:
known_args.cidr = cidr
_values_specs.remove(cidr)
cmd.values_specs = (index == -1 and _values_specs or values_specs)
return cmd.run(known_args)
def get_first_valid_cidr(value_specs):
# Bug 1442771, argparse does not allow optional positional parameter
# to be separated from previous positional parameter.
# When the cidr is separated from the network argument, its value cannot be
# parsed into known_args and is saved to _values_specs instead.
for value in value_specs:
if utils.is_valid_cidr(value):
return value
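# Illustrative behaviour (assuming '10.0.0.0/24' passes utils.is_valid_cidr):
#   get_first_valid_cidr(['--name', 'subnet1', '10.0.0.0/24']) -> '10.0.0.0/24'
#   get_first_valid_cidr(['--name', 'subnet1']) -> None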
def env(*_vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
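# Illustrative use: take the first of several variables that is set, else the default:
#   env('OS_NETWORK_SERVICE_TYPE', 'OS_SERVICE_TYPE', default='network')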
def check_non_negative_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError(_("invalid int value: %r") % value)
if value < 0:
raise argparse.ArgumentTypeError(_("input value %d is negative") %
value)
return value
class BashCompletionCommand(command.Command):
"""Prints all of the commands and options for bash-completion."""
def take_action(self, parsed_args):
pass
COMMAND_V2 = {
'bash-completion': BashCompletionCommand,
'net-list': network.ListNetwork,
'net-external-list': network.ListExternalNetwork,
'net-show': network.ShowNetwork,
'net-create': network.CreateNetwork,
'net-delete': network.DeleteNetwork,
'net-update': network.UpdateNetwork,
'subnet-list': subnet.ListSubnet,
'subnet-show': subnet.ShowSubnet,
'subnet-create': subnet.CreateSubnet,
'subnet-delete': subnet.DeleteSubnet,
'subnet-update': subnet.UpdateSubnet,
'subnetpool-list': subnetpool.ListSubnetPool,
'subnetpool-show': subnetpool.ShowSubnetPool,
'subnetpool-create': subnetpool.CreateSubnetPool,
'subnetpool-delete': subnetpool.DeleteSubnetPool,
'subnetpool-update': subnetpool.UpdateSubnetPool,
'port-list': port.ListPort,
'port-show': port.ShowPort,
'port-create': port.CreatePort,
'port-delete': port.DeletePort,
'port-update': port.UpdatePort,
'purge': purge.Purge,
'quota-list': quota.ListQuota,
'quota-show': quota.ShowQuota,
'quota-default-show': quota.ShowQuotaDefault,
'quota-delete': quota.DeleteQuota,
'quota-update': quota.UpdateQuota,
'ext-list': extension.ListExt,
'ext-show': extension.ShowExt,
'router-list': router.ListRouter,
'router-port-list': port.ListRouterPort,
'router-show': router.ShowRouter,
'router-create': router.CreateRouter,
'router-delete': router.DeleteRouter,
'router-update': router.UpdateRouter,
'router-interface-add': router.AddInterfaceRouter,
'router-interface-delete': router.RemoveInterfaceRouter,
'router-gateway-set': router.SetGatewayRouter,
'router-gateway-clear': router.RemoveGatewayRouter,
'floatingip-list': floatingip.ListFloatingIP,
'floatingip-show': floatingip.ShowFloatingIP,
'floatingip-create': floatingip.CreateFloatingIP,
'floatingip-delete': floatingip.DeleteFloatingIP,
'floatingip-associate': floatingip.AssociateFloatingIP,
'floatingip-disassociate': floatingip.DisassociateFloatingIP,
'security-group-list': securitygroup.ListSecurityGroup,
'security-group-show': securitygroup.ShowSecurityGroup,
'security-group-create': securitygroup.CreateSecurityGroup,
'security-group-delete': securitygroup.DeleteSecurityGroup,
'security-group-update': securitygroup.UpdateSecurityGroup,
'security-group-rule-list': securitygroup.ListSecurityGroupRule,
'security-group-rule-show': securitygroup.ShowSecurityGroupRule,
'security-group-rule-create': securitygroup.CreateSecurityGroupRule,
'security-group-rule-delete': securitygroup.DeleteSecurityGroupRule,
'lbaas-loadbalancer-list': lbaas_loadbalancer.ListLoadBalancer,
'lbaas-loadbalancer-show': lbaas_loadbalancer.ShowLoadBalancer,
'lbaas-loadbalancer-create': lbaas_loadbalancer.CreateLoadBalancer,
'lbaas-loadbalancer-update': lbaas_loadbalancer.UpdateLoadBalancer,
'lbaas-loadbalancer-delete': lbaas_loadbalancer.DeleteLoadBalancer,
'lbaas-loadbalancer-stats': lbaas_loadbalancer.RetrieveLoadBalancerStats,
'lbaas-loadbalancer-status': lbaas_loadbalancer.RetrieveLoadBalancerStatus,
'lbaas-listener-list': lbaas_listener.ListListener,
'lbaas-listener-show': lbaas_listener.ShowListener,
'lbaas-listener-create': lbaas_listener.CreateListener,
'lbaas-listener-update': lbaas_listener.UpdateListener,
'lbaas-listener-delete': lbaas_listener.DeleteListener,
'lbaas-l7policy-list': lbaas_l7policy.ListL7Policy,
'lbaas-l7policy-show': lbaas_l7policy.ShowL7Policy,
'lbaas-l7policy-create': lbaas_l7policy.CreateL7Policy,
'lbaas-l7policy-update': lbaas_l7policy.UpdateL7Policy,
'lbaas-l7policy-delete': lbaas_l7policy.DeleteL7Policy,
'lbaas-l7rule-list': lbaas_l7rule.ListL7Rule,
'lbaas-l7rule-show': lbaas_l7rule.ShowL7Rule,
'lbaas-l7rule-create': lbaas_l7rule.CreateL7Rule,
'lbaas-l7rule-update': lbaas_l7rule.UpdateL7Rule,
'lbaas-l7rule-delete': lbaas_l7rule.DeleteL7Rule,
'lbaas-pool-list': lbaas_pool.ListPool,
'lbaas-pool-show': lbaas_pool.ShowPool,
'lbaas-pool-create': lbaas_pool.CreatePool,
'lbaas-pool-update': lbaas_pool.UpdatePool,
'lbaas-pool-delete': lbaas_pool.DeletePool,
'lbaas-healthmonitor-list': lbaas_healthmon.ListHealthMonitor,
'lbaas-healthmonitor-show': lbaas_healthmon.ShowHealthMonitor,
'lbaas-healthmonitor-create': lbaas_healthmon.CreateHealthMonitor,
'lbaas-healthmonitor-update': lbaas_healthmon.UpdateHealthMonitor,
'lbaas-healthmonitor-delete': lbaas_healthmon.DeleteHealthMonitor,
'lbaas-member-list': lbaas_member.ListMember,
'lbaas-member-show': lbaas_member.ShowMember,
'lbaas-member-create': lbaas_member.CreateMember,
'lbaas-member-update': lbaas_member.UpdateMember,
'lbaas-member-delete': lbaas_member.DeleteMember,
'lb-vip-list': lb_vip.ListVip,
'lb-vip-show': lb_vip.ShowVip,
'lb-vip-create': lb_vip.CreateVip,
'lb-vip-update': lb_vip.UpdateVip,
'lb-vip-delete': lb_vip.DeleteVip,
'lb-pool-list': lb_pool.ListPool,
'lb-pool-show': lb_pool.ShowPool,
'lb-pool-create': lb_pool.CreatePool,
'lb-pool-update': lb_pool.UpdatePool,
'lb-pool-delete': lb_pool.DeletePool,
'lb-pool-stats': lb_pool.RetrievePoolStats,
'lb-member-list': lb_member.ListMember,
'lb-member-show': lb_member.ShowMember,
'lb-member-create': lb_member.CreateMember,
'lb-member-update': lb_member.UpdateMember,
'lb-member-delete': lb_member.DeleteMember,
'lb-healthmonitor-list': lb_healthmonitor.ListHealthMonitor,
'lb-healthmonitor-show': lb_healthmonitor.ShowHealthMonitor,
'lb-healthmonitor-create': lb_healthmonitor.CreateHealthMonitor,
'lb-healthmonitor-update': lb_healthmonitor.UpdateHealthMonitor,
'lb-healthmonitor-delete': lb_healthmonitor.DeleteHealthMonitor,
'lb-healthmonitor-associate': lb_healthmonitor.AssociateHealthMonitor,
'lb-healthmonitor-disassociate': (
lb_healthmonitor.DisassociateHealthMonitor
),
'agent-list': agent.ListAgent,
'agent-show': agent.ShowAgent,
'agent-delete': agent.DeleteAgent,
'agent-update': agent.UpdateAgent,
'dhcp-agent-network-add': agentscheduler.AddNetworkToDhcpAgent,
'dhcp-agent-network-remove': agentscheduler.RemoveNetworkFromDhcpAgent,
'net-list-on-dhcp-agent': agentscheduler.ListNetworksOnDhcpAgent,
'dhcp-agent-list-hosting-net': agentscheduler.ListDhcpAgentsHostingNetwork,
'l3-agent-router-add': agentscheduler.AddRouterToL3Agent,
'l3-agent-router-remove': agentscheduler.RemoveRouterFromL3Agent,
'router-list-on-l3-agent': agentscheduler.ListRoutersOnL3Agent,
'l3-agent-list-hosting-router': agentscheduler.ListL3AgentsHostingRouter,
'lb-pool-list-on-agent': agentscheduler.ListPoolsOnLbaasAgent,
'lb-agent-hosting-pool': agentscheduler.GetLbaasAgentHostingPool,
'lbaas-loadbalancer-list-on-agent':
agentscheduler.ListLoadBalancersOnLbaasAgent,
'lbaas-agent-hosting-loadbalancer':
agentscheduler.GetLbaasAgentHostingLoadBalancer,
'service-provider-list': servicetype.ListServiceProvider,
'firewall-rule-list': firewallrule.ListFirewallRule,
'firewall-rule-show': firewallrule.ShowFirewallRule,
'firewall-rule-create': firewallrule.CreateFirewallRule,
'firewall-rule-update': firewallrule.UpdateFirewallRule,
'firewall-rule-delete': firewallrule.DeleteFirewallRule,
'firewall-policy-list': firewallpolicy.ListFirewallPolicy,
'firewall-policy-show': firewallpolicy.ShowFirewallPolicy,
'firewall-policy-create': firewallpolicy.CreateFirewallPolicy,
'firewall-policy-update': firewallpolicy.UpdateFirewallPolicy,
'firewall-policy-delete': firewallpolicy.DeleteFirewallPolicy,
'firewall-policy-insert-rule': firewallpolicy.FirewallPolicyInsertRule,
'firewall-policy-remove-rule': firewallpolicy.FirewallPolicyRemoveRule,
'firewall-list': firewall.ListFirewall,
'firewall-show': firewall.ShowFirewall,
'firewall-create': firewall.CreateFirewall,
'firewall-update': firewall.UpdateFirewall,
'firewall-delete': firewall.DeleteFirewall,
'ipsec-site-connection-list': (
ipsec_site_connection.ListIPsecSiteConnection
),
'ipsec-site-connection-show': (
ipsec_site_connection.ShowIPsecSiteConnection
),
'ipsec-site-connection-create': (
ipsec_site_connection.CreateIPsecSiteConnection
),
'ipsec-site-connection-update': (
ipsec_site_connection.UpdateIPsecSiteConnection
),
'ipsec-site-connection-delete': (
ipsec_site_connection.DeleteIPsecSiteConnection
),
'vpn-endpoint-group-list': endpoint_group.ListEndpointGroup,
'vpn-endpoint-group-show': endpoint_group.ShowEndpointGroup,
'vpn-endpoint-group-create': endpoint_group.CreateEndpointGroup,
'vpn-endpoint-group-update': endpoint_group.UpdateEndpointGroup,
'vpn-endpoint-group-delete': endpoint_group.DeleteEndpointGroup,
'vpn-service-list': vpnservice.ListVPNService,
'vpn-service-show': vpnservice.ShowVPNService,
'vpn-service-create': vpnservice.CreateVPNService,
'vpn-service-update': vpnservice.UpdateVPNService,
'vpn-service-delete': vpnservice.DeleteVPNService,
'vpn-ipsecpolicy-list': ipsecpolicy.ListIPsecPolicy,
'vpn-ipsecpolicy-show': ipsecpolicy.ShowIPsecPolicy,
'vpn-ipsecpolicy-create': ipsecpolicy.CreateIPsecPolicy,
'vpn-ipsecpolicy-update': ipsecpolicy.UpdateIPsecPolicy,
'vpn-ipsecpolicy-delete': ipsecpolicy.DeleteIPsecPolicy,
'vpn-ikepolicy-list': ikepolicy.ListIKEPolicy,
'vpn-ikepolicy-show': ikepolicy.ShowIKEPolicy,
'vpn-ikepolicy-create': ikepolicy.CreateIKEPolicy,
'vpn-ikepolicy-update': ikepolicy.UpdateIKEPolicy,
'vpn-ikepolicy-delete': ikepolicy.DeleteIKEPolicy,
'meter-label-create': metering.CreateMeteringLabel,
'meter-label-list': metering.ListMeteringLabel,
'meter-label-show': metering.ShowMeteringLabel,
'meter-label-delete': metering.DeleteMeteringLabel,
'meter-label-rule-create': metering.CreateMeteringLabelRule,
'meter-label-rule-list': metering.ListMeteringLabelRule,
'meter-label-rule-show': metering.ShowMeteringLabelRule,
'meter-label-rule-delete': metering.DeleteMeteringLabelRule,
'rbac-create': rbac.CreateRBACPolicy,
'rbac-update': rbac.UpdateRBACPolicy,
'rbac-list': rbac.ListRBACPolicy,
'rbac-show': rbac.ShowRBACPolicy,
'rbac-delete': rbac.DeleteRBACPolicy,
'address-scope-list': address_scope.ListAddressScope,
'address-scope-show': address_scope.ShowAddressScope,
'address-scope-create': address_scope.CreateAddressScope,
'address-scope-delete': address_scope.DeleteAddressScope,
'address-scope-update': address_scope.UpdateAddressScope,
'qos-policy-list': qos_policy.ListQoSPolicy,
'qos-policy-show': qos_policy.ShowQoSPolicy,
'qos-policy-create': qos_policy.CreateQoSPolicy,
'qos-policy-update': qos_policy.UpdateQoSPolicy,
'qos-policy-delete': qos_policy.DeleteQoSPolicy,
'qos-bandwidth-limit-rule-create': (
bandwidth_limit_rule.CreateQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-show': (
bandwidth_limit_rule.ShowQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-list': (
bandwidth_limit_rule.ListQoSBandwidthLimitRules
),
'qos-bandwidth-limit-rule-update': (
bandwidth_limit_rule.UpdateQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-delete': (
bandwidth_limit_rule.DeleteQoSBandwidthLimitRule
),
'qos-dscp-marking-rule-create': (
dscp_marking_rule.CreateQoSDscpMarkingRule
),
'qos-dscp-marking-rule-show': (
dscp_marking_rule.ShowQoSDscpMarkingRule
),
'qos-dscp-marking-rule-list': (
dscp_marking_rule.ListQoSDscpMarkingRules
),
'qos-dscp-marking-rule-update': (
dscp_marking_rule.UpdateQoSDscpMarkingRule
),
'qos-dscp-marking-rule-delete': (
dscp_marking_rule.DeleteQoSDscpMarkingRule
),
'qos-available-rule-types': qos_rule.ListQoSRuleTypes,
'flavor-list': flavor.ListFlavor,
'flavor-show': flavor.ShowFlavor,
'flavor-create': flavor.CreateFlavor,
'flavor-delete': flavor.DeleteFlavor,
'flavor-update': flavor.UpdateFlavor,
'flavor-associate': flavor.AssociateFlavor,
'flavor-disassociate': flavor.DisassociateFlavor,
'flavor-profile-list': flavor_profile.ListFlavorProfile,
'flavor-profile-show': flavor_profile.ShowFlavorProfile,
'flavor-profile-create': flavor_profile.CreateFlavorProfile,
'flavor-profile-delete': flavor_profile.DeleteFlavorProfile,
'flavor-profile-update': flavor_profile.UpdateFlavorProfile,
'availability-zone-list': availability_zone.ListAvailabilityZone,
'auto-allocated-topology-show': (
auto_allocated_topology.ShowAutoAllocatedTopology),
'bgp-dragent-speaker-add': (
bgp_drsched.AddBGPSpeakerToDRAgent
),
'bgp-dragent-speaker-remove': (
bgp_drsched.RemoveBGPSpeakerFromDRAgent
),
'bgp-speaker-list-on-dragent': (
bgp_drsched.ListBGPSpeakersOnDRAgent
),
'bgp-dragent-list-hosting-speaker': (
bgp_drsched.ListDRAgentsHostingBGPSpeaker
),
'bgp-speaker-list': bgp_speaker.ListSpeakers,
'bgp-speaker-advertiseroute-list': (
bgp_speaker.ListRoutesAdvertisedBySpeaker
),
'bgp-speaker-show': bgp_speaker.ShowSpeaker,
'bgp-speaker-create': bgp_speaker.CreateSpeaker,
'bgp-speaker-update': bgp_speaker.UpdateSpeaker,
'bgp-speaker-delete': bgp_speaker.DeleteSpeaker,
'bgp-speaker-peer-add': bgp_speaker.AddPeerToSpeaker,
'bgp-speaker-peer-remove': bgp_speaker.RemovePeerFromSpeaker,
'bgp-speaker-network-add': bgp_speaker.AddNetworkToSpeaker,
'bgp-speaker-network-remove': bgp_speaker.RemoveNetworkFromSpeaker,
'bgp-peer-list': bgp_peer.ListPeers,
'bgp-peer-show': bgp_peer.ShowPeer,
'bgp-peer-create': bgp_peer.CreatePeer,
'bgp-peer-update': bgp_peer.UpdatePeer,
'bgp-peer-delete': bgp_peer.DeletePeer,
'net-ip-availability-list': network_ip_availability.ListIpAvailability,
'net-ip-availability-show': network_ip_availability.ShowIpAvailability,
'tag-add': tag.AddTag,
'tag-replace': tag.ReplaceTag,
'tag-remove': tag.RemoveTag,
}
COMMANDS = {'2.0': COMMAND_V2}
class HelpAction(argparse.Action):
"""Print help message including sub-commands
Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
outputs = []
max_len = 0
app = self.default
parser.print_help(app.stdout)
app.stdout.write(_('\nCommands for API v%s:\n') % app.api_version)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
factory = ep.load()
cmd = factory(self, None)
one_liner = cmd.get_description().split('\n')[0]
outputs.append((name, one_liner))
max_len = max(len(name), max_len)
for (name, one_liner) in outputs:
app.stdout.write(' %s %s\n' % (name.ljust(max_len), one_liner))
sys.exit(0)
class NeutronShell(app.App):
# verbose logging levels
WARNING_LEVEL = 0
INFO_LEVEL = 1
DEBUG_LEVEL = 2
CONSOLE_MESSAGE_FORMAT = '%(message)s'
DEBUG_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
def __init__(self, apiversion):
super(NeutronShell, self).__init__(
description=__doc__.strip(),
version=VERSION,
command_manager=commandmanager.CommandManager('neutron.cli'), )
self.commands = COMMANDS
for k, v in self.commands[apiversion].items():
self.command_manager.add_command(k, v)
self._register_extensions(VERSION)
# Pop the 'complete' to correct the outputs of 'neutron help'.
self.command_manager.commands.pop('complete')
# This is instantiated in initialize_app() only when using
# password flow auth
self.auth_client = None
self.api_version = apiversion
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = argparse.ArgumentParser(
description=description,
add_help=False, )
parser.add_argument(
'--version',
action='version',
version=__version__, )
parser.add_argument(
'-v', '--verbose', '--debug',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help=_('Increase verbosity of output and show tracebacks on'
' errors. You can repeat this option.'))
parser.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help=_('Suppress output except warnings and errors.'))
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help=_("Show this help message and exit."))
parser.add_argument(
'-r', '--retries',
metavar="NUM",
type=check_non_negative_int,
default=0,
help=_("How many times the request to the Neutron server should "
"be retried if it fails."))
# FIXME(bklei): this method should come from keystoneauth1
self._append_global_identity_args(parser)
return parser
def _append_global_identity_args(self, parser):
# FIXME(bklei): these are global identity (Keystone) arguments which
# should be consistent and shared by all service clients. Therefore,
# they should be provided by keystoneauth1. We will need to
# refactor this code once this functionality is available in
# keystoneauth1.
#
# Note: At that time we'll need to decide if we can just abandon
# the deprecated args (--service-type and --endpoint-type).
parser.add_argument(
'--os-service-type', metavar='<os-service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('Defaults to env[OS_NETWORK_SERVICE_TYPE] or network.'))
parser.add_argument(
'--os-endpoint-type', metavar='<os-endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='public'),
help=_('Defaults to env[OS_ENDPOINT_TYPE] or public.'))
# FIXME(bklei): --service-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--service-type', metavar='<service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('DEPRECATED! Use --os-service-type.'))
# FIXME(bklei): --endpoint-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--endpoint-type', metavar='<endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='public'),
help=_('DEPRECATED! Use --os-endpoint-type.'))
parser.add_argument(
'--os-auth-strategy', metavar='<auth-strategy>',
default=env('OS_AUTH_STRATEGY', default='keystone'),
help=_('DEPRECATED! Only keystone is supported.'))
parser.add_argument(
'--os_auth_strategy',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-cloud', metavar='<cloud>',
default=env('OS_CLOUD', default=None),
help=_('Defaults to env[OS_CLOUD].'))
parser.add_argument(
'--os-auth-url', metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help=_('Authentication URL, defaults to env[OS_AUTH_URL].'))
parser.add_argument(
'--os_auth_url',
help=argparse.SUPPRESS)
project_name_group = parser.add_mutually_exclusive_group()
project_name_group.add_argument(
'--os-tenant-name', metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help=_('Authentication tenant name, defaults to '
'env[OS_TENANT_NAME].'))
project_name_group.add_argument(
'--os-project-name',
metavar='<auth-project-name>',
default=utils.env('OS_PROJECT_NAME'),
help=_('Another way to specify tenant name. '
'This option is mutually exclusive with '
' --os-tenant-name. '
'Defaults to env[OS_PROJECT_NAME].'))
parser.add_argument(
'--os_tenant_name',
help=argparse.SUPPRESS)
project_id_group = parser.add_mutually_exclusive_group()
project_id_group.add_argument(
'--os-tenant-id', metavar='<auth-tenant-id>',
default=env('OS_TENANT_ID'),
help=_('Authentication tenant ID, defaults to '
'env[OS_TENANT_ID].'))
project_id_group.add_argument(
'--os-project-id',
metavar='<auth-project-id>',
default=utils.env('OS_PROJECT_ID'),
help=_('Another way to specify tenant ID. '
'This option is mutually exclusive with '
' --os-tenant-id. '
'Defaults to env[OS_PROJECT_ID].'))
parser.add_argument(
'--os-username', metavar='<auth-username>',
default=utils.env('OS_USERNAME'),
help=_('Authentication username, defaults to env[OS_USERNAME].'))
parser.add_argument(
'--os_username',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-id', metavar='<auth-user-id>',
default=env('OS_USER_ID'),
help=_('Authentication user ID (Env: OS_USER_ID)'))
parser.add_argument(
'--os_user_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-id',
metavar='<auth-user-domain-id>',
default=utils.env('OS_USER_DOMAIN_ID'),
help=_('OpenStack user domain ID. '
'Defaults to env[OS_USER_DOMAIN_ID].'))
parser.add_argument(
'--os_user_domain_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-name',
metavar='<auth-user-domain-name>',
default=utils.env('OS_USER_DOMAIN_NAME'),
help=_('OpenStack user domain name. '
'Defaults to env[OS_USER_DOMAIN_NAME].'))
parser.add_argument(
'--os_user_domain_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-project-domain-id',
metavar='<auth-project-domain-id>',
default=utils.env('OS_PROJECT_DOMAIN_ID'),
help=_('Defaults to env[OS_PROJECT_DOMAIN_ID].'))
parser.add_argument(
'--os-project-domain-name',
metavar='<auth-project-domain-name>',
default=utils.env('OS_PROJECT_DOMAIN_NAME'),
help=_('Defaults to env[OS_PROJECT_DOMAIN_NAME].'))
parser.add_argument(
'--os-cert',
metavar='<certificate>',
default=utils.env('OS_CERT'),
help=_("Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key. Defaults "
"to env[OS_CERT]."))
parser.add_argument(
'--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help=_("Specify a CA bundle file to use in "
"verifying a TLS (https) server certificate. "
"Defaults to env[OS_CACERT]."))
parser.add_argument(
'--os-key',
metavar='<key>',
default=utils.env('OS_KEY'),
help=_("Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your certificate "
"file. Defaults to env[OS_KEY]."))
parser.add_argument(
'--os-password', metavar='<auth-password>',
default=utils.env('OS_PASSWORD'),
help=_('Authentication password, defaults to env[OS_PASSWORD].'))
parser.add_argument(
'--os_password',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-region-name', metavar='<auth-region-name>',
default=env('OS_REGION_NAME'),
help=_('Authentication region name, defaults to '
'env[OS_REGION_NAME].'))
parser.add_argument(
'--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-token', metavar='<token>',
default=env('OS_TOKEN'),
help=_('Authentication token, defaults to env[OS_TOKEN].'))
parser.add_argument(
'--os_token',
help=argparse.SUPPRESS)
parser.add_argument(
'--http-timeout', metavar='<seconds>',
default=env('OS_NETWORK_TIMEOUT', default=None), type=float,
help=_('Timeout in seconds to wait for an HTTP response. Defaults '
'to env[OS_NETWORK_TIMEOUT] or None if not specified.'))
parser.add_argument(
'--os-url', metavar='<url>',
default=env('OS_URL'),
help=_('Defaults to env[OS_URL].'))
parser.add_argument(
'--os_url',
help=argparse.SUPPRESS)
parser.add_argument(
'--insecure',
action='store_true',
default=env('NEUTRONCLIENT_INSECURE', default=False),
help=_("Explicitly allow neutronclient to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution."))
def _bash_completion(self):
"""Prints all of the commands and options for bash-completion."""
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for _name, _command in self.command_manager:
commands.add(_name)
cmd_factory = _command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser('')
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
print(' '.join(commands | options))
def _register_extensions(self, version):
for name, module in itertools.chain(
client_extension._discover_via_entry_points()):
self._extend_shell_commands(name, module, version)
def _extend_shell_commands(self, name, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if (issubclass(cls, client_extension.NeutronClientExtension) and
hasattr(cls, 'shell_command')):
cmd = cls.shell_command
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
try:
name_prefix = "[%s]" % name
cls.__doc__ = ("%s %s" % (name_prefix, cls.__doc__) if
cls.__doc__ else name_prefix)
self.command_manager.add_command(cmd, cls)
self.commands[version][cmd] = cls
except TypeError:
pass
def run(self, argv):
"""Equivalent to the main program for the application.
:param argv: input arguments and options
:paramtype argv: list of str
"""
try:
index = 0
command_pos = -1
help_pos = -1
help_command_pos = -1
for arg in argv:
if arg == 'bash-completion' and help_command_pos == -1:
self._bash_completion()
return 0
if arg in self.commands[self.api_version]:
if command_pos == -1:
command_pos = index
elif arg in ('-h', '--help'):
if help_pos == -1:
help_pos = index
elif arg == 'help':
if help_command_pos == -1:
help_command_pos = index
index = index + 1
if command_pos > -1 and help_pos > command_pos:
argv = ['help', argv[command_pos]]
if help_command_pos > -1 and command_pos == -1:
argv[help_command_pos] = '--help'
self.options, remainder = self.parser.parse_known_args(argv)
self.configure_logging()
self.interactive_mode = not remainder
self.initialize_app(remainder)
except Exception as err:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception(err)
raise
else:
self.log.error(err)
return 1
if self.interactive_mode:
_argv = [sys.argv[0]]
sys.argv = _argv
return self.interact()
return self.run_subcommand(remainder)
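    # Example of the argv rewriting above: "flavor-list --help" is turned into
    # ["help", "flavor-list"] so command-specific help is shown, while a bare
    # "help" with no known command is rewritten to "--help" for global help.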
def run_subcommand(self, argv):
subcommand = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = subcommand
cmd = cmd_factory(self, self.options)
try:
self.prepare_to_run_command(cmd)
full_name = (cmd_name
if self.interactive_mode
else ' '.join([self.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
return run_command(cmd, cmd_parser, sub_argv)
except SystemExit:
print(_("Try 'neutron help %s' for more information.") %
cmd_name, file=sys.stderr)
raise
except Exception as e:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception("%s", e)
raise
self.log.error("%s", e)
return 1
def authenticate_user(self):
"""Confirm user authentication
Make sure the user has provided all of the authentication
info we need.
"""
cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
cloud=self.options.os_cloud, argparse=self.options,
network_api_version=self.api_version,
verify=not self.options.insecure)
verify, cert = cloud_config.get_requests_verify_args()
        # TODO(singhj): Remove dependency on HTTPClient
        # for the case of token-endpoint authentication.
        # When using token-endpoint authentication the legacy
        # HTTPClient will be used; otherwise SessionClient
        # will be used.
if self.options.os_token and self.options.os_url:
auth = None
auth_session = None
else:
auth = cloud_config.get_auth()
auth_session = session.Session(
auth=auth, verify=verify, cert=cert,
timeout=self.options.http_timeout)
interface = self.options.os_endpoint_type or self.endpoint_type
if interface.endswith('URL'):
interface = interface[:-3]
self.client_manager = clientmanager.ClientManager(
retries=self.options.retries,
raise_errors=False,
session=auth_session,
url=self.options.os_url,
token=self.options.os_token,
region_name=cloud_config.get_region_name(),
api_version=cloud_config.get_api_version('network'),
service_type=cloud_config.get_service_type('network'),
service_name=cloud_config.get_service_name('network'),
endpoint_type=interface,
auth=auth,
insecure=not verify,
log_credentials=True)
return
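    # When both --os-token and --os-url are supplied, auth and session stay
    # None and the legacy HTTPClient path is used; otherwise a keystone
    # session is built from the os-client-config cloud settings and handed to
    # the ClientManager above.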
def initialize_app(self, argv):
"""Global app init bits:
* set up API versions
* validate authentication info
"""
super(NeutronShell, self).initialize_app(argv)
self.api_version = {'network': self.api_version}
# If the user is not asking for help, make sure they
# have given us auth.
cmd_name = None
if argv:
cmd_info = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = cmd_info
if self.interactive_mode or cmd_name != 'help':
self.authenticate_user()
def configure_logging(self):
"""Create logging handlers for any log output."""
root_logger = logging.getLogger('')
# Set up logging to a file
root_logger.setLevel(logging.DEBUG)
# Send higher-level messages to the console via stderr
console = logging.StreamHandler(self.stderr)
console_level = {self.WARNING_LEVEL: logging.WARNING,
self.INFO_LEVEL: logging.INFO,
self.DEBUG_LEVEL: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
        # When the effective log level is INFO (the default), drop the
        # console handler to WARNING to avoid displaying noisy messages;
        # this is equivalent to passing "--quiet".
if console_level == logging.INFO:
console.setLevel(logging.WARNING)
else:
console.setLevel(console_level)
if logging.DEBUG == console_level:
formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
else:
formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
def main(argv=sys.argv[1:]):
try:
return NeutronShell(NEUTRON_API_VERSION).run(
list(map(encodeutils.safe_decode, argv)))
except KeyboardInterrupt:
print(_("... terminating neutron client"), file=sys.stderr)
return 130
except exc.NeutronClientException:
return 1
except Exception as e:
print(e)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 2,772,146,299,183,681,000 | 40.459621 | 79 | 0.645897 | false | 3.872241 | false | false | false |
mwrlabs/veripy | contrib/rfc3633/dr/renew_message.py | 1 | 1596 | from contrib.rfc3315.constants import *
from contrib.rfc3633.dhcpv6_pd import DHCPv6PDHelper
from scapy.all import *
from veripy.assertions import *
class RenewMessageTestCase(DHCPv6PDHelper):
"""
Requesting Router Initiated: Renew Message
Verify that a device can properly interoperate while using DHCPv6-PD
@private
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 4.2)
"""
def run(self):
prefix, p = self.do_dhcpv6_pd_handshake_as_client(self.target(1), self.node(1))
self.logger.info("Acquired the prefix %s from the DR (T1=%d)." % (prefix, p[DHCP6OptIA_PD].T1))
for i in range(0, 2):
self.ui.wait(p[DHCP6OptIA_PD].T1)
self.node(1).clear_received()
self.logger.info("Sending a DHCPv6 Renew message...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(AllDHCPv6RelayAgentsAndServers))/
UDP(sport=DHCPv6SourcePort, dport=DHCPv6DestPort)/
self.build_dhcpv6_pd_renew(p, self.target(1), self.node(1)))
self.logger.info("Checking for a DHCPv6 Reply message...")
r1 = self.node(1).received(src=self.target(1).link_local_ip(), type=DHCP6_Reply)
assertEqual(1, len(r1), "expected to receive a DHCPv6 Reply message")
assertHasLayer(DHCP6OptIA_PD, r1[0], "expected the DHCPv6 Reply to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r1[0], "expected the DHCPv6 Reply to contain an IA Prefix")
| gpl-3.0 | 802,280,280,625,709,000 | 41.026316 | 116 | 0.644737 | false | 3.270492 | false | false | false |
fr34kyn01535/PyForum | server.py | 1 | 2451 | # coding:utf-8
import os.path
import cherrypy
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding("utf-8")
from app import themen,diskussionen,beitraege,login,logout,administration,templates
def error_page(status, message, traceback, version):
return templates.RenderTemplate("error.html",title="Error",status=status,message=message,traceback=traceback,version=version);
cherrypy.config.update({'error_page.default': error_page})
cherrypy.config.update({'error_page.401': error_page})
cherrypy.config.update({'error_page.402': error_page})
cherrypy.config.update({'error_page.403': error_page})
cherrypy.config.update({'error_page.404': error_page})
cherrypy.config.update({'error_page.500': error_page})
def main():
cherrypy.Application.currentDir_s = os.path.dirname(os.path.abspath(__file__))
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': 8082,
})
cherrypy.engine.autoreload.unsubscribe()
cherrypy.engine.timeout_monitor.unsubscribe()
dynamic = {'/': {
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
'tools.sessions.on': True,
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
    }}
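    # MethodDispatcher maps HTTP verbs (GET/POST/...) onto methods of the
    # mounted Request objects; the same block switches on UTF-8 encoding and
    # sessions for every dynamic mount below.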
cherrypy.tree.mount(themen.Request(), '/', dynamic)
cherrypy.tree.mount(diskussionen.Request(), '/diskussionen', dynamic)
cherrypy.tree.mount(beitraege.Request(), '/beitraege', dynamic)
cherrypy.tree.mount(login.Request(), '/login', dynamic)
cherrypy.tree.mount(logout.Request(), '/logout', dynamic)
cherrypy.tree.mount(administration.Request(), '/administration', dynamic)
cherrypy.tree.mount(None, '/js', {'/': {
'tools.gzip.on' : True,
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(cherrypy.Application.currentDir_s, 'js'),
'tools.expires.on' : True,
'tools.expires.secs' : 0
}})
cherrypy.tree.mount(None, '/css', {'/': {
'tools.gzip.on' : True,
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(cherrypy.Application.currentDir_s, 'css'),
'tools.expires.on' : True,
'tools.expires.secs' : 0
}})
cherrypy.tree.mount(None, '/fonts', {'/': {
'tools.gzip.on' : True,
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(cherrypy.Application.currentDir_s, 'fonts'),
'tools.expires.on' : True,
'tools.expires.secs' : 0
}})
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == '__main__':
main()
| gpl-2.0 | 7,957,577,721,202,830,000 | 30.423077 | 130 | 0.684619 | false | 2.953012 | true | false | false |
CauldronDevelopmentLLC/buildbot | buildbot/status/web/slaves.py | 1 | 6579 |
import time, urllib
from twisted.python import log
from twisted.web import html
from twisted.web.util import Redirect
from buildbot.status.web.base import HtmlResource, abbreviate_age, OneLineMixin, path_to_slave
from buildbot import version, util
# /buildslaves/$slavename
class OneBuildSlaveResource(HtmlResource, OneLineMixin):
addSlash = False
def __init__(self, slavename):
HtmlResource.__init__(self)
self.slavename = slavename
def getTitle(self, req):
return "Buildbot: %s" % html.escape(self.slavename)
    def getChild(self, path, req):
        if path == "shutdown":
            s = self.getStatus(req)
            slave = s.getSlave(self.slavename)
            slave.setGraceful(True)
            return Redirect(path_to_slave(req, slave))
        # any other child path falls through to the default resource lookup
        return HtmlResource.getChild(self, path, req)
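    # Requesting /buildslaves/<slavename>/shutdown therefore marks the slave
    # for graceful shutdown and redirects back to the slave's status page.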
def body(self, req):
s = self.getStatus(req)
slave = s.getSlave(self.slavename)
my_builders = []
for bname in s.getBuilderNames():
b = s.getBuilder(bname)
for bs in b.getSlaves():
                slavename = bs.getName()
                if slavename == self.slavename:
my_builders.append(b)
# Current builds
current_builds = []
for b in my_builders:
for cb in b.getCurrentBuilds():
if cb.getSlavename() == self.slavename:
current_builds.append(cb)
data = []
projectName = s.getProjectName()
data.append("<a href=\"%s\">%s</a>\n" % (self.path_to_root(req), projectName))
data.append("<h1>Build Slave: %s</h1>\n" % html.escape(self.slavename))
shutdown_url = req.childLink("shutdown")
if not slave.isConnected():
data.append("<h2>NOT CONNECTED</h2>\n")
elif not slave.getGraceful():
data.append('''<form method="POST" action="%s">
<input type="submit" value="Gracefully Shutdown">
</form>''' % shutdown_url)
else:
data.append("Gracefully shutting down...\n")
if current_builds:
data.append("<h2>Currently building:</h2>\n")
data.append("<ul>\n")
for build in current_builds:
data.append("<li>%s</li>\n" % self.make_line(req, build, True))
data.append("</ul>\n")
else:
data.append("<h2>no current builds</h2>\n")
# Recent builds
data.append("<h2>Recent builds:</h2>\n")
data.append("<ul>\n")
n = 0
try:
max_builds = int(req.args.get('builds')[0])
except:
max_builds = 10
for build in s.generateFinishedBuilds(builders=[b.getName() for b in my_builders]):
if build.getSlavename() == self.slavename:
n += 1
data.append("<li>%s</li>\n" % self.make_line(req, build, True))
if n > max_builds:
break
data.append("</ul>\n")
projectURL = s.getProjectURL()
projectName = s.getProjectName()
data.append('<hr /><div class="footer">\n')
welcomeurl = self.path_to_root(req) + "index.html"
data.append("[<a href=\"%s\">welcome</a>]\n" % welcomeurl)
data.append("<br />\n")
data.append('<a href="http://buildbot.sourceforge.net/">Buildbot</a>')
data.append("-%s " % version)
if projectName:
data.append("working for the ")
if projectURL:
data.append("<a href=\"%s\">%s</a> project." % (projectURL,
projectName))
else:
data.append("%s project." % projectName)
data.append("<br />\n")
data.append("Page built: " +
time.strftime("%a %d %b %Y %H:%M:%S",
time.localtime(util.now()))
+ "\n")
data.append("</div>\n")
return "".join(data)
# /buildslaves
class BuildSlavesResource(HtmlResource):
title = "BuildSlaves"
addSlash = True
def body(self, req):
s = self.getStatus(req)
data = ""
data += "<h1>Build Slaves</h1>\n"
used_by_builder = {}
for bname in s.getBuilderNames():
b = s.getBuilder(bname)
for bs in b.getSlaves():
slavename = bs.getName()
if slavename not in used_by_builder:
used_by_builder[slavename] = []
used_by_builder[slavename].append(bname)
data += "<ol>\n"
for name in util.naturalSort(s.getSlaveNames()):
slave = s.getSlave(name)
slave_status = s.botmaster.slaves[name].slave_status
isBusy = len(slave_status.getRunningBuilds())
data += " <li><a href=\"%s\">%s</a>:\n" % (req.childLink(urllib.quote(name,'')), name)
data += " <ul>\n"
builder_links = ['<a href="%s">%s</a>'
% (req.childLink("../builders/%s" % bname),bname)
for bname in used_by_builder.get(name, [])]
if builder_links:
data += (" <li>Used by Builders: %s</li>\n" %
", ".join(builder_links))
else:
data += " <li>Not used by any Builders</li>\n"
if slave.isConnected():
data += " <li>Slave is currently connected</li>\n"
admin = slave.getAdmin()
if admin:
# munge it to avoid feeding the spambot harvesters
admin = admin.replace("@", " -at- ")
data += " <li>Admin: %s</li>\n" % admin
last = slave.lastMessageReceived()
if last:
lt = time.strftime("%Y-%b-%d %H:%M:%S",
time.localtime(last))
age = abbreviate_age(time.time() - last)
data += " <li>Last heard from: %s " % age
data += '<font size="-1">(%s)</font>' % lt
data += "</li>\n"
if isBusy:
data += "<li>Slave is currently building.</li>"
else:
data += "<li>Slave is idle.</li>"
else:
data += " <li><b>Slave is NOT currently connected</b></li>\n"
data += " </ul>\n"
data += " </li>\n"
data += "\n"
data += "</ol>\n"
return data
def getChild(self, path, req):
return OneBuildSlaveResource(path)
| gpl-2.0 | 1,440,367,016,623,047,700 | 35.348066 | 98 | 0.495212 | false | 3.789747 | false | false | false |
chop-dbhi/varify-data-warehouse | vdw/assessments/migrations/0005_copy_categories.py | 1 | 21123 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.core.management import call_command
class Migration(DataMigration):
def forwards(self, orm):
call_command("loaddata", "assessment_categories.json")
for assessment in orm.Assessment.objects.all():
if assessment.category:
ac = orm.AssessmentCategory.objects.get(pk=assessment.category.id)
assessment.assessment_category = ac
assessment.save()
def backwards(self, orm):
call_command("loaddata", "categories.json")
for assessment in orm.Assessment.objects.all():
if assessment.assessment_category:
c = orm.Category.objects.get(pk=assessment.assessment_category.id)
assessment.category = c
assessment.save()
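    # forwards() loads the assessment_categories fixture and re-points each
    # Assessment at the AssessmentCategory sharing its old Category's primary
    # key; backwards() reloads the original categories fixture and performs
    # the inverse mapping.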
models = {
'assessments.assessment': {
'Meta': {'object_name': 'Assessment', 'db_table': "'assessment'"},
'assessment_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.AssessmentCategory']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.Category']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'evidence_details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'father_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'father'", 'to': "orm['assessments.ParentalResult']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mother_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mother'", 'to': "orm['assessments.ParentalResult']"}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pathogenicity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.Pathogenicity']"}),
'sample_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Result']"}),
'sanger_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sanger_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.SangerResult']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'assessments.assessmentcategory': {
'Meta': {'object_name': 'AssessmentCategory', 'db_table': "'assessment_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.category': {
'Meta': {'object_name': 'Category', 'db_table': "'category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.parentalresult': {
'Meta': {'object_name': 'ParentalResult', 'db_table': "'parental_result'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.pathogenicity': {
'Meta': {'object_name': 'Pathogenicity', 'db_table': "'pathogenicity'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.sangerresult': {
'Meta': {'object_name': 'SangerResult', 'db_table': "'sanger_result'"},
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.chromosome': {
'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'})
},
'genome.genotype': {
'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'literature.pubmed': {
'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"},
'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'phenotypes.phenotype': {
'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'})
},
'samples.batch': {
'Meta': {'ordering': "('project', 'label')", 'unique_together': "(('project', 'name'),)", 'object_name': 'Batch', 'db_table': "'batch'"},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'samples.person': {
'Meta': {'object_name': 'Person', 'db_table': "'person'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'samples.project': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'samples.relation': {
'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}),
'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'samples.result': {
'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"},
'base_counts': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 'null': 'True', 'blank': 'True'}),
'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'haplotype_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'raw_read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['samples.Sample']"}),
'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.sample': {
'Meta': {'ordering': "('project', 'batch', 'label')", 'unique_together': "(('batch', 'name', 'version'),)", 'object_name': 'Sample', 'db_table': "'sample'"},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}),
'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.IntegerField', [], {})
},
'variants.variant': {
'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"},
'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}),
'pos': ('django.db.models.fields.IntegerField', [], {}),
'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'})
},
'variants.variantphenotype': {
'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"},
'hgmd_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variant_phenotypes'", 'to': "orm['variants.Variant']"})
},
'variants.varianttype': {
'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['assessments']
symmetrical = True
| bsd-2-clause | 3,351,375,113,810,633,000 | 80.555985 | 192 | 0.546561 | false | 3.658929 | false | false | false |
watchtower/asynctest | asynctest/__init__.py | 1 | 2295 | import functools
class TestStatus: # FIXME should really be an Enum
pending = -1
failure = 0
success = 1
class Test:
def __init__(self, func, description):
self.func = func
self.description = description
self.status = TestStatus.pending
self._callback = None
self.manager = None
def callback(self, f):
self._callback = f
return f
def success(self):
if self.status == TestStatus.pending:
self.status = TestStatus.success
self.manager._test_complete(self)
def failure(self):
if self.status == TestStatus.pending:
self.status = TestStatus.failure
self.manager._test_complete(self)
def succeed_if(self, condition):
if condition:
self.success()
else:
self.failure()
def __call__(self):
if self.func is not None:
self.func()
if self._callback:
self._callback()
class test:
def __init__(self, description):
self.description = description
def __call__(self, f):
return Test(f, self.description)
class TestManager:
def __init__(self, tests):
self.tests = tests
self.test_status = []
if any(not isinstance(i, Test) for i in self.tests):
raise TypeError("Non-test passed to TestManager")
for t in self.tests:
t.manager = self
def add_test(self, t):
if not isinstance(t, Test):
raise TypeError("Non-test passed to TestManager")
        t.manager = self
        # register the test so run_all() and the completion count include it
        self.tests.append(t)
def _all_tests_complete(self):
print("{} tests complete.".format(len(self.tests)))
success = len([t for t in self.tests if t.status])
self.successes = success
print("There were {} successes, {} failures.".format(success, len(self.tests) - success))
def _test_complete(self, t):
self.test_status.append((t.description, t.status))
print("{}: {}".format(t.description, "success" if t.status else "failure"))
if len(self.test_status) == len(self.tests):
self._all_tests_complete()
def run_all(self):
for t in self.tests:
t()
return sum([t.status == TestStatus.failure for t in self.tests])
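# Illustrative usage sketch, using only the classes defined above: two
# trivial tests are registered with a TestManager and run synchronously;
# run_all() returns the number of failures (1 for this pair).
if __name__ == "__main__":
    @test("one equals one")
    def check_equality():
        pass
    @check_equality.callback
    def _equality_done():
        check_equality.succeed_if(1 == 1)
    @test("always fails")
    def check_failure():
        pass
    @check_failure.callback
    def _failure_done():
        check_failure.failure()
    manager = TestManager([check_equality, check_failure])
    failures = manager.run_all()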
| mit | 2,558,572,069,989,067,300 | 27.6875 | 97 | 0.578214 | false | 4.069149 | true | false | false |
linktlh/Toontown-journey | toontown/distributed/HoodMgr.py | 1 | 13166 | from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from pandac.PandaModules import *
import random
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
class HoodMgr(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('HoodMgr')
ToontownCentralInitialDropPoints = (
[-90.7, -60, 0.025, 102.575, 0, 0],
[-91.4, -40.5, -3.948, 125.763, 0, 0],
[-107.8, -17.8, -1.937, 149.456, 0, 0],
[-108.7, 12.8, -1.767, 158.756, 0, 0],
[-42.1, -22.8, -1.328, -248.1, 0, 0],
[-35.2, -60.2, 0.025, -265.639, 0, 0]
)
ToontownCentralHQDropPoints = (
[-43.5, 42.6, -0.55, -100.454, 0, 0],
[-53.0, 12.5, -2.948, 281.502, 0, 0],
[-40.3, -18.5, -0.913, -56.674, 0, 0],
[-1.9, -37.0, 0.025, -23.43, 0, 0],
[1.9, -5.9, 4, -37.941, 0, 0]
)
ToontownCentralTunnelDropPoints = (
[-28.3, 40.1, 0.25, 17.25, 0, 0],
[-63.75, 58.96, -0.5, -23.75, 0, 0],
[-106.93, 17.66, -2.2, 99, 0, 0],
[-116.0, -21.5, -0.038, 50, 0, 0],
[74.88, -115, 2.53, -224.41, 0, 0],
[30.488, -101.5, 2.53, -179.23, 0, 0]
)
dropPoints = {
ToontownGlobals.DonaldsDock: (
[-28, -2.5, 5.8, 120, 0, 0],
[-22, 13, 5.8, 155.6, 0, 0],
[67, 47, 5.7, 134.7, 0, 0],
[62, 19, 5.7, 97, 0, 0],
[66, -27, 5.7, 80.5, 0, 0],
[-114, -7, 5.7, -97, 0, 0],
[-108, 36, 5.7, -153.8, 0, 0],
[-116, -46, 5.7, -70.1, 0, 0],
[-63, -79, 5.7, -41.2, 0, 0],
[-2, -79, 5.7, 57.4, 0, 0],
[-38, -78, 5.7, 9.1, 0, 0]
),
ToontownGlobals.ToontownCentral: (
[-60, -8, 1.3, -90, 0, 0],
[-66, -9, 1.3, -274, 0, 0],
[17, -28, 4.1, -44, 0, 0],
[87.7, -22, 4, 66, 0, 0],
[-9.6, 61.1, 0, 132, 0, 0],
[-109.0, -2.5, -1.656, -90, 0, 0],
[-35.4, -81.3, 0.5, -4, 0, 0],
[-103, 72, 0, -141, 0, 0],
[93.5, -148.4, 2.5, 43, 0, 0],
[25, 123.4, 2.55, 272, 0, 0],
[48, 39, 4, 201, 0, 0],
[-80, -61, 0.1, -265, 0, 0],
[-46.875, 43.68, -1.05, 124, 0, 0],
[34, -105, 2.55, 45, 0, 0],
[16, -75, 2.55, 56, 0, 0],
[-27, -56, 0.1, 45, 0, 0],
[100, 27, 4.1, 150, 0, 0],
[-70, 4.6, -1.9, 90, 0, 0],
[-130.7, 50, 0.55, -111, 0, 0]
),
ToontownGlobals.TheBrrrgh: (
[35, -32, 6.2, 138, 0, 0],
[26, -105, 6.2, -339, 0, 0],
[-29, -139, 6.2, -385, 0, 0],
[-79, -123, 6.2, -369, 0, 0],
[-114, -86, 3, -54, 0, 0],
[-136, 9, 6.2, -125, 0, 0],
[-75, 92, 6.2, -187, 0, 0],
[-7, 75, 6.2, -187, 0, 0],
[-106, -42, 8.6, -111, 0, 0],
[-116, -44, 8.3, -20, 0, 0]
),
ToontownGlobals.MinniesMelodyland: (
[86, 44, -13.5, 121.1, 0, 0],
[88, -8, -13.5, 91, 0, 0],
[92, -76, -13.5, 62.5, 0, 0],
[53, -112, 6.5, 65.8, 0, 0],
[-69, -71, 6.5, -67.2, 0, 0],
[-75, 21, 6.5, -100.9, 0, 0],
[-21, 72, 6.5, -129.5, 0, 0],
[56, 72, 6.5, 138.2, 0, 0],
[-41, 47, 6.5, -98.9, 0, 0]
),
ToontownGlobals.DaisyGardens: (
[0, 0, 0, -10.5, 0, 0],
[76, 35, 0, -30.2, 0, 0],
[97, 106, 0, 51.4, 0, 0],
[51, 180, 10, 22.6, 0, 0],
[-14, 203, 10, 85.6, 0, 0],
[-58, 158, 10, -146.9, 0, 0],
[-86, 128, 0, -178.9, 0, 0],
[-64, 65, 0, 17.7, 0, 0],
[-13, 39, 0, -15.7, 0, 0],
[-12, 193, 0, -112.4, 0, 0],
[87, 128, 0, 45.4, 0, 0]
),
ToontownGlobals.DonaldsDreamland: (
[77, 91, 0, 124.4, 0, 0],
[29, 92, 0, -154.5, 0, 0],
[-28, 49, -16.4, -142, 0, 0],
[21, 40, -16, -65.1, 0, 0],
[48, 27, -15.4, -161, 0, 0],
[-2, -22, -15.2, -132.1, 0, 0],
[-92, -88, 0, -116.3, 0, 0],
[-56, -93, 0, -21.5, 0, 0],
[20, -88, 0, -123.4, 0, 0],
[76, -90, 0, 11, 0, 0]
),
ToontownGlobals.GoofySpeedway: (
[-0.7, 62, 0.08, 182, 0, 0],
[-1, -30, 0.06, 183, 0, 0],
[-13, -120, 0, 307, 0, 0],
[16.4, -120, 0, 65, 0, 0],
[-0.5, -90, 0, 182, 0, 0],
[-30, -25, -0.373, 326, 0, 0],
[29, -17, -0.373, 32, 0, 0]
),
ToontownGlobals.GolfZone: (
[-49.6, 102, 0, 162, 0, 0],
[-22.8, 36.6, 0, 157.5, 0, 0],
[40, 51, 0, 185, 0, 0],
[48.3, 122.2, 0, 192, 0, 0],
[106.3, 69.2, 0, 133, 0, 0],
[-81.5, 47.2, 0, 183, 0, 0],
[-80.5, -84.2, 0, 284, 0, 0],
[73, -111, 0, 354, 0, 0]
),
ToontownGlobals.OutdoorZone: (
[-165.8, 108, 0.025, 252, 0, 0],
[21, 130, 0.16, 170, 0, 0],
[93, 78.5, 0.23, 112, 0, 0],
[79, -1.6, 0.75, 163, 0, 0],
[10, 33, 5.32, 130.379, 0, 0],
[-200, -42, 0.025, 317.543, 0, 0],
[-21, -65, 0.335, -18, 0, 0],
[23, 68.5, 4.51, -22.808, 0, 0]
),
ToontownGlobals.Tutorial: (
[130.9, -8.6, -1.3, 105.5, 0, 0],
),
ToontownGlobals.SellbotHQ: (
[-15.1324, -197.522, -19.5944, 4.92024, 0, 0],
[35.9713, -193.266, -19.5944, 4.38194, 0, 0],
[136.858, -155.959, -0.139187, 88.4705, 0, 0],
[0.2818, -281.656, 0.883273, 355.735, 0, 0],
[53.7832, -160.498, -4.33266, 397.602, 0, 0],
[-55.1619, -184.358, -3.06033, 342.677, 0, 0]
),
ToontownGlobals.CashbotHQ: (
[102, -437, -23.439, 0, 0, 0],
[124, -437, -23.439, 0, 0, 0],
[110, -446, -23.439, 0, 0, 0],
[132, -446, -23.439, 0, 0, 0]
),
ToontownGlobals.LawbotHQ: (
[77.5, 129.13, -68.4, -166.6, 0, 0],
[-57.7, 80.75, -68.4, -139.2, 0, 0],
[203.3, 46.36, -68.4, -213.37, 0, 0],
[88.2, -336.52, -68.4, -720.4, 0, 0],
[232.77, -305.33, -68.4, -651, 0, 0],
[-20.16, -345.76, -68.4, -777.98, 0, 0]
),
ToontownGlobals.BossbotHQ: (
[65, 45, 0.025, 0, 0, 0],
[-0.045, 125.9, 0.025, 558, 0, 0],
[138,110, 0.025, 497, 0, 0],
[172, 3, 0.025, 791, 0, 0]
)
}
DefaultDropPoint = [0, 0, 0, 0, 0, 0]
hoodName2Id = {
'dd': ToontownGlobals.DonaldsDock,
'tt': ToontownGlobals.ToontownCentral,
'br': ToontownGlobals.TheBrrrgh,
'mm': ToontownGlobals.MinniesMelodyland,
'dg': ToontownGlobals.DaisyGardens,
'oz': ToontownGlobals.OutdoorZone,
'ff': ToontownGlobals.FunnyFarm,
'gs': ToontownGlobals.GoofySpeedway,
'dl': ToontownGlobals.DonaldsDreamland,
'bosshq': ToontownGlobals.BossbotHQ,
'sellhq': ToontownGlobals.SellbotHQ,
'cashhq': ToontownGlobals.CashbotHQ,
'lawhq': ToontownGlobals.LawbotHQ,
'gz': ToontownGlobals.GolfZone
}
hoodId2Name = {
ToontownGlobals.DonaldsDock: 'dd',
ToontownGlobals.ToontownCentral: 'tt',
ToontownGlobals.Tutorial: 'tt',
ToontownGlobals.TheBrrrgh: 'br',
ToontownGlobals.MinniesMelodyland: 'mm',
ToontownGlobals.DaisyGardens: 'dg',
ToontownGlobals.OutdoorZone: 'oz',
ToontownGlobals.FunnyFarm: 'ff',
ToontownGlobals.GoofySpeedway: 'gs',
ToontownGlobals.DonaldsDreamland: 'dl',
ToontownGlobals.BossbotHQ: 'bosshq',
ToontownGlobals.SellbotHQ: 'sellhq',
ToontownGlobals.CashbotHQ: 'cashhq',
ToontownGlobals.LawbotHQ: 'lawhq',
ToontownGlobals.GolfZone: 'gz'
}
dbgDropMode = 0
currentDropPoint = 0
def __init__(self, cr):
self.cr = cr
def getDropPoint(self, dropPointList):
if self.dbgDropMode == 0:
return random.choice(dropPointList)
else:
droppnt = self.currentDropPoint % len(dropPointList)
self.currentDropPoint = (self.currentDropPoint + 1) % len(dropPointList)
return dropPointList[droppnt]
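    # Each drop point is an [x, y, z, h, p, r] list; an illustrative call
    # (instance and avatar names here are hypothetical) would be:
    #   pos_hpr = hoodMgr.getDropPoint(hoodMgr.dropPoints[ToontownGlobals.ToontownCentral])
    #   toon.setPosHpr(*pos_hpr)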
def getAvailableZones(self):
        if base.launcher is None:
return self.getZonesInPhase(4) + self.getZonesInPhase(6) + self.getZonesInPhase(8) + self.getZonesInPhase(9) + self.getZonesInPhase(10) + self.getZonesInPhase(11) + self.getZonesInPhase(12) + self.getZonesInPhase(13)
else:
zones = []
for phase in set(ToontownGlobals.phaseMap.values()):
if base.launcher.getPhaseComplete(phase):
zones = zones + self.getZonesInPhase(phase)
return zones
def getZonesInPhase(self, phase):
p = []
for i in ToontownGlobals.phaseMap.items():
if i[1] == phase:
p.append(i[0])
return p
def getPhaseFromHood(self, hoodId):
hoodId = ZoneUtil.getCanonicalHoodId(hoodId)
return ToontownGlobals.phaseMap[hoodId]
def getPlaygroundCenterFromId(self, hoodId):
dropPointList = self.dropPoints.get(hoodId, None)
if dropPointList:
return self.getDropPoint(dropPointList)
else:
self.notify.warning('getPlaygroundCenterFromId: No such hood name as: ' + str(hoodId))
return self.DefaultDropPoint
def getIdFromName(self, hoodName):
id = self.hoodName2Id.get(hoodName)
if id:
return id
else:
self.notify.error('No such hood name as: %s' % hoodName)
def getNameFromId(self, hoodId):
name = self.hoodId2Name.get(hoodId)
if name:
return name
else:
self.notify.error('No such hood id as: %s' % hoodId)
def getFullnameFromId(self, hoodId):
hoodId = ZoneUtil.getCanonicalZoneId(hoodId)
return ToontownGlobals.hoodNameMap[hoodId][-1]
def addLinkTunnelHooks(self, hoodPart, nodeList, currentZoneId):
tunnelOriginList = []
for i in nodeList:
linkTunnelNPC = i.findAllMatches('**/linktunnel*')
for p in xrange(linkTunnelNPC.getNumPaths()):
linkTunnel = linkTunnelNPC.getPath(p)
name = linkTunnel.getName()
nameParts = name.split('_')
hoodStr = nameParts[1]
zoneStr = nameParts[2]
hoodId = self.getIdFromName(hoodStr)
zoneId = int(zoneStr)
hoodId = ZoneUtil.getTrueZoneId(hoodId, currentZoneId)
zoneId = ZoneUtil.getTrueZoneId(zoneId, currentZoneId)
linkSphere = linkTunnel.find('**/tunnel_trigger')
if linkSphere.isEmpty():
linkSphere = linkTunnel.find('**/tunnel_sphere')
if not linkSphere.isEmpty():
cnode = linkSphere.node()
cnode.setName('tunnel_trigger_' + hoodStr + '_' + zoneStr)
cnode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.GhostBitmask)
else:
linkSphere = linkTunnel.find('**/tunnel_trigger_' + hoodStr + '_' + zoneStr)
if linkSphere.isEmpty():
self.notify.error('tunnel_trigger not found')
tunnelOrigin = linkTunnel.find('**/tunnel_origin')
if tunnelOrigin.isEmpty():
self.notify.error('tunnel_origin not found')
tunnelOriginPlaceHolder = render.attachNewNode('toph_' + hoodStr + '_' + zoneStr)
tunnelOriginList.append(tunnelOriginPlaceHolder)
tunnelOriginPlaceHolder.setPos(tunnelOrigin.getPos(render))
tunnelOriginPlaceHolder.setHpr(tunnelOrigin.getHpr(render))
hood = base.localAvatar.cr.playGame.hood
if ZoneUtil.tutorialDict:
how = 'teleportIn'
tutorialFlag = 1
else:
how = 'tunnelIn'
tutorialFlag = 0
hoodPart.accept('enter' + linkSphere.getName(), hoodPart.handleEnterTunnel, [{'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': how,
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'tunnelOrigin': tunnelOriginPlaceHolder,
'tutorial': tutorialFlag}])
return tunnelOriginList
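    # Tunnel nodes are expected to follow the "linktunnel_<hood>_<zone>..."
    # naming scheme (e.g. a hypothetical "linktunnel_dd_1000_DNARoot" parses
    # to hood "dd", zone 1000); both ids are then mapped through
    # ZoneUtil.getTrueZoneId before the collision trigger and tunnel origin
    # placeholder are wired up.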
def extractGroupName(self, groupFullName):
return groupFullName.split(':', 1)[0]
def makeLinkTunnelName(self, hoodId, currentZone):
return '**/toph_' + self.getNameFromId(hoodId) + '_' + str(currentZone)
| apache-2.0 | -2,892,375,891,941,455,400 | 39.018237 | 228 | 0.478353 | false | 2.847318 | false | false | false |
shiquanwang/numba | numba/control_flow/cfstats.py | 1 | 4105 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import nodes
from numba.reporting import getpos
class StatementDescr(object):
is_assignment = False
class LoopDescr(object):
def __init__(self, next_block, loop_block):
self.next_block = next_block
self.loop_block = loop_block
self.exceptions = []
class ExceptionDescr(object):
"""Exception handling helper.
entry_point ControlBlock Exception handling entry point
finally_enter ControlBlock Normal finally clause entry point
finally_exit ControlBlock Normal finally clause exit point
"""
def __init__(self, entry_point, finally_enter=None, finally_exit=None):
self.entry_point = entry_point
self.finally_enter = finally_enter
self.finally_exit = finally_exit
class NameAssignment(object):
is_assignment = True
def __init__(self, lhs, rhs, entry, assignment_node, warn_unused=True):
if not hasattr(lhs, 'cf_state'):
lhs.cf_state = set()
if not hasattr(lhs, 'cf_is_null'):
lhs.cf_is_null = False
self.lhs = lhs
self.rhs = rhs
self.assignment_node = assignment_node
self.entry = entry
self.pos = getpos(lhs)
self.refs = set()
self.is_arg = False
self.is_deletion = False
# NOTE: this is imperfect, since it means warnings are disabled for
# *all* definitions in the function...
self.entry.warn_unused = warn_unused
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
def infer_type(self, scope):
return self.rhs.infer_type(scope)
def type_dependencies(self, scope):
return self.rhs.type_dependencies(scope)
class AttributeAssignment(object):
"""
Assignment to some attribute. We need to detect assignments in the
constructor of extension types.
"""
def __init__(self, assmnt):
self.assignment_node = assmnt
self.lhs = assmnt.targets[0]
self.rhs = assmnt.value
class Argument(NameAssignment):
def __init__(self, lhs, rhs, entry):
NameAssignment.__init__(self, lhs, rhs, entry)
self.is_arg = True
class PhiNode(nodes.Node):
def __init__(self, block, variable):
self.block = block
# Unrenamed variable. This will be replaced by the renamed version
self.variable = variable
self.type = None
# self.incoming_blocks = []
# Set of incoming variables
self.incoming = set()
self.phis = set()
self.assignment_node = self
@property
def entry(self):
return self.variable
def add_incoming_block(self, block):
self.incoming_blocks.append(block)
def add(self, block, assmnt):
if assmnt is not self:
self.phis.add((block, assmnt))
def __repr__(self):
lhs = self.variable.name
if self.variable.renamed_name:
lhs = self.variable.unmangled_name
incoming = ", ".join("var(%s, %s)" % (var_in.unmangled_name, var_in.type)
for var_in in self.incoming)
if self.variable.type:
type = str(self.variable.type)
else:
type = ""
return "%s %s = phi(%s)" % (type, lhs, incoming)
def find_incoming(self):
for parent_block in self.block.parents:
name = self.variable.name
incoming_var = parent_block.symtab.lookup_most_recent(name)
yield parent_block, incoming_var
class NameDeletion(NameAssignment):
def __init__(self, lhs, entry):
NameAssignment.__init__(self, lhs, lhs, entry)
self.is_deletion = True
class Uninitialized(object):
pass
class NameReference(object):
def __init__(self, node, entry):
if not hasattr(node, 'cf_state'):
node.cf_state = set()
self.node = node
self.entry = entry
self.pos = getpos(node)
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
| bsd-2-clause | -3,426,023,315,412,407,300 | 27.908451 | 81 | 0.607065 | false | 3.872642 | false | false | false |
pope/SublimeYetAnotherCodeSearch | tests/test_csearch.py | 1 | 2481 | import sublime
import os.path
import shutil
import textwrap
import time
import uuid
from YetAnotherCodeSearch.tests import CommandTestCase
_NEEDLE_IN_HAYSTACK = 'cc5b252b-e7fb-5145-bf8a-ed272e3aa7bf'
class CsearchCommandTest(CommandTestCase):
def setUp(self):
super(CsearchCommandTest, self).setUp()
if os.path.isfile(self.index):
return
self.window.run_command('cindex', {'index_project': True})
self._wait_for_status(self.view)
assert os.path.isfile(self.index)
def test_csearch_exists(self):
self.assertIsNotNone(shutil.which('csearch'))
def test_csearch(self):
results_view = self._search(_NEEDLE_IN_HAYSTACK)
expected = textwrap.dedent("""\
Searching for "{0}"
{1}/test_csearch.py:
12: _NEEDLE_IN_HAYSTACK = '{0}'
1 matches across 1 files
""").format(_NEEDLE_IN_HAYSTACK, self.project_path)
actual = results_view.substr(sublime.Region(0, results_view.size()))
self.assertEquals(expected, actual)
def test_csearch_no_matches(self):
query = str(uuid.uuid4())
results_view = self._search(query)
expected = textwrap.dedent("""\
Searching for "{0}"
No matches found
""").format(query, self.project_path)
actual = results_view.substr(sublime.Region(0, results_view.size()))
self.assertEquals(expected, actual)
def test_csearch_go_to_file(self):
results_view = self._search(_NEEDLE_IN_HAYSTACK)
pt = results_view.text_point(3, 10) # Line 4, 10 characters in
results_view.sel().clear()
results_view.sel().add(sublime.Region(pt))
self.window.run_command('code_search_results_go_to_file')
self.assertEquals('{0}/test_csearch.py'.format(self.project_path),
self.window.active_view().file_name())
def _wait_for_status(self, view):
max_iters = 10
while max_iters > 0 and view.get_status('YetAnotherCodeSearch') != '':
time.sleep(0.1)
max_iters -= 1
assert '' == view.get_status('YetAnotherCodeSearch')
def _search(self, query):
self.window.run_command('csearch', {'query': query})
results_view = next((view for view in self.window.views()
if view.name() == 'Code Search Results'))
self._wait_for_status(results_view)
return results_view
| mit | -3,462,537,003,884,839,400 | 32.527027 | 78 | 0.612253 | false | 3.616618 | true | false | false |
awong1900/platformio | platformio/builder/scripts/frameworks/mbed.py | 1 | 7172 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
"""
mbed
The mbed framework The mbed SDK has been designed to provide enough
hardware abstraction to be intuitive and concise, yet powerful enough to
build complex projects. It is built on the low-level ARM CMSIS APIs,
allowing you to code down to the metal if needed. In addition to RTOS,
USB and Networking libraries, a cookbook of hundreds of reusable
peripheral and module libraries have been built on top of the SDK by
the mbed Developer Community.
http://mbed.org/
"""
import re
import xml.etree.ElementTree as ElementTree
from binascii import crc32
from os import walk
from os.path import basename, isfile, join, normpath
from SCons.Script import DefaultEnvironment, Exit
env = DefaultEnvironment()
BOARD_OPTS = env.get("BOARD_OPTIONS", {}).get("build", {})
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-mbed")
)
MBED_VARIANTS = {
"stm32f3discovery": "DISCO_F303VC",
"stm32f4discovery": "DISCO_F407VG",
"stm32f429discovery": "DISCO_F429ZI",
"blueboard_lpc11u24": "LPC11U24",
"dipcortexm0": "LPC11U24",
"seeeduinoArchPro": "ARCH_PRO",
"ubloxc027": "UBLOX_C027",
"lpc1114fn28": "LPC1114",
"lpc11u35": "LPC11U35_401",
"mbuino": "LPC11U24",
"nrf51_mkit": "NRF51822",
"seeedTinyBLE": "SEEED_TINY_BLE",
"redBearLab": "RBLAB_NRF51822",
"nrf51-dt": "NRF51_DK",
"redBearLabBLENano": "RBLAB_NRF51822",
"wallBotBLE": "NRF51822",
"frdm_kl25z": "KL25Z",
"frdm_kl46z": "KL46Z",
"frdm_k64f": "K64F",
"frdm_kl05z": "KL05Z",
"frdm_k20d50m": "K20D50M",
"frdm_k22f": "K22F"
}
MBED_LIBS_MAP = {
"dsp": {"ar": ["dsp", "cmsis_dsp"]},
"eth": {"ar": ["eth"], "deps": ["rtos"]},
"fat": {"ar": ["fat"]},
"rtos": {"ar": ["rtos", "rtx"]},
"usb": {"ar": ["USBDevice"]},
"usb_host": {"ar": ["USBHost"]}
}
def get_mbedlib_includes():
result = []
for lib in MBED_LIBS_MAP.keys():
includes = []
lib_dir = join(env.subst("$PLATFORMFW_DIR"), "libs", lib)
for _, _, files in walk(lib_dir):
for libfile in files:
if libfile.endswith(".h"):
includes.append(libfile)
result.append((lib, set(includes)))
return result
def get_used_mbedlibs():
re_includes = re.compile(r"^(#include\s+(?:\<|\")([^\r\n\"]+))",
re.M | re.I)
srcincs = []
for root, _, files in walk(env.get("PROJECTSRC_DIR")):
for pfile in files:
if not any([pfile.endswith(ext) for ext in (".h", ".c", ".cpp")]):
continue
with open(join(root, pfile)) as fp:
srcincs.extend([i[1] for i in re_includes.findall(fp.read())])
srcincs = set(srcincs)
result = {}
for libname, libincs in get_mbedlib_includes():
if libincs & srcincs and libname not in result:
result[libname] = MBED_LIBS_MAP[libname]
return result
def add_mbedlib(libname, libar):
if libar in env.get("LIBS"):
return
lib_dir = join(env.subst("$PLATFORMFW_DIR"), "libs", libname)
if not isfile(join(lib_dir, "TARGET_%s" % variant,
"TOOLCHAIN_GCC_ARM", "lib%s.a" % libar)):
Exit("Error: %s board doesn't support %s library!" %
(env.get("BOARD"), libname))
env.Append(
LIBPATH=[
join(env.subst("$PLATFORMFW_DIR"), "libs", libname,
"TARGET_%s" % variant, "TOOLCHAIN_GCC_ARM")
],
LIBS=[libar]
)
sysincdirs = (
"eth",
"include",
"ipv4",
"lwip-eth",
"lwip-sys"
)
for root, _, files in walk(lib_dir):
if (not any(f.endswith(".h") for f in files) and
basename(root) not in sysincdirs):
continue
var_dir = join("$BUILD_DIR", "FrameworkMbed%sInc%d" %
(libname.upper(), crc32(root)))
if var_dir in env.get("CPPPATH"):
continue
env.VariantDir(var_dir, root)
env.Append(CPPPATH=[var_dir])
def parse_eix_file(filename):
result = {}
paths = (
("CFLAGS", "./Target/Source/CC/Switch"),
("CXXFLAGS", "./Target/Source/CPPC/Switch"),
("CPPDEFINES", "./Target/Source/Symbols/Symbol"),
("FILES", "./Target/Files/File"),
("LINKFLAGS", "./Target/Source/LD/Switch"),
("OBJFILES", "./Target/Source/Addobjects/Addobject"),
("LIBPATH", "./Target/Linker/Librarypaths/Librarypath"),
("STDLIBS", "./Target/Source/Syslibs/Library"),
("LDSCRIPT_PATH", "./Target/Source/Scriptfile"),
("CPPPATH", "./Target/Compiler/Includepaths/Includepath")
)
tree = ElementTree.parse(filename)
for (key, path) in paths:
if key not in result:
result[key] = []
for node in tree.findall(path):
_nkeys = node.keys()
result[key].append(
node.get(_nkeys[0]) if len(_nkeys) == 1 else node.attrib)
return result
def get_build_flags(data):
flags = {}
cflags = set(data.get("CFLAGS", []))
cxxflags = set(data.get("CXXFLAGS", []))
cppflags = set(cflags & cxxflags)
flags['CPPFLAGS'] = list(cppflags)
flags['CXXFLAGS'] = list(cxxflags - cppflags)
flags['CFLAGS'] = list(cflags - cppflags)
return flags
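# Illustrative example of the split above (made-up switch values, not from a real
# .eix file): if CFLAGS == {"-Os", "-std=gnu99"} and CXXFLAGS == {"-Os", "-fno-rtti"},
# the shared "-Os" ends up in CPPFLAGS, while "-std=gnu99" stays in CFLAGS and
# "-fno-rtti" stays in CXXFLAGS.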
board_type = env.subst("$BOARD")
variant = MBED_VARIANTS[
board_type] if board_type in MBED_VARIANTS else board_type.upper()
eixdata = parse_eix_file(
join(env.subst("$PLATFORMFW_DIR"), "variant", variant, "%s.eix" % variant))
build_flags = get_build_flags(eixdata)
variant_dir = join("$PLATFORMFW_DIR", "variant", variant)
env.Replace(
CPPFLAGS=build_flags.get("CPPFLAGS", []),
CFLAGS=build_flags.get("CFLAGS", []),
CXXFLAGS=build_flags.get("CXXFLAGS", []),
LINKFLAGS=eixdata.get("LINKFLAGS", []),
CPPDEFINES=[define for define in eixdata.get("CPPDEFINES", [])],
LDSCRIPT_PATH=normpath(
join(variant_dir, eixdata.get("LDSCRIPT_PATH")[0]))
)
# Hook for K64F and K22F
if board_type in ("frdm_k22f", "frdm_k64f"):
env.Append(
LINKFLAGS=["-Wl,--start-group"]
)
for lib_path in eixdata.get("CPPPATH"):
_vdir = join("$BUILD_DIR", "FrameworkMbedInc%d" % crc32(lib_path))
env.VariantDir(_vdir, join(variant_dir, lib_path))
env.Append(CPPPATH=[_vdir])
env.Append(
LIBPATH=[join(variant_dir, lib_path)
for lib_path in eixdata.get("LIBPATH", [])
if lib_path.startswith("mbed")]
)
#
# Target: Build mbed Library
#
libs = [l for l in eixdata.get("STDLIBS", []) if l not in env.get("LIBS")]
libs.extend(["mbed", "c", "gcc"])
libs.append(env.Library(
join("$BUILD_DIR", "FrameworkMbed"),
[join(variant_dir, f)
for f in eixdata.get("OBJFILES", [])]
))
env.Append(LIBS=libs)
for _libname, _libdata in get_used_mbedlibs().iteritems():
for _libar in _libdata['ar']:
add_mbedlib(_libname, _libar)
if "deps" not in _libdata:
continue
for libdep in _libdata['deps']:
for _libar in MBED_LIBS_MAP[libdep]['ar']:
add_mbedlib(libdep, _libar)
| mit | -7,872,972,686,760,434,000 | 29.134454 | 79 | 0.591049 | false | 3.108799 | false | false | false |
cosmicAsymmetry/zulip | zerver/tests/test_signup.py | 1 | 51499 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm
from zerver.views import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.models import (
get_realm_by_string_id, get_prereg_user_by_email, get_user_profile_by_email,
PreregistrationUser, Realm, RealmAlias, Recipient,
Referral, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription, ScheduledJob
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from six.moves import urllib
from six.moves import range
import six
from typing import Any, Text
import os
class PublicURLTest(ZulipTestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
# e.g. self.client_post(url) if method is "post"
response = getattr(self, method)(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
        get_urls = {200: ["/accounts/home/", "/accounts/login/",
"/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/json/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent"],
}
# Add all files in 'templates/zerver/help' directory (except for 'main.html' and
# 'index.md') to `get_urls['200']` list.
for doc in os.listdir('./templates/zerver/help'):
if doc not in {'main.html', 'index.md', 'include'}:
get_urls[200].append('/help/' + os.path.splitext(doc)[0]) # Strip the extension.
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/exists",
"/json/subscriptions/property",
"/json/fetch_api_key",
"/json/users/me/pointer",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("client_get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("client_post", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("client_put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(get_realm_by_string_id("zulip"), stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test", "test")
user_profile = get_user_profile_by_email("[email protected]")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("[email protected]", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = '[email protected]'
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password.', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('[email protected]')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/[email protected]')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("[email protected]")
user_profile = get_user_profile_by_email('[email protected]')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("[email protected]", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("[email protected]", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm_by_string_id("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
with queries_captured() as queries:
self.register("test", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('[email protected]')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('[email protected]')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("[email protected]")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("[email protected]")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "[email protected]"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams):
# type: (str, List[Text]) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
# type: (List[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('[email protected]')
invitees = ['[email protected]', '[email protected]']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client_post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("[email protected]")
invitee = "[email protected]"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("[email protected]")
email = "[email protected]"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("[email protected]")
email = "[email protected]"
email2 = "[email protected]"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("[email protected]")
user_profile = get_user_profile_by_email("[email protected]")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("[email protected]", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("[email protected]", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "[email protected]"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("[email protected]")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""[email protected], [email protected],
[email protected]
[email protected]""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%[email protected]" % (user,)))
self.check_sent_emails(["[email protected]", "[email protected]",
"[email protected]", "[email protected]"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("[email protected]")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "[email protected]"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "[email protected]"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("[email protected]")
self.assert_json_error(self.invite("[email protected]", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("[email protected]")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "[email protected]",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="[email protected]"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("[email protected]")
existing = ["[email protected]", "[email protected]"]
new = ["[email protected]", "[email protected]"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('[email protected]')
self.assertEqual(prereg_user.email, '[email protected]')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("[email protected]")
external_address = "[email protected]"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("[email protected]")
external_address = "[email protected]"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("[email protected]")
invitee = "[email protected]"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("[email protected]", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_refer_friend(self):
# type: () -> None
self.login("[email protected]")
user = get_user_profile_by_email('[email protected]')
user.invites_granted = 1
user.invites_used = 0
user.save()
invitee = "[email protected]"
result = self.client_post('/json/refer_friend', dict(email=invitee))
self.assert_json_success(result)
# verify this works
Referral.objects.get(user_profile=user, email=invitee)
user = get_user_profile_by_email('[email protected]')
self.assertEqual(user.invites_used, 1)
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
current_user_email = "[email protected]"
self.login(current_user_email)
invitee = "[email protected]"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
data = {"email": invitee, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=0),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
email_jobs_to_deliver = ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=datetime.datetime.utcnow())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
self.assertTrue(send_email_job(job))
self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "[email protected]"
self.email2 = "[email protected]"
self.email3 = "[email protected]"
def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("[email protected]")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="[email protected]")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "[email protected]"
user_profile = get_user_profile_by_email("[email protected]")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "[email protected]"
user_profile = get_user_profile_by_email("[email protected]")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="[email protected]")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = 'test.com'
email = "[email protected]"
realm = get_realm_by_string_id('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.COMMUNITY)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_with_subdomain(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = "test.com"
email = "[email protected]"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm_by_string_id('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "[email protected]"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
username = "user1"
password = "test"
domain = "test.com"
email = "[email protected]"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'mit': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
username = "newguy"
email = "[email protected]"
password = "newpassword"
realm = get_realm_by_string_id('zulip')
domain = realm.domain
do_set_realm_default_language(realm, "de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s@%s" % (username, domain)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(username, password, domain)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_unique_completely_open_domain(self):
# type: () -> None
username = "user1"
password = "test"
email = "[email protected]"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
realm = get_realm_by_string_id('mit')
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_completely_open_domain_success(self):
# type: () -> None
username = "user1"
password = "test"
email = "[email protected]"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': '[email protected]'}, realm=realm)
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': '[email protected]'}, realm=realm)
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = HomepageForm({'email': '[email protected]'}, realm=None)
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
username = "newuser"
password = "testing"
domain = "zulip.com"
email = "[email protected]"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"[email protected]"],
result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"[email protected]"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
username = "sipbtest"
password = "test"
domain = "mit.edu"
email = "[email protected]"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = '[email protected]'
self.login(email)
user = get_user_profile_by_email('[email protected]')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('[email protected]')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = '[email protected]'
self.login(email)
user = get_user_profile_by_email('[email protected]')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = get_user_profile_by_email('[email protected]')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = '[email protected]'
user_2 = get_user_profile_by_email('[email protected]')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
| apache-2.0 | -5,722,122,378,230,343,000 | 42.559222 | 114 | 0.574786 | false | 4.121268 | true | false | false |
rlpy/rlpy | rlpy/Representations/LocalBases.py | 1 | 7491 | """
Representations which use local bases function (e.g. kernels) distributed
in the statespace according to some scheme (e.g. grid, random, on previous
samples)
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from .Representation import Representation
import numpy as np
from rlpy.Tools.GeneralTools import addNewElementForAllActions
import matplotlib.pyplot as plt
try:
from .kernels import batch
except ImportError:
from .slow_kernels import batch
print("C-Extensions for kernels not available, expect slow runtime")
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
class LocalBases(Representation):
"""
abstract base class for representations that use local basis functions
"""
#: centers of bases
centers = None
#: widths of bases
widths = None
def __init__(self, domain, kernel, normalization=False, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param normalization: (Boolean) If true, normalize feature vector so
that sum( phi(s) ) = 1.
        Associates a kernel function with each feature (center/width pair).
"""
self.kernel = batch[kernel.__name__]
self.normalization = normalization
self.centers = np.zeros((0, domain.statespace_limits.shape[0]))
self.widths = np.zeros((0, domain.statespace_limits.shape[0]))
super(LocalBases, self).__init__(domain, seed=seed)
def phi_nonTerminal(self, s):
v = self.kernel(s, self.centers, self.widths)
if self.normalization and not v.sum() == 0.:
            # normalize such that each vector has an l1 norm of 1
v /= v.sum()
return v
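    # Illustrative example of the normalization above (made-up activations, not a
    # real domain): a raw kernel vector [0.2, 0.6] becomes [0.25, 0.75] after
    # dividing by its sum, so sum(phi(s)) == 1 whenever at least one kernel fires.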
def plot_2d_feature_centers(self, d1=None, d2=None):
"""
:param d1: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
:param d2: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
        Plots the centers of all features in dimensions d1 and d2.
If no dimensions are specified, the first two continuous dimensions
are shown.
"""
if d1 is None and d2 is None:
# just take the first two dimensions
d1, d2 = self.domain.continuous_dims[:2]
plt.figure("Feature Dimensions {} and {}".format(d1, d2))
for i in range(self.centers.shape[0]):
plt.plot([self.centers[i, d1]],
[self.centers[i, d2]], "r", marker="x")
plt.draw()
class NonparametricLocalBases(LocalBases):
def __init__(self, domain, kernel,
max_similarity=0.9, resolution=5, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param max_similarity: threshold to allow feature to be added to
representation. Larger max_similarity makes it \"easier\" to add
more features by permitting larger values of phi(s) before
discarding. (An existing feature function in phi() with large value
at phi(s) implies that it is very representative of the true
function at *s*. i.e., the value of a feature in phi(s) is
inversely related to the \"similarity\" of a potential new feature.
:param resolution: to be used by the ``kernel()`` function, see parent.
Determines *width* of basis functions, eg sigma in Gaussian basis.
"""
self.max_similarity = max_similarity
self.common_width = old_div((domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0]), resolution)
self.features_num = 0
super(
NonparametricLocalBases,
self).__init__(
domain,
kernel,
**kwargs)
def pre_discover(self, s, terminal, a, sn, terminaln):
norm = self.normalization
expanded = 0
self.normalization = False
if not terminal:
phi_s = self.phi_nonTerminal(s)
if np.all(phi_s < self.max_similarity):
self._add_feature(s)
expanded += 1
if not terminaln:
phi_s = self.phi_nonTerminal(sn)
if np.all(phi_s < self.max_similarity):
self._add_feature(sn)
expanded += 1
self.normalization = norm
return expanded
def _add_feature(self, center):
self.features_num += 1
self.centers = np.vstack((self.centers, center))
self.widths = np.vstack((self.widths, self.common_width))
# TODO if normalized, use Q estimate for center to fill weight_vec
new = np.zeros((self.domain.actions_num, 1))
self.weight_vec = addNewElementForAllActions(
self.weight_vec,
self.domain.actions_num,
new)
class RandomLocalBases(LocalBases):
def __init__(self, domain, kernel, num=100, resolution_min=5,
resolution_max=None, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param num: Fixed number of feature (kernel) functions to use in
EACH dimension. (for a total of features_num=numDims * num)
:param resolution_min: resolution selected uniform random, lower bound.
:param resolution_max: resolution selected uniform random, upper bound.
:param seed: the random seed to use when scattering basis functions.
Randomly scatter ``num`` feature functions throughout the domain, with
sigma / noise parameter selected uniform random between
``resolution_min`` and ``resolution_max``. NOTE these are
sensitive to the choice of coordinate (scale with coordinate units).
"""
self.features_num = num
self.dim_widths = (domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0])
self.resolution_max = resolution_max
self.resolution_min = resolution_min
super(
RandomLocalBases,
self).__init__(
domain,
kernel,
seed=seed,
**kwargs)
self.centers = np.zeros((num, len(self.dim_widths)))
self.widths = np.zeros((num, len(self.dim_widths)))
self.init_randomization()
def init_randomization(self):
for i in range(self.features_num):
for d in range(len(self.dim_widths)):
self.centers[i, d] = self.random_state.uniform(
self.domain.statespace_limits[d, 0],
self.domain.statespace_limits[d, 1])
self.widths[i, d] = self.random_state.uniform(
old_div(self.dim_widths[d], self.resolution_max),
old_div(self.dim_widths[d], self.resolution_min))
| bsd-3-clause | 6,855,771,218,240,773,000 | 39.058824 | 80 | 0.606595 | false | 4.177914 | false | false | false |
kavigupta/61a-analysis | src/analytics.py | 1 | 4920 | """
A module containing a variety of functions for analyzing the data. This is supposed to be more data
specific than statistics.
"""
import numpy as np
from tools import cached_property
def compensate_for_grader_means(evals, z_thresh=1):
"""
Compensates for grader means by subtracting each grader's average grades per problem. Eliminates
individuals for whom the graders are unusual.
"""
if not evals.evaluation_for(list(evals.emails)[0]).means_need_compensation:
return evals
problematic = set(_identify_problematic_ranges(evals, z_thresh))
filt = evals.remove(problematic)
zeroed = filt.zero_meaned()
return zeroed
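# Illustrative call pattern (hypothetical names; `evals` is an evaluations object
# loaded elsewhere in this project, not defined in this module):
#
#     cleaned = compensate_for_grader_means(evals, z_thresh=1.5)
#
# `cleaned` then holds zero-meaned scores, with students whose graders were
# unusual removed.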
class ExamPair:
"""
Structure representing a correlation between exam scores, as well as metadata on location.
"""
def __init__(self, first, second, are_time_adjacent, are_space_adjacent, are_same_room):
self.are_time_adjacent = are_time_adjacent
self.first = first
self.second = second
self.are_space_adjacent = are_space_adjacent
self.are_same_room = are_same_room
@cached_property
def correlation(self):
"""
        The correlation between the two exams' rubric items
"""
return self.first.correlation(self.second)
@cached_property
def abs_score_diff(self):
"""
The absolute difference between the exam scores
"""
return abs(self.first.score - self.second.score)
def __repr__(self):
return "ExamPair(%s, %s, %r, %r, %r)" % tuple(self)
def __hash__(self):
return hash((hash(self.first) + hash(self.second), tuple(self)[2:]))
def __eq__(self, other):
align = self.first == other.first and self.second == other.second
mis_align = self.first == other.second and self.second == other.first
if not align and not mis_align:
return False
return tuple(self)[2:] == tuple(other)[2:]
def __iter__(self):
return iter((self.first,
self.second,
self.are_time_adjacent,
self.are_space_adjacent,
self.are_same_room))
def all_pairs(graded_exam, seating_chart, time_delta, progress, require_same_room,
require_not_time_adj, adjacency_type):
"""
    Yields ExamPair objects for all pairs of individuals.
"""
if require_same_room:
for _, in_room in seating_chart.emails_by_room:
yield from _pairs_per_individual(graded_exam, seating_chart, time_delta, progress,
in_room, True, require_not_time_adj, adjacency_type)
else:
emails = list(graded_exam.emails)
yield from _pairs_per_individual(graded_exam, seating_chart, time_delta, progress,
emails, False, require_not_time_adj, adjacency_type)
def _pairs_per_individual(graded_exam, seating_chart, time_delta, progress, emails, known_same_room,
require_not_time_adj, adjacency_type):
p_bar = progress(len(emails))
for index_x, email_x in enumerate(emails):
p_bar.update(index_x)
if email_x not in graded_exam.emails:
continue
eval_x = graded_exam.evaluation_for(email_x)
if not known_same_room:
room_x = seating_chart.room_for(email_x)
for email_y in emails[index_x+1:]:
if email_y not in graded_exam.emails:
continue
if not known_same_room:
same_room = room_x == seating_chart.room_for(email_y)
else:
same_room = True
time_adjacent = abs(graded_exam.time_diff(email_x, email_y)) <= time_delta
if require_not_time_adj and time_adjacent:
continue
yield ExamPair(eval_x,
graded_exam.evaluation_for(email_y),
time_adjacent,
seating_chart.are_adjacent(email_x, email_y, adjacency_type),
same_room)
def _unusualness(grader, question):
"""
Get the unusualness of a grader with respect to a graded question; i.e., the average of the
z scores from the overall mean for each rubric item.
"""
overall_mean = question.mean_score
overall_std = question.std_score
by_grader = question.for_grader(grader)
return np.mean((np.abs(by_grader.mean_score - overall_mean) / overall_std).rubric_items)
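# Worked example (illustrative numbers only): if a rubric item has an overall
# mean of 7.0 and standard deviation of 2.0, and the grader's mean on that item
# is 8.0, the item contributes |8.0 - 7.0| / 2.0 = 0.5; _unusualness returns the
# average of these z scores over all rubric items.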
def _identify_problematic_ranges(evals, z_thresh):
"""
    Outputs an iterable of emails for which at least one grader had an unusualness greater than the
z threshold.
"""
for _, graded_question in evals:
for grader in graded_question.graders:
if _unusualness(grader, graded_question) > z_thresh:
yield from graded_question.for_grader(grader).emails
| gpl-3.0 | -3,319,539,819,622,582,300 | 40.344538 | 100 | 0.607927 | false | 3.796296 | false | false | false |
Vishakha1990/Lambdas | testing/digitalocean/test.py | 1 | 2597 | #!/usr/bin/python
import os, requests, time, json, argparse
API = "https://api.digitalocean.com/v2/droplets"
DROPLET_NAME = "ol-tester"
HEADERS = {
"Authorization": "Bearer "+os.environ['TOKEN'],
"Content-Type": "application/json"
}
def post(args):
r = requests.post(API, data=args, headers=HEADERS)
return r.json()
def get(args):
r = requests.get(API, data=args, headers=HEADERS)
return r.json()
def start():
r = requests.get("https://api.digitalocean.com/v2/account/keys", headers=HEADERS)
keys = map(lambda row: row['id'], r.json()['ssh_keys'])
args = {
"name":DROPLET_NAME,
"region":"nyc2",
"size":"512mb",
"image":"ubuntu-14-04-x64",
"ssh_keys":keys
}
r = requests.post(API, data=json.dumps(args), headers=HEADERS)
return r.json()
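# The API is expected to answer with a JSON body shaped roughly like
# {"droplet": {"id": ..., "status": "new", "networks": {"v4": [...]}}}
# (a sketch inferred from how the result is used below, not a full schema).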
def kill():
args = {}
droplets = get(args)['droplets']
for d in droplets:
if d['name'] == DROPLET_NAME:
print 'Deleting %s (%d)' % (d['name'], d['id'])
print requests.delete(API+'/'+str(d['id']), headers=HEADERS)
def lookup(droplet_id):
r = requests.get(API+'/'+str(droplet_id), headers=HEADERS)
return r.json()['droplet']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--quickstart', default=False, action='store_true')
args = parser.parse_args()
global TEST_SCRIPT
if args.quickstart:
TEST_SCRIPT = "qs_test.sh"
else:
TEST_SCRIPT = "test.sh"
# cleanup just in case
kill()
# create new droplet and wait for it
droplet = start()['droplet']
print droplet
while True:
droplet = lookup(droplet['id'])
# status
s = droplet['status']
assert(s in ['active', 'new'])
# addr
ip = None
for addr in droplet["networks"]["v4"]:
if addr["type"] == "public":
ip = addr["ip_address"]
print 'STATUS: %s, IP: %s' % (str(s), str(ip))
if s == 'active' and ip != None:
break
time.sleep(3)
time.sleep(30) # give SSH some time
scp = 'scp -o "StrictHostKeyChecking no" %s root@%s:/tmp' % (TEST_SCRIPT, ip)
print 'RUN ' + scp
rv = os.system(scp)
assert(rv == 0)
cmds = 'bash /tmp/%s' % TEST_SCRIPT
ssh = 'echo "<CMDS>" | ssh -o "StrictHostKeyChecking no" root@<IP>'
ssh = ssh.replace('<CMDS>', cmds).replace('<IP>', ip)
print 'RUN ' + ssh
rv = os.system(ssh)
assert(rv == 0)
# make sure we cleanup everything!
kill()
if __name__ == '__main__':
main()
| apache-2.0 | 2,783,647,885,308,764,000 | 24.97 | 85 | 0.560647 | false | 3.27904 | true | false | false |
yehudagale/fuzzyJoiner | old/TripletLossFacenetLSTM-angular.py | 1 | 21847 | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# """
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=1
ALPHA=30
USE_GRU=True
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=True
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
#labels[predictions.ravel() < 0.5].sum()
    fscore = 0.0
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
for i in range(len(positive)):
if positive[i] <= negative[i]:
true_positive += 1
else:
            false_negative += 1
false_positive += 1
print('tp' + str(true_positive))
print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
return fscore
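# Worked example (illustrative numbers): if 8 of 10 triplets are ordered
# correctly, true_positive = 8 and each miss counts as both a false negative
# and a false positive, so fscore = (2 * 8) / (16 + 2 + 2) = 0.8.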
def get_embedding_layer(tokenizer):
word_index = tokenizer.word_index
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print('about to get kz')
kz = KazumaCharEmbedding()
print('got kz')
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print("Number of words:" + str(num_words))
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
return embedding_layer
def get_sequences(texts, tokenizer):
sequences = {}
sequences['anchor'] = tokenizer.texts_to_sequences(texts['anchor'])
sequences['anchor'] = pad_sequences(sequences['anchor'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['negative'] = tokenizer.texts_to_sequences(texts['negative'])
sequences['negative'] = pad_sequences(sequences['negative'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['positive'] = tokenizer.texts_to_sequences(texts['positive'])
sequences['positive'] = pad_sequences(sequences['positive'], maxlen=MAX_SEQUENCE_LENGTH)
return sequences
def read_entities(filepath):
entities = []
with open(filepath) as fl:
for line in fl:
entities.append(line)
return entities
def read_file(file_path):
texts = {'anchor':[], 'negative':[], 'positive':[]}
fl = open(file_path, 'r')
i = 0
for line in fl:
line_array = line.split("|")
texts['anchor'].append(line_array[0])
texts['positive'].append(line_array[1])
texts['negative'].append(line_array[2])
i += 1
if i > DEBUG_DATA_LENGTH and DEBUG:
break
return texts
def split(entities, test_split = 0.2):
if DEBUG:
ents = entities[0:DEBUG_DATA_LENGTH]
else:
random.shuffle(entities)
ents = entities
num_validation_samples = int(test_split * len(ents))
return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
    # the theano import above is commented out, so T is undefined here; use the
    # TensorFlow tan (tf is imported at the top of this file) instead
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
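# Worked example (illustrative numbers, MARGIN = 1): for a triplet with
# anchor-positive distance 0.2 and anchor-negative distance 0.9 the loss is
# max(0, 0.2**2 - 0.9**2 + 1) = 0.23; once d(a,n)**2 >= d(a,p)**2 + margin the
# triplet contributes nothing to the mean.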
def triplet_loss(y_true, y_pred):
# margin = K.constant(MARGIN)
# return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
margin = K.constant(MARGIN)
return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
# return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
margin = K.constant(MARGIN)
lambda_p = 0.02
threshold = 0.1
a_p_distance = y_pred[:,0,0]
a_n_distance = y_pred[:,1,0]
p_n_distance = y_pred[:,2,0]
phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
psi = a_p_distance - threshold
return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
square_sum = K.sum(K.square(x), axis=-1, keepdims=True)
dist = K.sqrt(K.maximum(square_sum, K.epsilon()))
tanh = K.tanh(dist)
scale = tanh / dist
return x * scale
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
unique_text = []
entity2index = {}
for key in entity2same:
entity2index[key] = len(unique_text)
unique_text.append(key)
vals = entity2same[key]
for v in vals:
entity2index[v] = len(unique_text)
unique_text.append(v)
return unique_text, entity2index
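# Illustrative example: with entity2same == {'J. Smith': ['John Smith']},
# build_unique_entities returns unique_text == ['J. Smith', 'John Smith'] and
# entity2index == {'J. Smith': 0, 'John Smith': 1}.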
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
accuracy = 0
total = 0
triplets = {}
pos_distances = []
neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling of only 'semi-hard' negatives is not done here
# positives = expected_text - nearest_text
positives = expected_text
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
num_names = 4
names_generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
entity2same = {}
for entity in entities:
ret = names_generator.cleanse_data(entity)
if ret and len(ret) >= num_names:
entity2same[ret[0]] = ret[1:]
return entity2same
def embedded_representation_model(embedding_layer):
seq = Sequential()
seq.add(embedding_layer)
seq.add(Flatten())
return seq
def get_hidden_layer(name, net, is_last):
if USE_GRU:
if is_last:
return GRU(128, activation='relu', name=name)(net)
else:
return GRU(128, return_sequences=True, activation='relu', name=name)(net)
else:
return Dense(128, activation='relu', name=name)(net)
def build_model(embedder):
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
net = GRU(128, activation='relu', name='embed3')(net)
"""
for i in range(0, NUM_LAYERS):
net = get_hidden_layer('embed' + str(i), net, False)
net = get_hidden_layer('embed_last', net, True)
"""
# if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # "modified_loss" is not defined anywhere in this file; triplet_loss is
        # the modified margin loss defined above, so use that here
        model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-triplet-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'modified-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
while test_match_stats < .9 and counter < num_iter:
counter += 1
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| epl-1.0 | 8,191,197,515,958,977,000 | 37.395431 | 167 | 0.663249 | false | 3.366256 | true | false | false |
samlaudev/LeetCode | Python/Insert Delete GetRandom O(1) - Duplicates allowed/Solution.py | 1 | 2970 | # Problem: Insert Delete GetRandom O(1) - Duplicates allowed
#
# Design a data structure that supports all following operations in average O(1) time.
#
# Note: Duplicate elements are allowed.
# 1. insert(val): Inserts an item val to the collection.
# 2. remove(val): Removes an item val from the collection if present.
# 3. getRandom: Returns a random element from current collection of elements. The probability of each element being returned is linearly related to the number of same value the collection contains.
#
# Example:
#
# // Init an empty collection.
# RandomizedCollection collection = new RandomizedCollection();
#
# // Inserts 1 to the collection. Returns true as the collection did not contain 1.
# collection.insert(1);
#
# // Inserts another 1 to the collection. Returns false as the collection contained 1. Collection now contains [1,1].
# collection.insert(1);
#
# // Inserts 2 to the collection, returns true. Collection now contains [1,1,2].
# collection.insert(2);
#
# // getRandom should return 1 with the probability 2/3, and returns 2 with the probability 1/3.
# collection.getRandom();
#
# // Removes 1 from the collection, returns true. Collection now contains [1,2].
# collection.remove(1);
#
# // getRandom should return 1 and 2 both equally likely.
# collection.getRandom();
#
################################################################################
from random import randint
from collections import defaultdict
class RandomizedCollection(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__list = []
self.__used = defaultdict(list)
def insert(self, val):
"""
Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
:type val: int
:rtype: bool
"""
has = val in self.__used
self.__list += val,
self.__used[val] += len(self.__list) - 1,
return not has
def remove(self, val):
"""
Removes a value from the collection. Returns true if the collection contained the specified element.
:type val: int
:rtype: bool
"""
if val not in self.__used:
return False
last = self.__list.pop()
self.__used[last].remove(len(self.__list))
if val != last:
index = self.__used[val].pop()
self.__used[last].append(index)
self.__list[index] = last
if not self.__used[val]:
del self.__used[val]
return True
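    # Illustrative trace of the swap-with-last trick: with __list == [1, 1, 2]
    # and __used == {1: [0, 1], 2: [2]}, remove(1) pops the trailing 2, takes one
    # stored index of 1 (index 1), writes 2 into that slot and updates __used,
    # leaving __list == [1, 2] and __used == {1: [0], 2: [1]} -- all in O(1).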
def getRandom(self):
"""
Get a random element from the collection.
:rtype: int
"""
return self.__list[randint(0, len(self.__list) - 1)]
# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| mit | -2,704,729,365,606,001,700 | 30.595745 | 199 | 0.624916 | false | 4.153846 | false | false | false |
egabancho/invenio | invenio/legacy/search_engine/__init__.py | 1 | 332135 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301,W0703
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import os
import re
import time
import string
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from six import iteritems, string_types
## import Invenio stuff:
from invenio.base.globals import cfg
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_SCOAP3_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_BIBSORT_ENABLED, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BASE_URL, \
CFG_BIBFORMAT_HIDDEN_TAGS
try:
from invenio.config import CFG_BIBSORT_DEFAULT_FIELD, \
CFG_BIBSORT_DEFAULT_FIELD_ORDER
except ImportError:
CFG_BIBSORT_DEFAULT_FIELD = 'latest first'
CFG_BIBSORT_DEFAULT_FIELD_ORDER = 'd'
from invenio.modules.search.errors import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError
from invenio.legacy.bibrecord import (get_fieldvalues,
get_fieldvalues_alephseq_like)
from .utils import record_exists
from invenio.legacy.bibrecord import create_record, record_xml_output
from invenio.legacy.bibrank.record_sorter import (
get_bibrank_methods,
is_method_valid,
rank_records as rank_records_bibrank,
rank_by_citations)
from invenio.legacy.bibrank.downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.legacy.bibindex.engine_stemmer import stem
from invenio.modules.indexer.tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.modules.indexer.tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.legacy.bibindex.engine_utils import author_name_requires_phrase_search, \
get_field_tags
from invenio.legacy.bibindex.engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.legacy.bibindex.engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.legacy.bibindex.adminlib import get_idx_indexer
from invenio.modules.formatter import format_record, format_records, get_output_format_content_type, create_excel
from invenio.legacy.bibrank.downloads_grapher import create_download_history_graph_and_box
from invenio.modules.knowledge.api import get_kbr_values
from invenio.legacy.miscutil.data_cacher import DataCacher
from invenio.legacy.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.modules.access.control import acc_get_action_id
from invenio.modules.access.local_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.legacy.websearch.adminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from intbitset import intbitset
from invenio.legacy.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.modules.access.engine import acc_authorize_action
from invenio.ext.logging import register_exception
from invenio.ext.cache import cache
from invenio.utils.text import encode_for_xml, wash_for_utf8, strip_accents
from invenio.utils.html import get_mathjax_header
from invenio.utils.html import nmtoken_from_string
from invenio.legacy import bibrecord
import invenio.legacy.template
webstyle_templates = invenio.legacy.template.load('webstyle')
webcomment_templates = invenio.legacy.template.load('webcomment')
websearch_templates = invenio.legacy.template.load('websearch')
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, \
get_refersto_hitset, get_citedby_hitset, get_cited_by_list, \
get_refers_to_list, get_citers_log
from invenio.legacy.bibrank.citation_grapher import create_citation_history_graph_and_box
from invenio.legacy.bibrank.selfcites_searcher import get_self_cited_by_list, \
get_self_cited_by, \
get_self_refers_to_list
from invenio.legacy.dbquery import run_sql, run_sql_with_limit, \
wash_table_column_name, get_table_update_time
from invenio.legacy.webuser import getUid, collect_user_info, session_param_set
from invenio.legacy.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.base.i18n import gettext_set_language
from invenio.legacy.search_engine.query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio.utils import apache
from invenio.legacy.miscutil.solrutils_bibindex_searcher import solr_get_bitset
from invenio.legacy.miscutil.xapianutils_bibindex_searcher import xapian_get_bitset
from invenio.modules.search import services
from invenio.legacy.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.legacy.bibauthorid.config import LIMIT_TO_COLLECTIONS as BIBAUTHORID_LIMIT_TO_COLLECTIONS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
"search_info" : "I",
"overview" : "O",
"all_portalboxes" : "P",
"te_portalbox" : "Pte",
"tp_portalbox" : "Ptp",
"np_portalbox" : "Pnp",
"ne_portalbox" : "Pne",
"lt_portalbox" : "Plt",
"rt_portalbox" : "Prt",
"search_services": "SER"};
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except NameError:
restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
    """Return a list of collections that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
auths = acc_authorize_action(
user_info,
'viewrestrcoll',
batch_args=True,
collection=restricted_collection_cache.cache
)
for collection, auth in zip(restricted_collection_cache.cache, auths):
if auth[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
    Check if the user is the owner of the record, i.e. he is the submitter
    and/or belongs to an owner-like group authorized to 'see' the record.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
if CFG_CERN_SITE:
#the egroup might be in the form [email protected]
if email_or_group.replace('@cern.ch', ' [CERN]') in user_info['group']:
return True
return False
###FIXME: This method needs to be refactored
def is_user_viewer_of_record(user_info, recid):
"""
    Check if the user is allowed to view the record based on the MARC tags
inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
i.e. his email is inside the 506__m tag or he is inside an e-group listed
in the 506__m tag
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
    @return: True if the user is 'allowed to view' the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid):
"""
Check if the user is authorized to view the given recid. The function
    grants access in two cases: either the user has author rights on this
record, or he has view rights to the primary collection this record
belongs to.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
## At this point, either webcoll has not yet run or there are some
## restricted collections. Let's see first if the user own the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
if is_user_viewer_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if not restricted_collections and record_public_p(recid):
## The record is public and not part of any restricted collection
return (0, '')
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in restricted_collections:
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
            until the assignment is fully completed. Please come back later to
properly access this record.""")
else:
## The record either does not exists or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
    """Return stemming language for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
"""
Provides cache for tokenizer information for fields corresponding to indexes.
This class is not to be used directly; use function
get_field_tokenizer_type() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
field_tokenizer_cache.is_ok_p
except Exception:
field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
"""Return tokenizer type for given field corresponding to an index if applicable."""
if recreate_cache_if_needed:
field_tokenizer_cache.recreate_cache_if_needed()
tokenizer = None
try:
tokenizer = field_tokenizer_cache.cache[field_name]
except KeyError:
return None
return tokenizer
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
res = run_sql("SELECT name FROM collection")
for name in res:
ret[name[0]] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
if coll not in collection_reclist_cache.cache:
return intbitset() # collection does not exist; return empty set
if not collection_reclist_cache.cache[coll]:
# collection's reclist not in the cache yet, so calculate it
# and fill the cache:
reclist = intbitset()
query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
res = run_sql(query, (coll, ), 1)
if res and res[0][1]:
reclist = intbitset(res[0][1])
collection_reclist_cache.cache[coll] = reclist
# finally, return reclist:
return collection_reclist_cache.cache[coll]
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({'value': code,
'text': name
})
else:
formats.append({'value': 'hb',
'text': "HTML brief"
})
return formats
# Flask cache for search results.
from invenio.modules.search.cache import search_results_cache, get_search_results_cache_key
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if c not in ret:
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
time. So, if you call this function 1000 times, it can get very
slow because it will do 1000 table update time verifications, even
though collection names change not that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if f not in ret:
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
    """Returns alphabetically ordered list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT name FROM collection ORDER BY name ASC")
for c_name in res:
c_name = c_name[0]
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score ASC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"""
Returns list of whitespace-separated words from pattern, removing any
trailing punctuation-like signs from words in pattern.
"""
words = {}
# clean trailing punctuation signs inside pattern
pattern = re_punctuation_followed_by_space.sub(' ', pattern)
for word in pattern.split():
if word not in words:
words[word] = 1
return words.keys()
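# Example (illustrative): get_words_from_pattern("ellis, muon decay") would
# typically return ['ellis', 'muon', 'decay'] (order not guaranteed, since the
# words are collected in a dict).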
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
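    # Illustrative examples (assuming the usual index configuration): with
    # p='ellis muon', f='title', m=None this typically yields
    # [['+', 'ellis', 'title', 'w'], ['+', 'muon', 'title', 'w']], while
    # p='"nuclear physics"', f='title' yields
    # [['+', 'nuclear physics', 'title', 'a']] (a single exact-phrase unit).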
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'w'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and p.find(',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: x.group(1).replace(' ', '__SPACE__'), p)
# wash argument:
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in p.split(): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if pi.find(":") > 0:
fi, pi = pi.split(":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
pi = pi.strip() # strip eventual spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = pi.replace('"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = pi.replace("'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
## return search units:
return opfts
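# Illustrative (hypothetical) decompositions produced by
# create_basic_search_units(); the exact output depends on washing, accent
# stripping and on which logical fields/indexes are defined on the site:
#   create_basic_search_units(None, 'ellis muon', '', None)
#     --> [['+', 'ellis', '', 'w'], ['+', 'muon', '', 'w']]
#   create_basic_search_units(None, 'author:"ellis, j" muon', '', None)
#     --> [['+', 'ellis, j', 'author', 'a'], ['+', 'muon', '', 'w']]
#       (assuming 'author' is a defined logical field)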
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p='', em=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = get_output_format_content_type(of, 'text/xml')
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "intbitset":
req.content_type = "application/octet-stream"
req.send_http_header()
elif of == "recjson":
req.content_type = "application/json"
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: find
## eventual better place to this code)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
# Add metadata in meta tags for Google scholar-esque harvesting...
# only if we have a detailed meta format and we are looking at a
# single record
if recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT and \
record_exists(recID) == 1:
metaheaderadd += format_record(recID,
CFG_WEBSEARCH_DETAILED_META_FORMAT,
ln=ln)
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_BASE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
        # we know the collection, so let's allow page styles based on cc
#collection names may not satisfy rules for css classes which
#are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
#however it isn't clear what we should do about cases with
#numbers, so we leave them to fail. Everything else becomes "_"
css = nmtoken_from_string(cc).replace('.', '_').replace('-', '_').replace(':', '_')
body_css_classes.append(css)
## finally, print page header:
if em == '' or EM_REPOSITORY["header"] in em:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
else:
req.content_type = content_type
req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if of == "intbitset":
return intbitset()
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
if em == "" or EM_REPOSITORY["footer"] in em:
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_add_to_search_pattern(p, p1, f1, m1, op1):
"""Create the search pattern """
if not p1:
return p
init_search_pattern = p
# operation: AND, OR, AND NOT
    if op1 == 'a' and p: # we don't want '+' at the beginning of the query
op = ' +'
elif op1 == 'o':
op = ' |'
elif op1 == 'n':
op = ' -'
else:
op = ''
# field
field = ''
if f1:
field = f1 + ':'
# type of search
pattern = p1
start = '('
end = ')'
if m1 == 'e':
start = end = '"'
elif m1 == 'p':
start = end = "'"
elif m1 == 'r':
start = end = '/'
else: # m1 == 'o' or m1 =='a'
words = p1.strip().split(' ')
if len(words) == 1:
start = end = ''
pattern = field + words[0]
elif m1 == 'o':
pattern = ' |'.join([field + word for word in words])
else:
pattern = ' '.join([field + word for word in words])
        # avoid having field:(word1 word2) since this is not currently working correctly
return init_search_pattern + op + start + pattern + end
if not pattern:
return ''
    # avoid having field:(word1 word2) since this is not currently working correctly
return init_search_pattern + op + field + start + pattern + end
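# Illustrative walk-through (hypothetical values) of create_add_to_search_pattern():
#   create_add_to_search_pattern('ellis', 'muon kaon', 'title', 'o', 'a')
#     --> 'ellis +(title:muon |title:kaon)'
#   create_add_to_search_pattern('', 'higgs boson', 'title', 'e', 'a')
#     --> 'title:"higgs boson"'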
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")),
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")),
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action="", em=""):
"""Create search box for 'search again in the results page' functionality."""
if em != "" and EM_REPOSITORY["search_box"] not in em:
if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
return '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
else:
return ""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({'value': cx,
'text': cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({'value': CFG_SITE_NAME,
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({'value': '',
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("remove this publisher or journal") or _("remove this collection"))
})
for val in colls_nice:
# print collection:
if not cx.startswith("Unnamed collection"):
temp.append({'value': val['value'],
'text': val['text'],
'selected' : (c == re.sub(r"^[\s\-]*", "", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{'value': '',
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("add another publisher or journal") or _("add another collection"))
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{'value': CFG_SITE_NAME,
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower(), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value': code,
'text': name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
)
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
"""Creates a link to help switch from author to exact author while browsing"""
if action == 'browse':
search_fields = (f, f1, f2, f3)
if 'author' in search_fields or 'firstauthor' in search_fields:
def add_exact(field):
if field == 'author' or field == 'firstauthor':
return 'exact' + field
return field
fe, f1e, f2e, f3e = [add_exact(field) for field in search_fields]
link_name = f or f1
link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
link_name=link_name)
return ""
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value': '',
'text': _(CFG_BIBSORT_DEFAULT_FIELD)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
       Return the tuple (cc, colls_to_display, colls_to_search, hosted_colls, debug); the list
of collections to display is different from that to search in.
This is because users might have chosen 'split by collection'
functionality.
       The behaviour of "collections to display" depends solely on whether
    the user has deselected a particular collection: e.g. if the search started
    from the 'Articles and Preprints' page, and 'Preprints' was deselected,
    then the collection to display is 'Articles'. If nothing was deselected,
    then the collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
          * if it is equal to 1, then we can wash the colls list down
and search solely in the collection the user started from;
          * if it is equal to 0, then we are splitting to the first level
of collections, i.e. collections as they appear on the page
we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if ci in collection_reclist_cache.cache:
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if cc not in collection_reclist_cache.cache:
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if coll in collection_reclist_cache.cache:
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
    # the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
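# Hypothetical example of wash_colls(); the outcome depends entirely on the
# collection tree stored in the database.  Starting e.g. from
# cc='Articles & Preprints' with c=['Articles'] and split_colls=0, one could get
#   ('Articles & Preprints', ['Articles'], ['Articles'], [], '')
# i.e. (cc, colls_to_display, colls_to_search, hosted_colls, debug).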
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be `exact' (default),
'leading_to_comma', `leading_to_number'.
@type match_type: str
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match different term:
if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
## FIXME: workaround: escaping SQL wild-card signs, since KBR's
## exact search is doing LIKE query, so would match everything:
term_for_lookup = term_for_lookup.replace('%', '\\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e',
use_memoise=use_memoise):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
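# Hypothetical usage of get_synonym_terms(); the result depends on the contents
# of the given knowledge base (the KB name below is only an example):
#   get_synonym_terms('beta decay', 'INDEX-SYNONYM-TITLE', 'exact')
# might return e.g. ['beta-decay'] if such a synonym mapping is defined.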
def wash_output_format(ouput_format):
"""Wash output format FORMAT. Currently only prevents input like
'of=9' for backwards-compatible format that prints certain fields
only. (for this task, 'of=tm' is preferred)"""
if str(ouput_format[0:3]).isdigit() and len(ouput_format) != 6:
# asked to print MARC tags, but not enough digits,
# so let's switch back to HTML brief default
return 'hb'
else:
return ouput_format
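# Concrete examples of the washing behaviour:
#   wash_output_format('hb')      --> 'hb'      (left untouched)
#   wash_output_format('9')       --> 'hb'      (digits, but not 6 characters long)
#   wash_output_format('245__a')  --> '245__a'  (6-character MARC-like spec kept)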
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
(1-3 letters). TODO: instead of this approximative treatment, it
will be much better to introduce a temporal limit, e.g. to kill a
query if it does not finish in 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = p.strip()
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
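# Illustrative example (the exact cleanup depends on the re_pattern_* regexps
# defined at module level):
#   wash_pattern('  ellis  ')  -->  'ellis'
# Unquoted standalone wildcards following spaces (e.g. ' * ') are removed, and
# quoted phrases are protected while the pattern is being cleaned.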
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
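# Examples: wash_field(' Author ') --> 'author'; a legacy code such as 'au'
# would be mapped to 'author' only if CFG_WEBSEARCH_FIELDS_CONVERT defines such
# a mapping (installation-dependent).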
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
    (D1Y, D1M, D1D) year, month, day tuple and D2 or (D2Y, D2M, D2D)
    and return a (datetext1, datetext2) pair of datetime
strings in the YYYY-MM-DD HH:MM:SS format suitable for time
restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
                               # given month, but for our querying it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
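# Examples of how missing date parts are completed (pure function, no DB access):
#   wash_dates(d1y=2004)
#     --> ('2004-01-01 00:00:00', '9999-12-31 00:00:00')
#   wash_dates(d1='2004-07-01 12:00:00', d2y=2005, d2m=3)
#     --> ('2004-07-01 12:00:00', '2005-03-31 00:00:00')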
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
if not res or not res[0][0]:
return False
try:
return res[0][0].startswith("hostedcollection:")
except IndexError:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
if res:
return res[0][0]
else:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
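# Hypothetical example (depends on the collection tree in the database): for a
# tree CFG_SITE_NAME > 'Articles & Preprints' > 'Articles',
#   get_coll_ancestors('Articles')
#     --> [CFG_SITE_NAME, 'Articles & Preprints']
# (most distant ancestor first; the root collection is included here and is
# filtered out later by create_navtrail_links()).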
def get_coll_sons(coll, coll_type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'coll_type' for collection 'coll'.
If coll_type = '*', both regular and virtual collections will be returned.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
if coll_type == '*':
coll_type_query = " IN ('r', 'v')"
query_params = (coll, )
else:
coll_type_query = "=%s"
query_params = (coll_type, coll)
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type%s AND ccc.name=%%s" % coll_type_query
query += " ORDER BY cc.score ASC"
res = run_sql(query, query_params)
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
class CollectionAllChildrenDataCacher(DataCacher):
"""Cache for all children of a collection (regular & virtual, public & private)"""
def __init__(self):
def cache_filler():
def get_all_children(coll, coll_type='r', public_only=1, d_internal_coll_sons=None):
"""Return a list of all children of type 'coll_type' for collection 'coll'.
If public_only, then return only non-restricted child collections.
If coll_type='*', then return both regular and virtual collections.
d_internal_coll_sons is an internal dictionary used in recursion for
minimizing the number of database calls and should not be used outside
this scope.
"""
if not d_internal_coll_sons:
d_internal_coll_sons = {}
children = []
if coll not in d_internal_coll_sons:
d_internal_coll_sons[coll] = get_coll_sons(coll, coll_type, public_only)
for child in d_internal_coll_sons[coll]:
children.append(child)
children.extend(get_all_children(child, coll_type, public_only, d_internal_coll_sons)[0])
return children, d_internal_coll_sons
ret = {}
d_internal_coll_sons = None
collections = collection_reclist_cache.cache.keys()
for collection in collections:
ret[collection], d_internal_coll_sons = get_all_children(collection, '*', public_only=0, d_internal_coll_sons=d_internal_coll_sons)
return ret
def timestamp_verifier():
return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_allchildren_cache.is_ok_p:
raise Exception
except Exception:
collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
"""Returns the list of all children of a collection."""
if recreate_cache_if_needed:
collection_allchildren_cache.recreate_cache_if_needed()
if coll not in collection_allchildren_cache.cache:
return [] # collection does not exist; return empty list
return collection_allchildren_cache.cache[coll]
def get_coll_real_descendants(coll, coll_type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score ASC""",
(coll, coll_type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern_phrases(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Returns either biliographic phrases or words indexes."""
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and p.find(":") > 0: # does 'p' contain ':'?
f, p = p.split(":", 1)
## do we search in words indexes?
# FIXME uncomment this
#if not f:
# return browse_in_bibwords(req, p, f)
coll_hitset = intbitset()
for coll_name in colls:
coll_hitset |= get_collection_reclist(coll_name)
index_id = get_index_id_from_field(f)
if index_id != 0:
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
register_exception(req=req, alert_admin=True)
# probably there are no hits at all:
#req.write(_("No values found."))
return []
## try to check hits in these particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
if nbhits > 0:
browsed_phrases_in_colls.append([phrase, nbhits])
return browsed_phrases_in_colls
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Displays either biliographic phrases or words indexes."""
# load the right message language
_ = gettext_set_language(ln)
browsed_phrases_in_colls = browse_pattern_phrases(req, colls, p, f, rg, ln)
if len(browsed_phrases_in_colls) == 0:
req.write(_("No values found."))
return
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
    The 'ap' argument governs whether alternative patterns are to
be used in case there is no direct hit for (p,f,m). For
example, whether to replace non-alphanumeric characters by
spaces if it would give some hits. See the Search Internals
    document for detailed description. (ap=0 forbids the
alternative pattern usage, ap=1 permits it.)
'ap' is also internally used for allowing hidden tag search
(for requests coming from webcoll, for example). In this
case ap=-9
The 'of' argument governs whether to print or not some
information to the user in case of no match found. (Usually it
prints the information in case of HTML formats, otherwise it's
silent).
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return all universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
    # prepare hidden-tag related settings (tags the current user may not see):
myhiddens = cfg['CFG_BIBFORMAT_HIDDEN_TAGS']
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if not req and ap == -9: # special request, coming from webcoll
can_see_hidden = True
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for dummy_o, p, f, m in basic_search_units]
if 'fulltext' in fields_to_be_searched:
write_warning(_("Full-text search is currently available for all arXiv papers, many theses, a few report series and some journal articles"), req=req)
elif 'caption' in fields_to_be_searched:
write_warning(_("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") %
{'x_range_from_year': '2008',
'x_range_to_year': '2012'}, req=req)
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
write_warning(_("There is no index %(x_name)s. Searching for %(x_text)s in all fields.", x_name=bsu_f, x_text=bsu_p), req=req)
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
write_warning(_('Instead searching %(x_name)s.', x_name=str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError as excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term too generic, displaying only partial results..."), req=req)
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
display_nearest_terms_box = False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
if len(basic_search_unit_hitset) > 0 or \
ap<1 or \
bsu_o in ("|", "-") or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") %
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
write_warning("Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
write_warning(text, req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
return hitset_in_any_collection
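# Illustrative usage (hypothetical; actual hits depend on the indexed content):
#   hits = search_pattern(p='ellis muon', f='', m=None, of='id')
# returns an intbitset of recIDs matching both words; an empty pattern returns
# the full record universe, and 'of' starting with 'h' makes the function emit
# HTML warnings about the individual search stages when verbose is set.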
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
# parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
if verbose and of.startswith("h"):
write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# setting ap=0 to turn off approximate matching for 0 results.
# Doesn't work well in combinations.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box = False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
    # If searching with parentheses fails, perform the search ignoring parentheses
except SyntaxError:
write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
        # remove the parentheses in the query. The current implementation removes all the parentheses,
        # but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
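# Illustrative usage (hypothetical): a parenthesised query such as
#   search_pattern_parenthesised(p="(muon | kaon) -title:decay", of='id')
# is first split by SearchQueryParenthesisedParser into an
# [op1, expr1, op2, expr2, ...] list and the per-expression hitsets are then
# combined with the &, - and | operations shown above.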
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
Parameter 'ignore_synonyms' is a list of terms for which we
should not try to further find a synonym.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
tokenizer = get_field_tokenizer_type(f)
hitset_cjk = intbitset()
if tokenizer == "BibIndexCJKTokenizer":
if is_there_any_CJK_character_in_text(p):
cjk_tok = BibIndexCJKTokenizer()
chars = cjk_tok.tokenize_for_words(p)
for char in chars:
hitset_cjk |= search_unit_in_bibwords(char, f, wl)
## eventually look up runtime synonyms:
hitset_synonyms = intbitset()
if CFG_WEBSEARCH_SYNONYM_KBRS.has_key(f or 'anyfield'):
if ignore_synonyms is None:
ignore_synonyms = []
ignore_synonyms.append(p)
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][1]):
if p_synonym != p and \
not p_synonym in ignore_synonyms:
hitset_synonyms |= search_unit(p_synonym, f, m, wl,
ignore_synonyms)
## look up hits:
if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
# redirect to Solr
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
# redirect to Xapian
try:
return search_unit_in_xapian(p, f, m)
except:
# There were troubles with getting full-text search
# results from Xapian. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'refersto':
# we are doing search by the citation count
hitset = search_unit_refersto(p)
elif f == 'referstoexcludingselfcites':
# we are doing search by the citation count
hitset = search_unit_refersto_excluding_selfcites(p)
elif f == 'cataloguer':
# we are doing search by the cataloguer nickname
hitset = search_unit_in_record_history(p)
elif f == 'rawref':
from invenio.legacy.refextract.api import search_from_reference
field, pattern = search_from_reference(p)
return search_unit(pattern, field)
elif f == 'citedby':
# we are doing search by the citation count
hitset = search_unit_citedby(p)
elif f == 'collection':
# we are doing search by the collection name or MARC field
hitset = search_unit_collection(p, m, wl=wl)
elif f == 'tag':
module_found = False
try:
from invenio.modules.tags.search_units import search_unit_in_tags
module_found = True
except:
# WebTag module is disabled, so ignore 'tag' selector
pass
if module_found:
return search_unit_in_tags(p)
elif f == 'citedbyexcludingselfcites':
# we are doing search by the citation count
hitset = search_unit_citedby_excluding_selfcites(p)
elif m == 'a' or m == 'r' or f == 'subject':
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
if m == 'a' and index_id in get_idxpair_field_ids():
#for exact match on the admin configured fields we are searching in the pair tables
hitset = search_unit_in_idxpairs(p, f, m, wl)
else:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
# if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
# #if we have no results by doing exact matching, do partial matching
# #for removing the distinction between simple and double quotes
# hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
elif p.startswith("citedexcludingselfcites:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:], exclude_selfcites=True)
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
hitset |= hitset_cjk
return hitset
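# Illustrative usage sketch for search_unit() above.  The patterns, field
# names and resulting recIDs are hypothetical examples, not values taken
# from any particular installation:
#
#   hits = search_unit('ellis, j', f='author', m='a')     # exact phrase match
#   hits |= search_unit('neutrino*', f='title', wl=100)   # word search with wildcard limit
#   # 'hits' is an intbitset of matching record IDs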
def get_idxpair_field_ids():
"""Returns the list of ids for the fields that idxPAIRS should be used on"""
index_dict = dict(run_sql("SELECT name, id FROM idxINDEX"))
return [index_dict[field] for field in index_dict if field in cfg['CFG_WEBSEARCH_IDXPAIRS_FIELDS']]
def search_unit_in_bibwords(word, f, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
# wash 'word' argument and run query:
if f.endswith('count') and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = word.replace('*', '%') # we now use '*' as the truncation character
words = word.split("->", 1) # check for span query
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
# We remove trailing truncation character before stemming
if word0.endswith('%'):
word0 = stem(word0[:-1], stemming_language) + '%'
else:
word0 = stem(word0, stemming_language)
if word1.endswith('%'):
word1 = stem(word1[:-1], stemming_language) + '%'
else:
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f.endswith('count'):
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
(word0_washed, word1_washed), wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
# We remove trailing truncation character before stemming
if word.endswith('%'):
word = stem(word[:-1], stemming_language) + '%'
else:
word = stem(word, stemming_language)
if word.find('%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(wash_index_term(word),))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibwrd)
else:
hitset = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
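# Illustrative (hypothetical) calls to search_unit_in_bibwords() above;
# '*' is the truncation character and '->' denotes a span query:
#
#   search_unit_in_bibwords('neutrino', 'title')          # single word
#   search_unit_in_bibwords('neutr*', 'title', wl=50)     # truncated word, wildcard limit 50
#   search_unit_in_bibwords('2->5', 'authorcount')        # span query on a *count index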
def search_unit_in_idxpairs(p, f, search_type, wl=0):
"""Searches for pair 'p' inside idxPAIR table for field 'f' and
returns hitset of recIDs found."""
limit_reached = 0 # flag for knowing if the query limit has been reached
do_exact_search = True # flag to know when it makes sense to try to do exact matching
result_set = intbitset()
#determine the idxPAIR table to read from
index_id = get_index_id_from_field(f)
if not index_id:
return intbitset()
stemming_language = get_index_stemming_language(index_id)
pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
if p.startswith("%") and p.endswith("%"):
p = p[1:-1]
original_pattern = p
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
#is it a span query?
ps = p.split("->", 1)
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
#so we are dealing with a span query
pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
if not pairs_left or not pairs_right:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
elif len(pairs_left) != len(pairs_right):
# it is kind of hard to know what the user actually wanted
# we have to do: foo bar baz -> qux xyz, so let's switch to a phrase search
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
elif len(pairs_left) > 1 and \
len(pairs_right) > 1 and \
pairs_left[:-1] != pairs_right[:-1]:
# again we have something like: foo bar baz -> abc xyz qux
# so we'd better switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
else:
# finally, we can treat the search using idxPairs
# at this step we have either: foo bar -> abc xyz
# or foo bar abc -> foo bar xyz
queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
for pair in pairs_left[:-1]:  # which should be equal to pairs_right[:-1]
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False # no exact search for span queries
elif p.find('%') > -1:
#tokenizing p will remove the '%', so we have to make sure it stays
replacement = 'xxxxxxxxxx'  # hopefully this will not clash with anything in the future
p = string.replace(p, '%', replacement)
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
if string.find(pair, replacement) > -1:
pair = string.replace(pair, replacement, '%') #we replace back the % sign
queries_releated_vars.append(("LIKE %s", (pair, ), True))
else:
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False
else:
#normal query
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
queries_releated_vars.append(("= %s", (pair, ), False))
first_results = 1 # flag to know if it's the first set of results or not
for query_var in queries_releated_vars:
query_addons = query_var[0]
query_params = query_var[1]
use_query_limit = query_var[2]
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params, wildcard_limit=wl) #kwalitee:disable=sql
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params) #kwalitee:disable=sql
if not res:
return intbitset()
for pair, hitlist in res:
hitset_idxpairs = intbitset(hitlist)
if first_results:
result_set = hitset_idxpairs
first_results = 0
else:
result_set.intersection_update(hitset_idxpairs)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(result_set)
# check if we need to eliminate the false positives
if cfg['CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH'] and do_exact_search:
# we need to eliminate the false positives
idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
not_exact_search = intbitset()
for recid in result_set:
res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
if res:
termlist = deserialize_via_marshal(res[0][0])
if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
not_exact_search.add(recid)
else:
not_exact_search.add(recid)
# remove the recs that are false positives from the final result
result_set.difference_update(not_exact_search)
return result_set
def search_unit_in_idxphrases(p, f, search_type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'search_type' (e.g. equal to 'r' for a regexp search)."""
# call word search method in some cases:
if f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
# deduce in which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
# detect query type (exact phrase, partial phrase, regexp):
if search_type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = p.replace('*', '%') # we now use '*' as the truncation character
ps = p.split("->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if p.find('%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
# fill the result set:
for dummy_word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibphrase)
else:
hitset = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
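# Illustrative (hypothetical) calls to search_unit_in_idxphrases() above:
#
#   search_unit_in_idxphrases('ellis, j', 'author', 'a')      # exact phrase
#   search_unit_in_idxphrases('ellis%', 'author', 'a')        # partial phrase (LIKE)
#   search_unit_in_idxphrases('^ellis, j.*$', 'author', 'r')  # regular expression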
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equal to 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
hitset = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
return hitset
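# Illustrative (hypothetical) calls to search_unit_in_bibxxx() above; 'f' may
# be a logical field (resolved to MARC tags via get_field_tags) or a MARC tag
# given directly:
#
#   search_unit_in_bibxxx('CERN-TH-2011-001', 'reportnumber', 'a')
#   search_unit_in_bibxxx('CERN-%', '037__a', 'a', wl=100)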
def search_unit_in_solr(p, f=None, m=None):
"""
Query a Solr index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
"""
Query a Xapian index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
"""
Return hitset of recIDs found that were either created or modified
(according to 'type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
hitset = intbitset()
if search_type and search_type.startswith("m"):
search_type = "modification_date"
else:
search_type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (search_type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (search_type, search_type),
(datetext1, datetext2))
for row in res:
hitset += row[0]
return hitset
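# Illustrative (hypothetical) calls to search_unit_in_bibrec() above:
#
#   search_unit_in_bibrec('2010-01-01', '2010-12-31')      # created during 2010
#   search_unit_in_bibrec('2010-05', '2010-05', 'm')       # modified in May 2010 (prefix match)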
def search_unit_by_times_cited(p, exclude_selfcites=False):
"""
Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
# This is somewhat wasteful, but since we may need to get the records
# that do _not_ have cites, we have to know the ids of all records, too.
# This is only needed when p is 0, starts with '0->' or ends with '->0'.
allrecs = []
if p == 0 or p == "0" or \
p.startswith("0->") or p.endswith("->0"):
allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
return get_records_with_num_cites(numstr, allrecs,
exclude_selfcites=exclude_selfcites)
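# Illustrative (hypothetical) calls to search_unit_by_times_cited() above:
#
#   search_unit_by_times_cited('10->23')                      # cited between 10 and 23 times
#   search_unit_by_times_cited('0', exclude_selfcites=True)   # never cited, ignoring self-citations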
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records referred to by these records.
"""
if query:
ahitset = search_pattern(p=query)
return get_refersto_hitset(ahitset)
else:
return intbitset([])
def search_unit_refersto_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records referred to by these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citers = intbitset()
citations = get_cited_by_list(ahitset)
selfcitations = get_self_cited_by_list(ahitset)
for cites, selfcites in zip(citations, selfcitations):
# cites is in the form [(citee, citers), ...]
citers += cites[1] - selfcites[1]
return citers
else:
return intbitset([])
def search_unit_in_record_history(query):
"""
Return hitset of recIDs that were modified by the given cataloguer
"""
if query:
try:
cataloguer_name, modification_date = query.split(":")
except ValueError:
cataloguer_name = query
modification_date = ""
if modification_date:
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
modification_date = spires_syntax_converter.convert_date(modification_date)
parts = modification_date.split('->', 1)
if len(parts) > 1:
start_date, end_date = parts
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date>=%s AND job_date<=%s",
(cataloguer_name, start_date, end_date))
else:
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date LIKE %s",
(cataloguer_name, modification_date + '%',))
return intbitset(res)
else:
sql = "SELECT id_bibrec FROM hstRECORD WHERE job_person=%s"
res = intbitset(run_sql(sql, (cataloguer_name,)))
return res
else:
return intbitset([])
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_citedby_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
def search_unit_collection(query, m, wl=None):
"""
Search for records satisfying the query (e.g. collection:"BOOK" or
collection:"Books") and return list of records in the collection.
"""
if len(query):
ahitset = get_collection_reclist(query)
if not ahitset:
return search_unit_in_bibwords(query, 'collection', wl=wl)
return ahitset
else:
return intbitset([])
def search_unit_citedby_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citees = intbitset()
references = get_refers_to_list(ahitset)
selfreferences = get_self_refers_to_list(ahitset)
for refs, selfrefs in zip(references, selfreferences):
# refs is in the form [(citer, citees), ...]
citees += refs[1] - selfrefs[1]
return citees
else:
return intbitset([])
def get_records_that_can_be_displayed(user_info,
hitset_in_any_collection,
current_coll=CFG_SITE_NAME,
colls=None,
permitted_restricted_collections=None):
"""
Return records that can be displayed.
"""
records_that_can_be_displayed = intbitset()
if colls is None:
colls = [current_coll]
# let's get the restricted collections the user has rights to view
if permitted_restricted_collections is None:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
#we need this to be able to remove records that are both in a public and restricted collection
permitted_recids = intbitset()
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection in permitted_restricted_collections:
permitted_recids |= get_collection_reclist(collection)
else:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
else:  # the user needs to have access to all collections that restrict the records
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection not in permitted_restricted_collections:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
if records_that_can_be_displayed.is_infinite():
# We should not return infinite results for user.
records_that_can_be_displayed = intbitset()
for coll in colls_to_be_displayed:
records_that_can_be_displayed |= get_collection_reclist(coll)
return records_that_can_be_displayed
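# Illustrative (hypothetical) call to get_records_that_can_be_displayed() above;
# 'user_info' would typically come from collect_user_info(req) and
# 'hits_in_any_collection' from an earlier search stage:
#
#   viewable = get_records_that_can_be_displayed(user_info,
#                                                hits_in_any_collection,
#                                                current_coll='Articles',
#                                                colls=['Articles', 'Preprints'])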
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {} # all final results
results_nbhits = 0
# calculate the list of recids (restricted or not) that the user has rights to access and we should display (only those)
if not req or isinstance(req, cStringIO.OutputType): # called from CLI
user_info = {}
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
records_that_can_be_displayed = hitset_in_any_collection
permitted_restricted_collections = []
else:
user_info = collect_user_info(req)
# let's get the restricted collections the user has rights to view
if user_info['guest'] == '1':
## For guest users that are actually authorized to some restricted
## collection (by virtue of the IP address in a FireRole rule)
## we explicitly build the list of permitted_restricted_collections
permitted_restricted_collections = get_permitted_restricted_collections(user_info)
else:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
# let's build the list of the both public and restricted
# child collections of the collection from which the user
# started his/her search. This list of children colls will be
# used in the warning proposing a search in those collections
try:
current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
except:
from flask import request
current_coll = request.args.get('cc', CFG_SITE_NAME) # current_coll: coll from which user started his/her search
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
records_that_can_be_displayed = get_records_that_can_be_displayed(
user_info,
hitset_in_any_collection,
current_coll,
colls,
permitted_restricted_collections)
for coll in colls_to_be_displayed:
results[coll] = results.get(coll, intbitset()) | (records_that_can_be_displayed & get_collection_reclist(coll))
results_nbhits += len(results[coll])
if results_nbhits == 0:
# no hits found, try to search in Home and restricted and/or hidden collections:
results = {}
results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
results_in_restricted_collections = intbitset()
results_in_hidden_collections = intbitset()
for coll in permitted_restricted_collections:
if not get_coll_ancestors(coll): # hidden collection
results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
else:
results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
# in this way, we do not count twice, records that are both in Home collection and in a restricted collection
total_results = len(results_in_Home.union(results_in_restricted_collections))
if total_results > 0:
# some hits found in Home and/or restricted collections, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
len_colls_to_display = len(colls_to_be_displayed)
# trim the list of collections to first two, since it might get very large
write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %
{'x_collection': '<em>' +
string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') +
(len_colls_to_display > 2 and ' et al' or '') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': total_results,
'x_url_close': '</a>'}, req=req)
# display the whole list of collections in a comment
if len_colls_to_display > 2:
write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %
{'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
req=req)
else:
# no hits found: either the user is looking for a document he/she has no rights to view,
# or the user is looking for a hidden document:
if of.startswith("h") and display_nearest_terms_box:
if len(results_in_hidden_collections) > 0:
write_warning(_("No public collection matched your query. "
"If you were looking for a hidden document, please type "
"the correct URL for this record."), req=req)
else:
write_warning(_("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."), req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
If the final set is empty and 'ap' (approximate pattern) is true,
then print the 'aptext' warning and return the original 'results'
set unchanged.  If 'ap' is false, then return the empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
final_results = {}
for coll in results.keys():
final_results[coll] = results[coll].intersection(hitset)
nb_total += len(final_results[coll])
if nb_total == 0:
if of.startswith("h"):
write_warning(aptext, req=req)
final_results = results_ap
return final_results
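# Illustrative (hypothetical) use of intersect_results_with_hitset() above,
# e.g. to apply a date restriction to per-collection results:
#
#   date_hits = search_unit_in_bibrec('2012-01-01', '2012-12-31')
#   results = intersect_results_with_hitset(req, results, date_hits, ap=1,
#                                           aptext="No hits in the selected period.")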
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
author searches for similar names. Namely, take AUTHOR_NAME
and the first initial of the first name (after the comma) and look
into author index whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:] == '%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
Propose new searches according to `urlargd' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto' or f == 'referstoexcludingselfcites':
return _("There are no records referring to %(x_rec)s.", x_rec=cgi.escape(p))
if f == 'cataloguer':
return _("There are no records modified by %(x_rec)s.", x_rec=cgi.escape(p))
if f == 'citedby' or f == 'citedbyexcludingselfcites':
return _("There are no records cited by %(x_rec)s.", x_rec=cgi.escape(p))
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %(x_name)s.",
x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f == 'datecreated' or f == 'datemodified':
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %(x_name)s.",
x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f == 'datecreated' or f == 'datemodified':
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for px, dummy_fx in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
# p was stripped of accents, so do the same here:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %(x_name)s did not match any record. Nearest terms in any collection are:",
x_name=("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>"))
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
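# Illustrative (hypothetical) call to get_nearest_terms_in_bibwords() above;
# the returned neighbours depend entirely on the local word index:
#
#   get_nearest_terms_in_bibwords('neutrin', 'title', 3, 3)
#   # -> e.g. ['neutral', 'neutralino', 'neutrin', 'neutrino', 'neutrinos', ...]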
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = [x[0] for x in res_above]
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = [x[0] for x in res_below]
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
## We are going to take max(n_below, n_above) as the number of
## values to fetch from bibXXx. This is needed to work around
## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
## use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:] == '%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# select first n words only: (this is needed as we were searching
# in many different tables and so are not sure we picked the right n
# words; this of course won't be needed when we shall have
# one ACC table only for given field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except ValueError:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified.
Note: the below/above count is only approximate, not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
return list(out_list)
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
# deduce into which idxPHRASEX table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_bibwords(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
if in_hitset is None:
nbhits = len(recIDs)
else:
nbhits = len(intbitset(recIDs.keys()).intersection(in_hitset))
return nbhits
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return 'CFG_SITE_NAME'."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
recID = int(recID)
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
'MS': ['Current Market Surveys', 'Archived Market Surveys']}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
for coll_name in FP_collections[coll]:
if recID in get_collection_reclist(coll_name):
return coll_name
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
#check if this collection actually exist (also normalize the name if case-insensitive)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
Example: input='100__%', output='first author'."""
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]), "\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
else:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
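# Illustrative (hypothetical) calls to get_fieldvalues_alephseq_like() above:
#
#   get_fieldvalues_alephseq_like(123, '245__a')        # one concrete subfield value
#   get_fieldvalues_alephseq_like(123, ['100', '700'])  # text-MARC output for author fields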
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
return bibrecord.record_empty(get_record(recID))
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
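# Illustrative (hypothetical) calls to the two date helpers above; note that
# 'fmt' uses MySQL DATE_FORMAT syntax, not Python strftime:
#
#   get_creation_date(123)                             # e.g. '2011-03-17'
#   get_modification_date(123, fmt='%Y-%m-%d %H:%i')   # date and time of last modification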
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False, em=""):
"""Prints results overview box with links to particular collections below."""
if em != "" and EM_REPOSITORY["overview"] not in em:
return ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit,
display_body = em == "" or EM_REPOSITORY["body"] in em,
display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
"""
Cache holding all structures created by bibsort
( _data, data_dict).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except IndexError:
data_dict_ordered = {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except IndexError:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
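# Illustrative sketch of the structure this cacher exposes (the method name
# and contents are hypothetical):
#
#   cacher = BibSortDataCacher('title')
#   cacher.recreate_cache_if_needed()
#   cacher.cache['data_dict_ordered']   # {recid: weight, ...}
#   cacher.cache['bucket_data']         # {bucket_no: intbitset of recids, ...}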
def get_sorting_methods():
res = run_sql("""SELECT m.name, m.definition
FROM bsrMETHOD m, bsrMETHODDATA md
WHERE m.id = md.id_bsrMETHOD""")
return dict(res)
SORTING_METHODS = get_sorting_methods()
CACHE_SORTED_DATA = {}
for sorting_method in SORTING_METHODS:
try:
CACHE_SORTED_DATA[sorting_method].is_ok_p
except KeyError:
CACHE_SORTED_DATA[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_from_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and (len(sort_field) > 1 and str(sort_field[0:2]).isdigit()):
# sort_field starts by two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
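# Illustrative sketch (the field-to-tag mapping depends on the local 'field'
# table, so the tags shown here are only indicative):
#
#   get_tags_from_sort_fields(['author'])        # -> (['100__a', '700__a'], '')
#   get_tags_from_sort_fields(['773__y'])        # -> (['773__y'], '')
#   get_tags_from_sort_fields(['nosuchfield'])   # -> ([], 'nosuchfield')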
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field='', sorting_methods=SORTING_METHODS):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
(ii)rank_method_code is not in bsrMETHOD, use bibrank;
"""
# Special case: sorting by citations is fast because we store the
# ranking dictionary in memory, so we do not use bibsort buckets.
if CFG_BIBSORT_ENABLED and sorting_methods and rank_method_code != 'citation':
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:', '').strip().lower() == rank_method_code.lower():
solution_recs, solution_scores = \
sort_records_bibsort(req, hitset_global, sort_method,
'', sort_order, verbose, of, ln,
rg, jrec, 'r')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return solution_recs, solution_scores, '(', ')', comment
if rank_method_code.lower() == 'citation':
related_to = []
else:
related_to = pattern
solution_recs, solution_scores, prefix, suffix, comment = \
rank_records_bibrank(rank_method_code=rank_method_code,
rank_limit_relevance=rank_limit_relevance,
hitset=hitset_global,
verbose=verbose,
field=field,
related_to=related_to,
rg=rg,
jrec=jrec)
# Solution recs can be None, in case of error or other cases
# which should be all be changed to return an empty list.
if solution_recs and sort_order == 'd':
solution_recs.reverse()
solution_scores.reverse()
return solution_recs, solution_scores, prefix, suffix, comment
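# Illustrative sketch of the return shape (values are hypothetical):
#
#   recs, scores, prefix, suffix, comment = rank_records(
#       req, 'wrd', 0, intbitset([1, 2, 3]), verbose=0)
#   # 'recs' and 'scores' are parallel lists; 'prefix'/'suffix' wrap the
#   # displayed score (e.g. '(' and ')') and 'comment' carries optional
#   # debugging text.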
def sort_records_latest(recIDs, jrec, rg, sort_order):
if sort_order == 'd':
recIDs.reverse()
return slice_records(recIDs, jrec, rg)
def sort_or_rank_records(req, recIDs, rm, sf, so, sp, p, verbose=0, of='hb',
ln=CFG_SITE_LANG, rg=None, jrec=None, field='',
sorting_methods=SORTING_METHODS):
"""Sort or rank records.
Entry point for deciding to either sort or rank records."""
if rm:
ranking_result = rank_records(req, rm, 0, recIDs, p, verbose, so,
of, ln, rg, jrec, field,
sorting_methods=sorting_methods)
if ranking_result[0]:
return ranking_result[0] # ranked recids
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS):
return sort_records(req, recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
return recIDs.tolist()
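# Illustrative dispatch sketch (req, ranking method and recIDs are
# hypothetical; recIDs is expected to be an intbitset, hence the
# .tolist() fallback above):
#
#   hits = intbitset([1, 2, 3])
#   sort_or_rank_records(req, hits, rm='wrd', sf='', so='d', sp='', p='ellis')
#   # -> ranked recid list if the 'wrd' ranking produced results,
#   #    otherwise the hits sorted or returned as a plain list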
def sort_records(req, recIDs, sort_field='', sort_order='a', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sorting_methods=SORTING_METHODS):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
(ii)sort_field is not in bsrMETHOD, and thus, the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
# bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order,
sort_pattern, verbose, of, ln, rg, jrec)
# ignore the use of buckets, use old fashion sorting
use_sorting_buckets = CFG_BIBSORT_ENABLED and sorting_methods
# Default sorting
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, CFG_BIBSORT_DEFAULT_FIELD_ORDER, verbose, of, ln, rg, jrec)
else:
return sort_records_latest(recIDs, jrec, rg, sort_order)
sort_fields = sort_field.split(",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and
definition.replace('FIELD:', '').strip().lower() == sort_fields[0].lower()) or
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
write_warning(_("Sorry, %(x_option)s does not seem to be a valid sort option. The records will not be sorted.", x_option=cgi.escape(error_field)), "Error", req=req)
return slice_records(recIDs, jrec, rg)
elif tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:', '').strip().split(',') == tags \
and use_sorting_buckets:
#this list of tags have a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#we do not have this sort_field in BibSort tables -> do the old fashion sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
else:
return slice_records(recIDs, jrec, rg)
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=1, sort_or_rank='s', sorting_methods=SORTING_METHODS):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
if not jrec:
jrec = 1
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting (using BibSort cache) by method %s (definition %s)."
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset()
input_recids = intbitset(recIDs)
CACHE_SORTED_DATA[sort_method].recreate_cache_if_needed()
sort_cache = CACHE_SORTED_DATA[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field,
sort_order, '', verbose, of, ln, rg,
jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(
input_recids & sort_cache['bucket_data'][bucket_no]
)
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = intbitset()
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
# recid is in buckets, but not in the bsrMETHODDATA,
# maybe because the value has been deleted, but the change has not
# yet been propagated to the buckets
missing_records.add(recid)
# check if there are recids that are not in any bucket -> to be added at
# the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not been yet inserted in the bibsort structures
#or, some records have no value for the sort_method
missing_records += input_recids - solution
reverse = sort_order == 'd'
if sort_method.strip().lower() == CFG_BIBSORT_DEFAULT_FIELD and reverse:
# If we want to sort the records on their insertion date, add the
# missing records at the top.
solution = sorted(missing_records, reverse=True) + \
sorted(dict_solution, key=dict_solution.__getitem__, reverse=True)
else:
solution = sorted(dict_solution, key=dict_solution.__getitem__,
reverse=reverse) + sorted(missing_records)
# Only keep records, we are going to display
solution = slice_records(solution, jrec, rg)
if sort_or_rank == 'r':
# We need the recids, with their ranking score
return solution, [dict_solution.get(record, 0) for record in solution]
else:
return solution
def slice_records(recIDs, jrec, rg):
if not jrec:
jrec = 1
if rg:
recIDs = recIDs[jrec-1:jrec-1+rg]
else:
recIDs = recIDs[jrec-1:]
return recIDs
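# Illustrative sketch of the 1-based jrec / rg slicing semantics:
#
#   slice_records([10, 20, 30, 40, 50], jrec=2, rg=2)     # -> [20, 30]
#   slice_records([10, 20, 30, 40, 50], jrec=4, rg=None)  # -> [40, 50]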
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose the one that is given by
'sort_pattern', for example "sort by report number that starts with CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
## check arguments:
if not sort_field:
return slice_records(recIDs, jrec, rg)
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
write_warning(_("Sorry, sorting is allowed on sets of up to %(x_name)d records only. Using default sort order.", x_name=CFG_WEBSEARCH_NB_RECORDS_TO_SORT), "Warning", req=req)
return slice_records(recIDs, jrec, rg)
recIDs_dict = {}
recIDs_out = []
if not tags:
# tags have not been computed yet
sort_fields = sort_field.split(',')
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
write_warning(_("Sorry, %(x_name)s does not seem to be a valid sort option. The records will not be sorted.", x_name=cgi.escape(error_field)), "Error", req=req)
return slice_records(recIDs, jrec, rg)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
if sort_pattern:
write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
# 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern not found among the values, so use it as a prefix followed by all values
val = sort_pattern + " " + ''.join(vals)
else:
# no sort pattern defined, so join them all together
val = ''.join(vals)
val = strip_accents(val.lower()) # sort values regardless of accents and case
if val in recIDs_dict:
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# create output array:
for k in sorted(recIDs_dict.keys()):
recIDs_out.extend(recIDs_dict[k])
# ascending or descending?
if sort_order == 'd':
recIDs_out.reverse()
recIDs = recIDs_out
# return only up to the maximum that we need
return slice_records(recIDs, jrec, rg)
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
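# Illustrative sketch of the (irec_min, irec_max) interval computation:
#
#   get_interval_for_records_to_sort(100, jrec=11, rg=10)      # -> (10, 20)
#   get_interval_for_records_to_sort(100, jrec=None, rg=None)  # -> (0, 100)
#   get_interval_for_records_to_sort(100, jrec=98, rg=10)      # -> (97, 100)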
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em='', nb_found=-1):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or 'print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if nb_found == -1:
nb_found = len(recIDs)
if nb_found:
if not rg or rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if len(recIDs) > rg and rg != -9999:
recIDs = slice_records(recIDs, jrec, rg)
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for recid in recIDs:
x = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for recid in recIDs:
x = print_record(recid, format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format.startswith('recjson'):
# we are doing recjson output:
req.write('[')
for idx, recid in enumerate(recIDs):
if idx > 0:
req.write(',')
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
req.write(']')
elif format == 'excel':
create_excel(recIDs=recIDs, req=req, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for recid in recIDs:
req.write(print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(ln=ln))
for irec, recid in enumerate(recIDs):
row_number = jrec+irec
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln=ln,
recid=recid,
row_number=row_number,
relevance=relevance,
record=record,
relevances_prologue=relevances_prologue,
relevances_epilogue=relevances_epilogue,
display_add_to_basket=display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln=ln,
display_add_to_basket=display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
referer = user_info.get('referer', '')
for recid in recIDs:
if record_exists(recid) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recid)
if merged_recid:
write_warning(_("The record %(x_rec)d replaces it.", x_rec=merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_collection_of_a_record(recid, referer, False)),
recid, ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in iteritems(unordered_tabs)]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'],
'%s/%s/%s/%s%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln),
tab_id == tab,
unordered_tabs[tab_id]['enabled'])
for (tab_id, dummy_order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] is True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recid, "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recid, ln)
r = calculate_reading_similarity_list(recid, "pageviews")
viewsimilarity = None
if r:
viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recid,
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'citations':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = rank_by_citations(get_self_cited_by(recid), verbose=verbose)
selfcited = reversed(selfcited[0])
selfcited = [recid for recid, dummy in selfcited]
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " +
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(ln, citationhistory))
# Citation log
entries = get_citers_log(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_log(ln, entries))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recid, 'HDREF', ln=ln, user_info=user_info, verbose=verbose, force_2nd_pass=True))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'keywords':
from invenio.legacy.bibclassify.webinterface import main_page
main_page(req, recid, tabs, ln,
webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'hepdata':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
include_jquery=True,
include_mathjax=True))
from invenio.utils import hepdata as hepdatautils
from invenio.utils.hepdata import display as hepdatadisplayutils
data = hepdatautils.retrieve_data_for_record(recid)
if data:
content = websearch_templates.tmpl_record_hepdata(data, recid, True)
else:
content = websearch_templates.tmpl_record_no_hepdata()
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(
recid,
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recid) == 1:
creationdate = get_creation_date(recid)
modificationdate = get_modification_date(recid)
content = print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID=recid,
ln=ln,
format=format,
creationdate=creationdate,
modificationdate=modificationdate,
content=content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recid,
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.modules.comments.api import get_mini_reviews
reviews = get_mini_reviews(recid=recid, ln=ln)
else:
reviews = ''
actions = format_record(recid, 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recid, 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recid,
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for recid in recIDs:
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe8x'):
prologue = websearch_templates.tmpl_xml_endnote_8x_prologue()
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe8x'):
epilogue = websearch_templates.tmpl_xml_endnote_8x_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
val = value[0][0]
except IndexError:
### In case it does not exist, let's build it!
pass
else:
return deserialize_via_marshal(val)
return create_record(print_record(recid, 'xm'))[0]
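# Illustrative usage sketch (recID is hypothetical):
#
#   rec = get_record(10)
#   # 'rec' is a bibrecord structure: rec[tag] is a list of field tuples
#   # (subfields, ind1, ind2, controlfield_value, global_position).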
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d',
sp='', rm='', brief_links=True):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
#check from user information if the user has the right to see hidden fields/tags in the
#records as well
can_see_hidden = False
if user_info:
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if format == 'recjson':
import json
from invenio.modules.records.api import get_record as get_recjson
ot = ot if ot and len(ot) else None
return json.dumps(get_recjson(recID).dumps(
keywords=ot, filter_hidden=not can_see_hidden))
_ = gettext_set_language(ln)
# The 'attribute this paper' link is shown only if the session states it should and
# the record is included in the collections to which bibauthorid is limited.
if user_info:
display_claim_this_paper = (user_info.get("precached_viewclaimlink", False) and
recID in intbitset.union(*[get_collection_reclist(x)
for x in BIBAUTHORID_LIMIT_TO_COLLECTIONS]))
else:
display_claim_this_paper = False
can_edit_record = False
if check_user_can_edit_record(user_info, recID):
can_edit_record = True
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (format.lower().startswith('t')
or format.lower().startswith('hm')
or str(format[0:3]).isdigit()
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
# Other formats have to deal with it.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %(x_rec)d replaces it.", x_rec=merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if brief_links and format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
return out
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1 and not ot:
# record 'recID' is formatted in 'format', and we are not
# asking for field-filtered output; so print it:
out += "%s" % decompress(res[0][0])
elif ot:
# field-filtered output was asked for; print only some fields
record = get_record(recID)
if not can_see_hidden:
for tag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
del record[tag]
ot = list(set(ot) - set(cfg['CFG_BIBFORMAT_HIDDEN_TAGS']))
out += record_xml_output(record, ot)
else:
# record 'recID' is not formatted in 'format' or we ask
# for field-filtered output -- they are not in "bibfmt"
# table; so fetch all the data from "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s">%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
# bibrec_bib01x (and set i = 0 at the end of
# first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
i = 0 # Subsequent outer-loop iterations start at digit2=0, i.e. bib10x and bibrec_bib10x onwards
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
if f.split('.')[-1] == 'png':
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly, or use the default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
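# Illustrative usage sketch (recID is hypothetical):
#
#   xml = print_record(10, format='xm')    # MARCXML serialization of record 10
#   brief = print_record(10, format='hb')  # HTML brief format, with the
#                                          # "Detailed record" links appended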
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.modules.formatter.utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, str(search_pattern), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
# remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri'].lower():
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = ''
try:
snippets = get_pdf_snippets(recID, keywords, user_info)
except:
register_exception()
if snippets:
out += snippets
return out
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except IndexError:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
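# Illustrative sketch of the resulting search.log line format (values are
# hypothetical): timestamp#action#pattern#field#collections#nb_found, e.g.
#
#   20140102123456#SEARCH#ellis#author#Articles,Preprints#42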
def clean_dictionary(dictionary, list_of_items):
"""Returns a copy of the dictionary with all the items
in the list_of_items as empty strings"""
out_dictionary = dictionary.copy()
out_dictionary.update((item, '') for item in list_of_items)
return out_dictionary
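# Illustrative sketch:
#
#   clean_dictionary({'p': 'ellis', 'f': 'author', 'rg': 10}, ['p', 'f'])
#   # -> {'p': '', 'f': '', 'rg': 10}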
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=None, sf="", so="a", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
wl=0, em=""):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed. (Note that `rg' is ignored in case of `of=id'.)
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found, "intbitset" means to return an intbitset
representation of the recIDs found (no sorting or ranking
will be performed). (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
em - output only part of the page.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results. (Note that `jrec' is ignored
in case of `of=id'.)
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish so.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
wl - wildcard limit (e.g. 100). Wildcard queries will be
limited to this number of results.
"""
kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
tab=tab, wl=wl, em=em)
return prs_perform_search(kwargs=kwargs, **kwargs)
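# Illustrative high-level API sketch (patterns and collection names are
# hypothetical); with of='id' the call returns a list of recIDs instead of
# writing a page to 'req':
#
#   recids = perform_request_search(p='ellis', f='author', of='id')
#   hits = perform_request_search(p='higgs', cc='Articles', of='intbitset')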
def prs_perform_search(kwargs=None, **dummy):
"""Internal call which does the search, it is calling standard Invenio;
Unless you know what you are doing, don't use this call as an API
"""
# separately because we can call it independently
out = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
if not out:
return out
return prs_search(kwargs=kwargs, **kwargs)
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
aas=None, ln=None, em="", **dummy):
"""
Check and wash collection list argument before we start searching.
If there are troubles, e.g. a collection is not defined, print
warning to the browser.
@return: True if collection list is OK, and various False values
(empty string, empty list) if there was an error.
"""
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
kwargs['colls_to_display'] = colls_to_display
kwargs['colls_to_search'] = colls_to_search
kwargs['hosted_colls'] = hosted_colls
kwargs['wash_colls_debug'] = wash_colls_debug
except InvenioWebSearchUnknownCollectionError as exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
page_end(req, of, ln, em)
return ''
elif of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
page_end(req, of, ln, em)
return ''
else:
page_end(req, of, ln, em)
return ''
return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
ec=None, tab="", uid=None, wl=0, em="", **dummy):
"""
Sets the (default) values and checks others for the PRS call
"""
# wash output format:
of = wash_output_format(of)
# wash all arguments requiring special care
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
(d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
# TODO deduce passed search limiting criterias (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, (cStringIO.OutputType, dict)) \
and getattr(req, 'args', None): # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldcode in fieldargs:
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
if uid is None:
try:
uid = getUid(req)
except:
uid = 0
_ = gettext_set_language(ln)
if aas == 2: #add-to-search interface
p = create_add_to_search_pattern(p, p1, f1, m1, op1)
default_addtosearch_args = websearch_templates.restore_search_args_to_default(['p1', 'f1', 'm1', 'op1'])
if req:
req.argd.update(default_addtosearch_args)
req.argd['p'] = p
kwargs = {'req': req, 'cc': cc, 'c': c, 'p': p, 'f': f, 'rg': rg, 'sf': sf,
'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'aas': aas,
'p1': p1, 'f1': f1, 'm1': m1, 'op1': op1, 'p2': p2, 'f2': f2,
'm2': m2, 'op2': op2, 'p3': p3, 'f3': f3, 'm3': m3, 'sc': sc,
'jrec': jrec, 'recid': recid, 'recidb': recidb, 'sysno': sysno,
'id': id, 'idb': idb, 'sysnb': sysnb, 'action': action, 'd1': d1,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2': d2, 'd2y': d2y,
'd2m': d2m, 'd2d': d2d, 'dt': dt, 'verbose': verbose, 'ap': ap,
'ln': ln, 'ec': ec, 'tab': tab, 'wl': wl, 'em': em,
'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid,
'pl': pl, 'pl_in_url': pl_in_url, '_': _,
'selected_external_collections_infos': None,
}
kwargs.update(**dummy)
return kwargs
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
action=None, rm=None, of=None, em=None,
**dummy):
"""
    This function writes various bits into the req object as the search
    proceeds (so that pieces of the page are rendered even before the
    search has ended).
"""
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
output = prs_detailed_record(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif action == "browse":
## 2 - browse needed
of = 'hb'
output = prs_browse(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
output = prs_search_similar_records(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3 - common search needed
output = prs_search_common(kwargs=kwargs, **kwargs)
if output is not None:
return output
# External searches
if of.startswith("h"):
if not of in ['hcs', 'hcs2']:
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
**dummy):
"""Formats and prints one record"""
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of in ["id", "intbitset"]:
result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
if of == "intbitset":
return intbitset(result)
else:
return result
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln,
search_pattern=p, verbose=verbose, tab=tab, sf=sf,
so=so, sp=sp, rm=rm, em=em, nb_found=len(range(recid, recidb)))
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
colls_to_search=None, verbose=None, em=None, **dummy):
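    """Handle the browse interface: print the search box and browse the
    given pattern(s) in the selected collections, writing output to req."""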
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
write_warning(create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
rm, cc, ln, jrec, rg, aas, action),
req=req)
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, em=None,
verbose=None, **dummy):
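    """Handle a similarity search: p is of the form 'recid:<recid>'; find
    records related to that record using rank method rm and print them."""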
if req and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recid = p[6:]
if record_exists(recid) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
if of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
        # record does exist, so find similar ones to it
t1 = os.times()[4]
(results_similar_recIDs,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
results_similar_comments) = \
rank_records_bibrank(rank_method_code=rm,
rank_limit_relevance=0,
hitset=get_collection_reclist(cc),
related_to=[p],
verbose=verbose,
field=f,
rg=rg,
jrec=jrec)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
write_warning(results_similar_comments, req=req)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
elif of == "id":
return results_similar_recIDs
elif of == "intbitset":
return intbitset(results_similar_recIDs)
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning(results_similar_relevances_prologue, req=req)
write_warning(results_similar_relevances_epilogue, req=req)
write_warning(results_similar_comments, req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
verbose=None, em=None, **dummy):
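    """Handle a 'cocitedwith:<recid>' query: find and print the records
    that are co-cited with the given record."""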
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
        # record does exist, so find co-cited ones:
t1 = os.times()[4]
results_cocited_recIDs = [x[0] for x in calculate_co_cited_with_list(int(recID))]
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
elif of == "id":
return results_cocited_recIDs
elif of == "intbitset":
return intbitset(results_cocited_recIDs)
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
else:
# cited rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning("nothing found", req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
p1=None, p2=None, p3=None, hosted_colls=None, f=None,
colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
verbose=None, **dummy):
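    """Query the external hosted collections (for HTML/XML output only) and
    store their results, timeouts and related status flags into kwargs."""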
hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] is None or result[1] is False:
                    # these are the searches that returned no or zero results
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
# we need to know for later use if there were any hosted collections to be searched even if they weren't in the end
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
kwargs['hosted_colls_results'] = hosted_colls_results
kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
kwargs['hosted_colls_true_results'] = hosted_colls_true_results
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
op2=None, f3=None, m3=None, ap=None, ec=None,
selected_external_collections_infos=None, verbose=None,
wl=None, em=None, **dummy):
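    """Perform the advanced search: combine the results of patterns p1, p2
    and p3 with the boolean operators op1 and op2, accumulating the hits
    into results_in_any_collection."""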
len_results_p1 = 0
len_results_p2 = 0
len_results_p3 = 0
try:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
len_results_p1 = len(results_in_any_collection)
if len_results_p1 == 0:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
verbose, ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p2 = len(results_tmp)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
if len_results_p2:
#each individual query returned results, but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p3 = len(results_tmp)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
#each individual query returned results but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
if p2:
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
**dummy):
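    """Perform the simple search for pattern p, reusing cached results when
    available, and accumulate the hits into results_in_any_collection."""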
try:
results_in_cache = intbitset().fastload(
search_results_cache.get(query_representation_in_cache))
except:
results_in_cache = None
if results_in_cache is not None:
        # query is already in the cache, so reuse it:
results_in_any_collection.union_update(results_in_cache)
if verbose and of.startswith("h"):
write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
wl=wl))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection,
kwargs=None, colls_to_search=None,
req=None, of=None, ln=None,
cc=None, p=None, p1=None, p2=None, p3=None, f=None,
ec=None, verbose=None, selected_external_collections_infos=None,
em=None, **dummy):
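    """Search stage 4: intersect the hits found in any collection with the
    record sets of the collections to be searched."""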
    display_nearest_terms_box = not kwargs['hosted_colls_actual_or_potential_results_p']
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
        # recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final.update(intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, of,
verbose, ln, display_nearest_terms_box=display_nearest_terms_box))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
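    """Store the query results (and the query pattern and collection) in the
    search results cache, if caching is enabled."""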
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE > 0:
search_results_cache.set(query_representation_in_cache,
results_in_any_collection.fastdump(),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
search_results_cache.set(query_representation_in_cache + '::cc',
dummy.get('cc', CFG_SITE_NAME),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
if req:
from flask import request
req = request
search_results_cache.set(query_representation_in_cache + '::p',
req.values.get('p', ''),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
if verbose and of.startswith("h"):
write_warning(req, "Search stage 3: storing query results in cache.", req=req)
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
ec=None, selected_external_collections_infos=None,
hosted_colls_actual_or_potential_results_p=None,
datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
**dummy):
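    """Search stage 5: apply time limits and additional search pattern
    limits/restrictions to results_final."""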
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
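    """Split the final result set per collection and compute the number of
    hits per collection as well as the total number of distinct hits,
    including hosted collections results and timeouts."""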
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
    if len(results_final.keys()) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
kwargs['results_final_nb'] = results_final_nb
kwargs['results_final_nb_total'] = results_final_nb_total
kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
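    """Produce the citation summary output (of='hcs' and friends) for the
    current search results by feeding them to the summarizer."""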
# feed the current search to be summarized:
from invenio.legacy.search_engine.summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g.:
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
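    """Print the search results of every searched collection (ranking or
    sorting the records as requested), followed by the hosted collections
    results and timeouts."""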
if len(colls_to_search) > 1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if coll in results_final and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_nb_found = len(results_final_recIDs)
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
else:
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em,
nb_found=results_final_nb_found)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] is None or result[1] is False:
                        ## these are the searches that returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
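    """Log the query in the search query logs and, for HTML output, prepare
    the email alert/RSS teaser box."""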
# FIXME move query logging to signal receiver
# log query:
try:
from flask.ext.login import current_user
if req:
from flask import request
req = request
id_query = log_query(req.host,
'&'.join(map(lambda (k,v): k+'='+v, request.values.iteritems(multi=True))),
uid)
#id_query = log_query(req.remote_host, req.args, uid)
#of = request.values.get('of', 'hb')
if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
if not of in ['hcs', 'hcs2']:
# display alert/RSS teaser for non-summary formats:
display_email_alert_part = True
if current_user:
if current_user['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not current_user['precached_usealerts']:
display_email_alert_part = False
from flask import flash
flash(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
ln=ln, display_email_alert_part=display_email_alert_part), 'search-results-after')
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
verbose=None, wl=None, em=None, **dummy):
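    """Run the common search workflow (search stages 1 to 6) for simple,
    advanced and add-to-search queries and display the results."""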
query_representation_in_cache = get_search_results_cache_key(**kwargs)
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
if of.startswith("h") and verbose and wash_colls_debug:
write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)
prs_search_hosted_collections(kwargs=kwargs, **kwargs)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
# WebSearch services
if jrec <= 1 and \
(em == "" and True or (EM_REPOSITORY["search_services"] in em)):
user_info = collect_user_info(req)
# display only on first search page, and only if wanted
# when 'em' param set.
for answer_relevance, answer_html in services.get_answers(
req, user_info, of, cc, colls_to_search, p, f, ln):
req.write('<div class="searchservicebox">')
req.write(answer_html)
if verbose > 8:
write_warning("Service relevance: %i" % answer_relevance, req=req)
req.write('</div>')
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 2 and not (p2 or p3):
## 3A add-to-search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
elif aas == 1 or (p1 or p2 or p3):
## 3B - advanced search
output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3C - simple search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return None
# store this search query results into search results cache if needed:
prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)
# search stage 4 and 5: intersection with collection universe and sorting/limiting
try:
output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except: # no results to display
return None
t2 = os.times()[4]
cpu_time = t2 - t1
kwargs['cpu_time'] = cpu_time
## search stage 6: display results:
return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
kwargs=None, req=None, of=None,
**dummy):
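    """Run search stage 4 (intersection with the collection universe) and
    search stage 5 (search limits and restrictions)."""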
# search stage 4: intersection with collection universe:
results_final = {}
output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
if output is not None:
return output
# another external search if we still don't have something
if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
kwargs['results_final'] = results_final
raise Exception
# search stage 5: apply search option limits and restrictions:
output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
kwargs['results_final'] = results_final
if output is not None:
return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
f=None, em=None, jrec=None, rg=None, **dummy
):
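    """Search stage 6: split the results into collections and display or
    return them in the requested output format."""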
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
if of.startswith("h"):
write_warning("No match found, please enter different search terms.", req=req)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
prs_log_query(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "intbitset":
#return the result as an intbitset
return results_final_for_all_selected_colls
elif of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
return slice_records(recIDs, jrec, rg)
elif of.startswith("h"):
if of not in ['hcs', 'hcs2', 'hcv', 'htcv', 'tlcv']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)
# print number of hits found for XML outputs:
if of.startswith("x") or of == 'mobb':
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])
# print records:
if of in ['hcs', 'hcs2']:
prs_summarize_records(kwargs=kwargs, **kwargs)
elif of in ['hcv', 'htcv', 'tlcv'] and CFG_INSPIRE_SITE:
from invenio.legacy.search_engine.cvifier import cvify_records
cvify_records(results_final_for_all_selected_colls, of, req, so)
else:
prs_print_records(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
verbose=None, **dummy
):
## search stage 6: display results:
# split result set into collections
dummy_results_final_nb, dummy_results_final_nb_total, results_final_for_all_selected_colls = prs_split_into_collections(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, field=kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)
return recIDs
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
i = 0
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = line.split("#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
pass # ignore eventual wrong log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" %
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
@return: list of tuples containing tag and its frequency
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
[('PREPRINT', 10), ('THESIS', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
[('Ellis, J', 10), ('Ellis, N', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
[('Ellis, N', 7), ...]
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"""Compare VAL1 and VAL2 according to, firstly, frequency, then
secondly, alphabetically."""
compared_via_frequencies = cmp(valuefreqdict[val2],
valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, string_types):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
split_by=split_by))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of found values?
for val in vals_to_count:
if val not in exclude_values:
if val in valuefreqdict:
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
if not CFG_NUMPY_IMPORTABLE:
## original version
out = []
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if val in displaytmp:
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out.append((tmpdisplv, valuefreqdict[val]))
return out
else:
f = [] # frequencies
n = [] # original names
ln = [] # lowercased names
## build lists within one iteration
for (val, freq) in iteritems(valuefreqdict):
f.append(-1 * freq)
if val in displaytmp:
n.append(displaytmp[val])
else:
n.append(val)
ln.append(val.lower())
## sort by frequency (desc) and then by lowercased name.
return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile as pyprofile
import pstats
pyprofile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, em=""):
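    """Wrapper around perform_external_collection_search() that enables or
    disables the various output parts according to the 'em' parameter."""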
perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
verbosity_level, lang, selected_external_collections_infos,
print_overview=em == "" or EM_REPOSITORY["overview"] in em,
print_search_info=em == "" or EM_REPOSITORY["search_info"] in em,
print_see_also_box=em == "" or EM_REPOSITORY["see_also_box"] in em,
print_body=em == "" or EM_REPOSITORY["body"] in em)
@cache.memoize(timeout=5)
def get_fulltext_terms_from_search_pattern(search_pattern):
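    """Return the list of fulltext keywords contained in the given search
    pattern (ignoring negated units and non-fulltext fields)."""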
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, search_pattern.encode('utf-8'), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
# remove leading and training `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
return keywords
def check_user_can_edit_record(req, recid):
""" Check if user has authorization to modify a collection
the recid belongs to
"""
record_collections = get_all_collections_of_a_record(recid)
if not record_collections:
# Check if user has access to all collections
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection='')
if auth_code == 0:
return True
else:
for collection in record_collections:
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection=collection)
if auth_code == 0:
return True
return False
| gpl-2.0 | 3,541,788,180,805,324,000 | 45.394049 | 268 | 0.549861 | false | 3.806749 | false | false | false |
mozman/ezdxf | tests/test_05_tools/test_510_byte_stream.py | 1 | 1134 | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
import struct
from ezdxf.tools.binarydata import ByteStream
def test_init():
bs = ByteStream(b'ABCDABC\x00')
assert bs.index == 0
assert len(bs.buffer) == 8
def test_read_ps():
bs = ByteStream(b'ABCDABC\x00')
s = bs.read_padded_string()
assert s == 'ABCDABC'
assert bs.index == 8
assert bs.has_data is False
def test_read_ps_align():
bs = ByteStream(b'ABCD\x00')
s = bs.read_padded_string()
assert s == 'ABCD'
assert bs.index == 8
assert bs.has_data is False
def test_read_pus():
bs = ByteStream(b'A\x00B\x00C\x00D\x00\x00\x00')
s = bs.read_padded_unicode_string()
assert s == 'ABCD'
assert bs.index == 12
assert bs.has_data is False
def test_read_doubles():
data = struct.pack('3d', 1.0, 2.0, 3.0)
bs = ByteStream(data)
x = bs.read_struct('d')[0]
y = bs.read_struct('d')[0]
z = bs.read_struct('d')[0]
assert (x, y, z) == (1.0, 2.0, 3.0)
assert bs.index == 24
assert bs.has_data is False
if __name__ == '__main__':
pytest.main([__file__])
| mit | 846,370,826,801,595,800 | 21.235294 | 52 | 0.602293 | false | 2.793103 | true | false | false |
alindt/Cinnamon | files/usr/lib/cinnamon-settings/modules/cs_default.py | 1 | 14784 | #!/usr/bin/env python
from SettingsWidgets import *
PREF_MEDIA_AUTORUN_NEVER = "autorun-never"
PREF_MEDIA_AUTORUN_X_CONTENT_START_APP = "autorun-x-content-start-app"
PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE = "autorun-x-content-ignore"
PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER = "autorun-x-content-open-folder"
CUSTOM_ITEM_ASK = "cc-item-ask"
CUSTOM_ITEM_DO_NOTHING = "cc-item-do-nothing"
CUSTOM_ITEM_OPEN_FOLDER = "cc-item-open-folder"
MEDIA_HANDLING_SCHEMA = "org.cinnamon.desktop.media-handling"
PREF_CONTENT_TYPE = 0
PREF_GEN_CONTENT_TYPE = 1
PREF_LABEL = 2
DEF_CONTENT_TYPE = 0
DEF_LABEL = 1
DEF_HEADING = 2
preferred_app_defs = [
# for web, we need to support text/html,
# application/xhtml+xml and x-scheme-handler/https,
# hence the "*" pattern
( "x-scheme-handler/http", "x-scheme-handler/http", _("_Web") ),
( "x-scheme-handler/mailto", "x-scheme-handler/mailto", _("_Mail") ),
( "text/plain", "text", _("Text") ), #TODO: Add mnemonic once we're out of M16 release to preserve i18n for now
# 1st mimetype is to let us find apps
# 2nd mimetype is to set default handler for (so we handle all of that type, not just a specific format)
( "audio/x-vorbis+ogg", "audio", _("M_usic") ),
( "video/x-ogm+ogg", "video", _("_Video") ),
( "image/jpeg", "image", _("_Photos") )
]
removable_media_defs = [
( "x-content/audio-cdda", _("CD _audio") , _("Select an application for audio CDs")),
( "x-content/video-dvd", _("_DVD video"), _("Select an application for video DVDs") ),
( "x-content/audio-player", _("_Music player"), _("Select an application to run when a music player is connected") ),
( "x-content/image-dcf", _("_Photos"), _("Select an application to run when a camera is connected") ),
( "x-content/unix-software", _("_Software"), _("Select an application for software CDs") )
]
other_defs = [
# translators: these strings are duplicates of shared-mime-info
# strings, just here to fix capitalization of the English originals.
# If the shared-mime-info translation works for your language,
# simply leave these untranslated.
( "x-content/audio-dvd", _("audio DVD") ),
( "x-content/blank-bd", _("blank Blu-ray disc") ),
( "x-content/blank-cd", _("blank CD disc") ),
( "x-content/blank-dvd", _("blank DVD disc") ),
( "x-content/blank-hddvd", _("blank HD DVD disc") ),
( "x-content/video-bluray", _("Blu-ray video disc") ),
( "x-content/ebook-reader", _("e-book reader") ),
( "x-content/video-hddvd", _("HD DVD video disc") ),
( "x-content/image-picturecd", _("Picture CD") ),
( "x-content/video-svcd", _("Super Video CD") ),
( "x-content/video-vcd", _("Video CD") ),
( "x-content/win32-software", _("Windows software") ),
( "x-content/software", _("Software") )
]
class ColumnBox(Gtk.VBox):
def __init__(self, title, content):
super(ColumnBox, self).__init__()
label = Gtk.Label("")
label.set_markup('<b>%s\n</b>' % title)
label.set_alignment(0.5, 0.5)
self.set_homogeneous(False)
self.pack_start(label, False, False, 0)
self.pack_end(content, True, True, 0)
class ButtonTable(Gtk.Table):
def __init__(self, lines):
super(ButtonTable, self).__init__(lines, 2, False)
self.set_row_spacings(8)
self.set_col_spacings(15)
self.attach(Gtk.Label(""), 2, 3, 0, lines, Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0, 0)
self.row = 0
def addRow(self, label, button):
if label:
label = MnemonicLabel(label, button)
self.attach(label, 0, 1, self.row, self.row+1, Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0, 0)
self.attach(button, 1, 2, self.row, self.row+1, Gtk.AttachOptions.FILL, 0, 0, 0)
self.row += 1
def forgetRow(self):
self.row -= 1
class MnemonicLabel(Gtk.Label):
def __init__(self, text, widget):
super(MnemonicLabel, self).__init__("")
self.set_text_with_mnemonic(text)
self.set_alignment(1, 0.5)
self.get_style_context().add_class("dim-label")
self.set_mnemonic_widget(widget)
class DefaultAppChooserButton(Gtk.AppChooserButton):
def __init__(self, content_type, gen_content_type):
super(DefaultAppChooserButton, self).__init__(content_type=content_type)
self.content_type = content_type
self.generic_content_type = gen_content_type
self.set_show_default_item(True)
self.connect("changed", self.onChanged)
def onChanged(self, button):
info = button.get_app_info()
if info:
types = info.get_supported_types()
for t in types:
if self.generic_content_type in t:
if not info.set_as_default_for_type(t):
print "Failed to set '%s' as the default application for '%s'" % (info.get_name(), self.generic_content_type)
if self.content_type == "x-scheme-handler/http":
if info.set_as_default_for_type ("x-scheme-handler/https") == False:
print "Failed to set '%s' as the default application for '%s'" % (info.get_name(), "x-scheme-handler/https")
class CustomAppChooserButton(Gtk.AppChooserButton):
def __init__(self, media_settings, content_type, heading=None):
super(CustomAppChooserButton, self).__init__(content_type=content_type)
self.media_settings = media_settings
content_type = self.get_content_type()
self.set_show_default_item(True)
#fetch preferences for this content type
(pref_start_app, pref_ignore, pref_open_folder) = self.getPreferences()
pref_ask = not pref_start_app and not pref_ignore and not pref_open_folder
info = self.get_app_info()
#append the separator only if we have >= 1 apps in the chooser
if info:
self.append_separator()
icon = Gio.ThemedIcon.new("gtk-dialog-question")
self.append_custom_item(CUSTOM_ITEM_ASK, _("Ask what to do"), icon)
icon = Gio.ThemedIcon.new("gtk-directory")
self.append_custom_item(CUSTOM_ITEM_OPEN_FOLDER, _("Open folder"), icon)
icon = Gio.ThemedIcon.new("gtk-cancel")
self.append_custom_item(CUSTOM_ITEM_DO_NOTHING, _("Do nothing"), icon)
self.set_show_dialog_item(True)
self.set_heading(heading)
if pref_ask:
self.set_active_custom_item(CUSTOM_ITEM_ASK)
elif pref_ignore:
self.set_active_custom_item(CUSTOM_ITEM_DO_NOTHING)
elif pref_open_folder:
self.set_active_custom_item(CUSTOM_ITEM_OPEN_FOLDER)
self.connect("changed", self.onChanged)
self.connect("custom-item-activated", self.onCustomItemActivated)
def onChanged(self, button):
info = self.get_app_info()
if info:
content_type = self.get_content_type()
self.setPreferences(True, False, False)
info.set_as_default_for_type(content_type)
def onCustomItemActivated(self, button, item):
content_type = self.get_content_type()
if item == CUSTOM_ITEM_ASK:
self.setPreferences(False, False, False)
elif item == CUSTOM_ITEM_OPEN_FOLDER:
self.setPreferences(False, False, True)
elif item == CUSTOM_ITEM_DO_NOTHING:
self.setPreferences(False, True, False)
def getPreference(self, settings_key):
strv = self.media_settings.get_strv(settings_key)
return strv != None and self.get_content_type() in strv
def getPreferences(self):
pref_start_app = self.getPreference( PREF_MEDIA_AUTORUN_X_CONTENT_START_APP)
pref_ignore = self.getPreference(PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE)
pref_open_folder = self.getPreference(PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER)
return (pref_start_app, pref_ignore, pref_open_folder)
def setPreference(self, pref_value, settings_key):
array = self.media_settings.get_strv(settings_key)
content_type = self.get_content_type()
array = [ v for v in array if v != content_type ]
if pref_value:
array.append(content_type)
self.media_settings.set_strv(settings_key, array)
def setPreferences(self, pref_start_app, pref_ignore, pref_open_folder):
self.setPreference(pref_start_app, PREF_MEDIA_AUTORUN_X_CONTENT_START_APP)
self.setPreference(pref_ignore, PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE)
self.setPreference(pref_open_folder, PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER)
class OtherTypeDialog(Gtk.Dialog):
def __init__(self, media_settings):
super(OtherTypeDialog, self).__init__(_("Other Media"),
None,
0,
(_("Close"), Gtk.ResponseType.OK))
self.set_default_size(350, 100)
self.media_settings = media_settings
list_store = Gtk.ListStore(str, str)
list_store.set_sort_column_id (1, Gtk.SortType.ASCENDING)
self.type_combo = Gtk.ComboBox.new_with_model(list_store)
self.application_combo = None
content_types = Gio.content_types_get_registered()
for content_type in content_types:
if self.acceptContentType(content_type):
list_store.append([self.getDescription(content_type), content_type])
renderer = Gtk.CellRendererText()
self.type_combo.pack_start(renderer, True)
self.type_combo.add_attribute (renderer,"text", 0)
self.type_combo.set_active(False)
table = ButtonTable(2)
table.addRow(_("_Type:"), self.type_combo)
self.table = table
self.vbox.pack_start(ColumnBox(_("Select how other media should be handled"), table), True, True, 2)
self.vbox.show()
self.type_combo.connect("changed", self.onTypeComboChanged)
def acceptContentType(self, content_type):
if not content_type.startswith("x-content/"):
return False
for d in removable_media_defs:
if Gio.content_type_is_a(content_type, d[DEF_CONTENT_TYPE]):
return False
return True
def getDescription(self, content_type):
for d in other_defs:
if content_type == d[DEF_CONTENT_TYPE]:
s = d[DEF_LABEL]
if s == _(s):
description = Gio.content_type_get_description(content_type)
else:
description = s
break
if description == None:
print "Content type '%s' is missing from the info panel" % content_type
return Gio.content_type_get_description(content_type)
return description
def doShow(self, topLevel):
self.set_transient_for(topLevel)
self.set_modal(True)
self.connect("response", self.onResponse)
self.connect("delete-event", self.onDelete)
self.onTypeComboChanged(self.type_combo)
self.present()
self.show_all()
def onDelete(self, *args):
return self.hide_on_delete()
def doHide(self):
self.hide()
if self.application_combo != None:
self.application_combo.destroy()
self.application_combo = None
self.table.forgetRow()
def onResponse(self, dialog, response):
self.doHide()
def onTypeComboChanged(self, type_combo):
iter = type_combo.get_active_iter()
if not iter:
return
model = type_combo.get_model()
if not model:
return
x_content_type = model.get_value(iter, 1)
heading = model.get_value(iter, 0)
action_container = Gtk.HBox()
if self.application_combo != None:
self.application_combo.destroy()
self.table.forgetRow()
self.application_combo = CustomAppChooserButton(self.media_settings, x_content_type, heading)
self.application_combo.show()
self.table.addRow(_("_Action:"), self.application_combo)
class Module:
def __init__(self, content_box):
keywords = _("media, defaults, applications, programs, removable, browser, email, calendar, music, videos, photos, images, cd, autostart")
advanced = False
sidePage = SidePage(_("Applications & Removable Media"), "default-applications.svg", keywords, advanced, content_box)
self.sidePage = sidePage
self.name = "default"
self.category = "prefs"
hbox = Gtk.HBox()
hbox.set_homogeneous(True)
sidePage.add_widget(hbox, False)
hbox.pack_start(self.setupDefaultApps(), False, False, 0)
hbox.pack_start(self.setupMedia(), False, False, 0)
def setupDefaultApps(self):
table = ButtonTable(len(preferred_app_defs))
for d in preferred_app_defs:
table.addRow(d[PREF_LABEL], DefaultAppChooserButton(d[PREF_CONTENT_TYPE], d[PREF_GEN_CONTENT_TYPE]))
return ColumnBox(_("Default Applications"), table)
def onMoreClicked(self, button):
self.other_type_dialog.doShow(button.get_toplevel())
def setupMedia(self):
self.media_settings = Gio.Settings.new(MEDIA_HANDLING_SCHEMA)
self.other_type_dialog = OtherTypeDialog(self.media_settings)
hbox = Gtk.VBox()
hboxToggle = Gtk.VBox()
hbox.add(hboxToggle)
table = ButtonTable(len(removable_media_defs)+1)
hboxToggle.add(table)
for d in removable_media_defs:
table.addRow(d[DEF_LABEL], CustomAppChooserButton(self.media_settings, d[DEF_CONTENT_TYPE], d[DEF_HEADING]))
more = Gtk.Button.new_with_mnemonic(_("_Other Media..."))
more.connect("clicked", self.onMoreClicked)
table.addRow(None, more)
never = Gtk.CheckButton.new_with_mnemonic(_("_Never prompt or start programs on media insertion"))
hbox.add(never)
self.media_settings.bind(PREF_MEDIA_AUTORUN_NEVER, never, "active", Gio.SettingsBindFlags.DEFAULT)
self.media_settings.bind(PREF_MEDIA_AUTORUN_NEVER, hboxToggle, "sensitive", Gio.SettingsBindFlags.INVERT_BOOLEAN)
return ColumnBox(_("Select how media should be handled"), hbox)
| gpl-2.0 | -4,939,759,339,208,699,000 | 39.952909 | 150 | 0.603084 | false | 3.595331 | false | false | false |
bugzPDX/airmozilla | airmozilla/manage/views/dashboard.py | 1 | 5401 | import datetime
from django.contrib.auth.models import User
from django.shortcuts import render
from django.utils import timezone
from django.db.models import Sum
from jsonview.decorators import json_view
from airmozilla.main.models import (
Event,
SuggestedEvent,
Picture,
EventRevision,
)
from airmozilla.comments.models import Comment
from .decorators import staff_required
@staff_required
def dashboard(request):
"""Management home / explanation page."""
return render(request, 'manage/dashboard.html')
@staff_required
@json_view
def dashboard_data(request):
context = {}
now = timezone.now()
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow = today + datetime.timedelta(days=1)
yesterday = today - datetime.timedelta(days=1)
this_week = today - datetime.timedelta(days=today.weekday())
next_week = this_week + datetime.timedelta(days=7)
last_week = this_week - datetime.timedelta(days=7)
this_month = today.replace(day=1)
next_month = this_month
while next_month.month == this_month.month:
next_month += datetime.timedelta(days=1)
last_month = (this_month - datetime.timedelta(days=1)).replace(day=1)
this_year = this_month.replace(month=1)
next_year = this_year.replace(year=this_year.year + 1)
last_year = this_year.replace(year=this_year.year - 1)
context['groups'] = []
def get_counts(qs, key):
counts = {}
def make_filter(gte=None, lt=None):
filter = {}
if gte is not None:
filter['%s__gte' % key] = gte
if lt is not None:
filter['%s__lt' % key] = lt
return filter
counts['today'] = qs.filter(
**make_filter(gte=today, lt=tomorrow)
).count()
counts['yesterday'] = qs.filter(
**make_filter(gte=yesterday, lt=today)).count()
counts['this_week'] = qs.filter(
**make_filter(gte=this_week, lt=next_week)).count()
counts['last_week'] = qs.filter(
**make_filter(gte=last_week, lt=this_week)).count()
counts['this_month'] = qs.filter(
**make_filter(gte=this_month, lt=next_month)).count()
counts['last_month'] = qs.filter(
**make_filter(gte=last_month, lt=this_month)).count()
counts['this_year'] = qs.filter(
**make_filter(gte=this_year, lt=next_year)).count()
counts['last_year'] = qs.filter(
**make_filter(gte=last_year, lt=this_year)).count()
counts['ever'] = qs.count()
return counts
# Events
events = Event.objects.exclude(status=Event.STATUS_REMOVED)
counts = get_counts(events, 'start_time')
context['groups'].append({
'name': 'New Events',
'counts': counts
})
# Suggested Events
counts = get_counts(SuggestedEvent.objects.all(), 'created')
context['groups'].append({
'name': 'Requested Events',
'counts': counts
})
# Users
counts = get_counts(User.objects.all(), 'date_joined')
context['groups'].append({
'name': 'New Users',
'counts': counts
})
# Comments
counts = get_counts(Comment.objects.all(), 'created')
context['groups'].append({
'name': 'Comments',
'counts': counts
})
# Event revisions
counts = get_counts(EventRevision.objects.all(), 'created')
context['groups'].append({
'name': 'Event Revisions',
'counts': counts
})
# Pictures
counts = get_counts(Picture.objects.all(), 'created')
context['groups'].append({
'name': 'Pictures',
'counts': counts
})
def get_duration_totals(qs):
key = 'start_time'
def make_filter(gte=None, lt=None):
filter = {}
if gte is not None:
filter['%s__gte' % key] = gte
if lt is not None:
filter['%s__lt' % key] = lt
return filter
counts = {}
def sum(elements):
seconds = elements.aggregate(Sum('duration'))['duration__sum']
seconds = seconds or 0 # in case it's None
minutes = seconds / 60
hours = minutes / 60
if hours > 1:
return "%dh" % hours
elif minutes > 1:
return "%dm" % minutes
return "%ds" % seconds
counts['today'] = sum(qs.filter(**make_filter(gte=today)))
counts['yesterday'] = sum(qs.filter(
**make_filter(gte=yesterday, lt=today)))
counts['this_week'] = sum(qs.filter(**make_filter(gte=this_week)))
counts['last_week'] = sum(qs.filter(
**make_filter(gte=last_week, lt=this_week)))
counts['this_month'] = sum(qs.filter(**make_filter(gte=this_month)))
counts['last_month'] = sum(qs.filter(
**make_filter(gte=last_month, lt=this_month)))
counts['this_year'] = sum(qs.filter(**make_filter(gte=this_year)))
counts['last_year'] = sum(qs.filter(
**make_filter(gte=last_year, lt=this_year)))
counts['ever'] = sum(qs)
return counts
# Exceptional
counts = get_duration_totals(Event.objects.exclude(duration__isnull=True))
context['groups'].append({
'name': 'Total Event Durations',
'counts': counts
})
return context
| bsd-3-clause | -7,759,845,208,928,086,000 | 29.514124 | 78 | 0.578967 | false | 3.717137 | false | false | false |
matthewgall/dnsjson.com | app.py | 1 | 5654 | #!/usr/bin/env python3
import os, logging, argparse, json, datetime
import requests
import dns.resolver
from bottle import route, request, response, redirect, hook, error, default_app, view, static_file, template
def set_content_type(fn):
def _return_type(*args, **kwargs):
if request.headers.get('Accept') == "application/json":
response.headers['Content-Type'] = 'application/json'
if request.headers.get('Accept') == "text/plain":
response.headers['Content-Type'] = 'text/plain'
if request.method != 'OPTIONS':
return fn(*args, **kwargs)
return _return_type
def enable_cors(fn):
def _enable_cors(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
if request.method != 'OPTIONS':
return fn(*args, **kwargs)
return _enable_cors
def resolveDomain(domain, recordType, args):
records = []
if args.doh:
try:
payload = {
'name': domain,
'type': recordType
}
data = requests.get("{}".format(args.resolver), params=payload)
for rec in data.json()['Answer']:
records.append(rec['data'])
except:
return records
return records
else:
try:
resolver = dns.resolver.Resolver()
resolver.nameservers = args.resolver.split(',')
if recordType in args.records.split(','):
lookup = resolver.resolve(domain, recordType)
for data in lookup:
if recordType in ['A', 'AAAA']:
records.append(data.address)
elif recordType in ['TXT']:
for rec in data.strings:
records.append(rec.decode("utf-8").replace('"', '').strip())
else:
records.append(str(data).replace('"', '').strip())
return records
except dns.resolver.NXDOMAIN:
return records
except dns.resolver.NoAnswer:
return records
except dns.exception.Timeout:
return records
except dns.resolver.NoNameservers:
return records
@error('404')
@error('403')
def returnError(code, msg, contentType="text/plain"):
response.status = int(code)
response.content_type = contentType
return template('error')
@route('/static/<filepath:path>')
def static(filepath):
return static_file(filepath, root='views/static')
@route('/servers')
def servers():
try:
response.content_type = 'text/plain'
return "\r\n".join(args.resolver.split(","))
except:
return "Unable to open servers file."
@route('/version')
def version():
try:
dirname, filename = os.path.split(os.path.abspath(__file__))
del filename
f = open(os.getenv('VERSION_PATH', dirname + '/.git/refs/heads/master'), 'r')
content = f.read()
response.content_type = 'text/plain'
return content
except:
return "Unable to open version file."
@route('/<record>')
def route_redirect(record):
return redirect("/{}/A".format(record))
@route('/<record>/<type>')
@route('/<record>/<type>.<ext>')
@set_content_type
@enable_cors
def loadRecord(record, type='A', ext='html'):
try:
if record == "":
raise ValueError
if not ext in ["html","txt", "text", "json"]:
raise ValueError
if not type.upper() in args.records.split(','):
raise ValueError
except ValueError:
return returnError(404, "Not Found", "text/html")
if ext in ["json"]:
response.content_type = 'application/json'
if ext in ["txt", "text"]:
response.content_type = 'text/plain'
# We make a request to get information
data = resolveDomain(record, type.upper(), args)
if response.content_type == 'application/json':
return json.dumps({
'results': {
'name': record,
'type': type.upper(),
'records': data,
}
})
elif response.content_type == "text/plain":
return "\r\n".join(data)
else:
return template('rec', {
'name': record,
'type': type.upper(),
'records': data,
'recTypes': args.records.split(',')
})
@route('/', ('GET', 'POST'))
def index():
if request.method == "POST":
recordName = request.forms.get('recordName', '')
recordType = request.forms.get('recordType', '')
if recordName != '' and recordType in args.records.split(','):
return redirect("/{}/{}".format(recordName, recordType))
else:
return returnError(404, "We were not able to figure out what you were asking for", "text/html")
return template("home", {
'recTypes': args.records.split(',')
})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Server settings
parser.add_argument("-i", "--host", default=os.getenv('HOST', '127.0.0.1'), help="server ip")
parser.add_argument("-p", "--port", default=os.getenv('PORT', 5000), help="server port")
# Redis settings
parser.add_argument("--redis", default=os.getenv('REDIS', 'redis://localhost:6379/0'), help="redis connection string")
# Application settings
parser.add_argument("--doh", help="use DNS-over-HTTPS and treat --resolver as DNS-over-HTTPS capable (beta)", action="store_true")
parser.add_argument("--records", default=os.getenv('RECORDS', "A,AAAA,CAA,CNAME,DS,DNSKEY,MX,NS,NSEC,NSEC3,RRSIG,SOA,TXT"), help="supported records")
parser.add_argument("--resolver", default=os.getenv('RESOLVER', '8.8.8.8'), help="resolver address")
# Verbose mode
parser.add_argument("--verbose", "-v", help="increase output verbosity", action="store_true")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
app = default_app()
app.run(host=args.host, port=args.port, server='tornado')
except:
log.error("Unable to start server on {}:{}".format(args.host, args.port)) | mit | -8,385,073,387,106,826,000 | 28.763158 | 150 | 0.67156 | false | 3.196156 | false | false | false |
harshadyeola/easyengine | ee/cli/plugins/stack_upgrade.py | 1 | 12752 | from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.logging import Log
from ee.core.variables import EEVariables
from ee.core.aptget import EEAptGet
from ee.core.apt_repo import EERepo
from ee.core.services import EEService
from ee.core.fileutils import EEFileUtils
from ee.core.shellexec import EEShellExec
from ee.core.git import EEGit
from ee.core.download import EEDownload
import configparser
import os
class EEStackUpgradeController(CementBaseController):
class Meta:
label = 'upgrade'
stacked_on = 'stack'
stacked_type = 'nested'
description = ('Upgrade stack safely')
arguments = [
(['--all'],
dict(help='Upgrade all stack', action='store_true')),
(['--web'],
dict(help='Upgrade web stack', action='store_true')),
(['--admin'],
dict(help='Upgrade admin tools stack', action='store_true')),
(['--mail'],
dict(help='Upgrade mail server stack', action='store_true')),
(['--mailscanner'],
dict(help='Upgrade mail scanner stack', action='store_true')),
(['--nginx'],
dict(help='Upgrade Nginx stack', action='store_true')),
(['--nginxmainline'],
dict(help='Upgrade Nginx Mainline stack', action='store_true')),
(['--php'],
dict(help='Upgrade PHP stack', action='store_true')),
(['--mysql'],
dict(help='Upgrade MySQL stack', action='store_true')),
(['--hhvm'],
dict(help='Upgrade HHVM stack', action='store_true')),
(['--postfix'],
dict(help='Upgrade Postfix stack', action='store_true')),
(['--wpcli'],
dict(help='Upgrade WPCLI', action='store_true')),
(['--redis'],
dict(help='Upgrade Redis', action='store_true')),
(['--php56'],
dict(help="Upgrade to PHP5.6 from PHP5.5",
action='store_true')),
(['--no-prompt'],
dict(help="Upgrade Packages without any prompt",
action='store_true')),
]
@expose(hide=True)
def upgrade_php56(self):
if EEVariables.ee_platform_distro == "ubuntu":
if os.path.isfile("/etc/apt/sources.list.d/ondrej-php5-5_6-{0}."
"list".format(EEVariables.ee_platform_codename)):
Log.error(self, "Unable to find PHP 5.5")
else:
if not(os.path.isfile(EEVariables.ee_repo_file_path) and
EEFileUtils.grep(self, EEVariables.ee_repo_file_path,
"php55")):
Log.error(self, "Unable to find PHP 5.5")
Log.info(self, "During PHP update process non nginx-cached"
" parts of your site may remain down.")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting PHP package update")
if EEVariables.ee_platform_distro == "ubuntu":
EERepo.remove(self, ppa="ppa:ondrej/php5")
EERepo.add(self, ppa=EEVariables.ee_php_repo)
else:
EEAptGet.remove(self, ["php5-xdebug"])
EEFileUtils.searchreplace(self, EEVariables.ee_repo_file_path,
"php55", "php56")
Log.info(self, "Updating apt-cache, please wait...")
EEAptGet.update(self)
Log.info(self, "Installing packages, please wait ...")
if EEVariables.ee_platform_codename == 'trusty':
EEAptGet.install(self, EEVariables.ee_php5_6 + EEVariables.ee_php_extra)
else:
EEAptGet.install(self, EEVariables.ee_php)
if EEVariables.ee_platform_distro == "debian":
EEShellExec.cmd_exec(self, "pecl install xdebug")
with open("/etc/php5/mods-available/xdebug.ini",
encoding='utf-8', mode='a') as myfile:
myfile.write(";zend_extension=/usr/lib/php5/20131226/"
"xdebug.so\n")
EEFileUtils.create_symlink(self, ["/etc/php5/mods-available/"
"xdebug.ini", "/etc/php5/fpm/conf.d"
"/20-xedbug.ini"])
Log.info(self, "Successfully upgraded from PHP 5.5 to PHP 5.6")
@expose(hide=True)
def default(self):
# All package update
if ((not self.app.pargs.php56)):
apt_packages = []
packages = []
if ((not self.app.pargs.web) and (not self.app.pargs.nginx) and
(not self.app.pargs.php) and (not self.app.pargs.mysql) and
(not self.app.pargs.postfix) and (not self.app.pargs.hhvm) and
(not self.app.pargs.mailscanner) and (not self.app.pargs.all)
and (not self.app.pargs.wpcli) and (not self.app.pargs.redis) and (not self.app.pargs.nginxmainline)):
self.app.pargs.web = True
if self.app.pargs.all:
self.app.pargs.web = True
self.app.pargs.mail = True
if self.app.pargs.web:
if EEAptGet.is_installed(self, 'nginx-custom'):
self.app.pargs.nginx = True
elif EEAptGet.is_installed(self, 'nginx-mainline'):
self.app.pargs.nginxmainline = True
else:
Log.info(self, "Nginx is not already installed")
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
self.app.pargs.wpcli = True
if self.app.pargs.mail:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.wpcli = True
self.app.pargs.postfix = True
if EEAptGet.is_installed(self, 'dovecot-core'):
apt_packages = apt_packages + EEVariables.ee_mail
self.app.pargs.mailscanner = True
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.nginx :
if EEAptGet.is_installed(self, 'nginx-custom'):
apt_packages = apt_packages + EEVariables.ee_nginx
else:
Log.info(self, "Nginx Stable is not already installed")
if self.app.pargs.nginxmainline:
if EEAptGet.is_installed(self, 'nginx-mainline'):
apt_packages = apt_packages + EEVariables.ee_nginx_dev
else:
Log.info(self, "Nginx Mainline is not already installed")
if self.app.pargs.php:
if EEVariables.ee_platform_codename != 'trusty':
if EEAptGet.is_installed(self, 'php5-fpm'):
apt_packages = apt_packages + EEVariables.ee_php
else:
Log.info(self, "PHP is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 5.6 is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 7.0 is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
apt_packages = apt_packages + EEVariables.ee_hhvm
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.mysql:
if EEAptGet.is_installed(self, 'mariadb-server'):
apt_packages = apt_packages + EEVariables.ee_mysql
else:
Log.info(self, "MariaDB is not installed")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
apt_packages = apt_packages + EEVariables.ee_postfix
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + EEVariables.ee_redis
else:
Log.info(self, "Redis is not installed")
if self.app.pargs.wpcli:
if os.path.isfile('/usr/bin/wp'):
packages = packages + [["https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar"
"".format(EEVariables.ee_wp_cli),
"/usr/bin/wp",
"WP-CLI"]]
else:
Log.info(self, "WPCLI is not installed with EasyEngine")
if self.app.pargs.mailscanner:
if EEAptGet.is_installed(self, 'amavisd-new'):
apt_packages = (apt_packages + EEVariables.ee_mailscanner)
else:
Log.info(self, "MailScanner is not installed")
if len(packages) or len(apt_packages):
Log.info(self, "During package update process non nginx-cached"
" parts of your site may remain down")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting package update")
Log.info(self, "Updating packages, please wait...")
if len(apt_packages):
# apt-get update
EEAptGet.update(self)
# Update packages
EEAptGet.install(self, apt_packages)
# Post Actions after package updates
if (set(EEVariables.ee_nginx).issubset(set(apt_packages)) or
set(EEVariables.ee_nginx_dev).issubset(set(apt_packages))):
EEService.restart_service(self, 'nginx')
if EEVariables.ee_platform_codename != 'trusty':
if set(EEVariables.ee_php).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5-fpm')
else:
if set(EEVariables.ee_php5_6).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5.6-fpm')
if set(EEVariables.ee_php7_0).issubset(set(apt_packages)):
EEService.restart_service(self, 'php7.0-fpm')
if set(EEVariables.ee_hhvm).issubset(set(apt_packages)):
EEService.restart_service(self, 'hhvm')
if set(EEVariables.ee_postfix).issubset(set(apt_packages)):
EEService.restart_service(self, 'postfix')
if set(EEVariables.ee_mysql).issubset(set(apt_packages)):
EEService.restart_service(self, 'mysql')
if set(EEVariables.ee_mail).issubset(set(apt_packages)):
EEService.restart_service(self, 'dovecot')
if set(EEVariables.ee_redis).issubset(set(apt_packages)):
EEService.restart_service(self, 'redis-server')
if len(packages):
if self.app.pargs.wpcli:
EEFileUtils.remove(self,['/usr/bin/wp'])
Log.debug(self, "Downloading following: {0}".format(packages))
EEDownload.download(self, packages)
if self.app.pargs.wpcli:
EEFileUtils.chmod(self, "/usr/bin/wp", 0o775)
Log.info(self, "Successfully updated packages")
# PHP 5.6 to 5.6
elif (self.app.pargs.php56):
self.upgrade_php56()
else:
self.app.args.print_help()
| mit | -3,016,983,847,023,325,700 | 44.870504 | 117 | 0.513018 | false | 3.983755 | false | false | false |
noba3/KoTos | addons/plugin.video.filmibynaturex-2.5.7/mymoves/movie/Metadata.py | 1 | 2017 |
from common import XBMCInterfaceUtils, Logger
from metahandler import metahandlers # @UnresolvedImport
import sys
def retieveMovieInfoAndAddItem(request_obj, response_obj):
items = response_obj.get_item_list()
XBMCInterfaceUtils.callBackDialogProgressBar(getattr(sys.modules[__name__], '__addMovieInfo_in_item'), items, 'Retrieving MOVIE info', 'Failed to retrieve movie information, please try again later')
__metaget__ = None
def __addMovieInfo_in_item(item):
if item.get_next_action_name() == 'Movie_Streams':
year = unicode(item.get_moving_data()['movieYear'], errors='ignore').encode('utf-8')
title = unicode(item.get_moving_data()['movieTitle'], errors='ignore').encode('utf-8')
meta = None
try:
global __metaget__
if __metaget__ is None:
__metaget__ = metahandlers.MetaData()
meta = __metaget__.get_meta('movie', title, year=year)
except:
Logger.logDebug('Failed to load metahandler module')
xbmc_item = item.get_xbmc_list_item_obj()
if(meta is not None):
xbmc_item.setIconImage(meta['thumb_url'])
xbmc_item.setThumbnailImage(meta['cover_url'])
videoInfo = {'trailer_url':meta['trailer_url']}
for key, value in meta.items():
if type(value) is str:
value = unicode(value).encode('utf-8')
videoInfo[key] = value
xbmc_item.setInfo('video', videoInfo)
xbmc_item.setProperty('fanart_image', meta['backdrop_url'])
item.add_request_data('videoInfo', videoInfo)
contextMenuItems = []
contextMenuItems.append(('Movie Information', 'XBMC.Action(Info)'))
xbmc_item.addContextMenuItems(contextMenuItems, replaceItems=False)
else:
xbmc_item.setInfo('video', {'title':title, 'year':year})
item.add_request_data('videoInfo', {'title':title, 'year':year})
| gpl-2.0 | -5,643,942,562,608,663,000 | 42.847826 | 202 | 0.608825 | false | 3.88632 | false | false | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/soyuz/model/archivesubscriber.py | 1 | 9106 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Database class for table ArchiveSubscriber."""
__metaclass__ = type
__all__ = [
'ArchiveSubscriber',
]
from operator import itemgetter
import pytz
from storm.expr import (
And,
Desc,
Join,
LeftJoin,
)
from storm.locals import (
DateTime,
Int,
Reference,
Store,
Storm,
Unicode,
)
from storm.store import EmptyResultSet
from zope.component import getUtility
from zope.interface import implements
from lp.registry.interfaces.person import validate_person
from lp.registry.model.person import Person
from lp.registry.model.teammembership import TeamParticipation
from lp.services.database.constants import UTC_NOW
from lp.services.database.decoratedresultset import DecoratedResultSet
from lp.services.database.enumcol import DBEnum
from lp.services.identity.interfaces.emailaddress import EmailAddressStatus
from lp.services.identity.model.emailaddress import EmailAddress
from lp.soyuz.enums import ArchiveSubscriberStatus
from lp.soyuz.interfaces.archiveauthtoken import IArchiveAuthTokenSet
from lp.soyuz.interfaces.archivesubscriber import IArchiveSubscriber
from lp.soyuz.model.archiveauthtoken import ArchiveAuthToken
class ArchiveSubscriber(Storm):
"""See `IArchiveSubscriber`."""
implements(IArchiveSubscriber)
__storm_table__ = 'ArchiveSubscriber'
id = Int(primary=True)
archive_id = Int(name='archive', allow_none=False)
archive = Reference(archive_id, 'Archive.id')
registrant_id = Int(name='registrant', allow_none=False)
registrant = Reference(registrant_id, 'Person.id')
date_created = DateTime(
name='date_created', allow_none=False, tzinfo=pytz.UTC)
subscriber_id = Int(
name='subscriber', allow_none=False,
validator=validate_person)
subscriber = Reference(subscriber_id, 'Person.id')
date_expires = DateTime(
name='date_expires', allow_none=True, tzinfo=pytz.UTC)
status = DBEnum(
name='status', allow_none=False,
enum=ArchiveSubscriberStatus)
description = Unicode(name='description', allow_none=True)
date_cancelled = DateTime(
name='date_cancelled', allow_none=True, tzinfo=pytz.UTC)
cancelled_by_id = Int(name='cancelled_by', allow_none=True)
cancelled_by = Reference(cancelled_by_id, 'Person.id')
@property
def displayname(self):
"""See `IArchiveSubscriber`."""
return "%s's access to %s" % (
self.subscriber.displayname, self.archive.displayname)
def cancel(self, cancelled_by):
"""See `IArchiveSubscriber`."""
self.date_cancelled = UTC_NOW
self.cancelled_by = cancelled_by
self.status = ArchiveSubscriberStatus.CANCELLED
def getNonActiveSubscribers(self):
"""See `IArchiveSubscriber`."""
store = Store.of(self)
if self.subscriber.is_team:
# We get all the people who already have active tokens for
# this archive (for example, through separate subscriptions).
auth_token = LeftJoin(
ArchiveAuthToken,
And(ArchiveAuthToken.person_id == Person.id,
ArchiveAuthToken.archive_id == self.archive_id,
ArchiveAuthToken.date_deactivated == None))
team_participation = Join(
TeamParticipation,
TeamParticipation.personID == Person.id)
# Only return people with preferred email address set.
preferred_email = Join(
EmailAddress, EmailAddress.personID == Person.id)
# We want to get all participants who are themselves
# individuals, not teams:
non_active_subscribers = store.using(
Person, team_participation, preferred_email, auth_token).find(
(Person, EmailAddress),
EmailAddress.status == EmailAddressStatus.PREFERRED,
TeamParticipation.teamID == self.subscriber_id,
Person.teamowner == None,
# There is no existing archive auth token.
ArchiveAuthToken.person_id == None)
non_active_subscribers.order_by(Person.name)
return non_active_subscribers
else:
# Subscriber is not a team.
token_set = getUtility(IArchiveAuthTokenSet)
if token_set.getActiveTokenForArchiveAndPerson(
self.archive, self.subscriber) is not None:
# There are active tokens, so return an empty result
# set.
return EmptyResultSet()
# Otherwise return a result set containing only the
# subscriber and their preferred email address.
return store.find(
(Person, EmailAddress),
Person.id == self.subscriber_id,
EmailAddress.personID == Person.id,
EmailAddress.status == EmailAddressStatus.PREFERRED)
class ArchiveSubscriberSet:
"""See `IArchiveSubscriberSet`."""
def _getBySubscriber(self, subscriber, archive, current_only,
with_active_tokens):
"""Return all the subscriptions for a person.
:param subscriber: An `IPerson` for whom to return all
`ArchiveSubscriber` records.
:param archive: An optional `IArchive` which restricts
the results to that particular archive.
:param current_only: Whether the result should only include current
subscriptions (which is the default).
:param with_active_tokens: Indicates whether the tokens for the given
subscribers subscriptions should be included in the resultset.
By default the tokens are not included in the resultset.
^ """
# Grab the extra Storm expressions, for this query,
# depending on the params:
extra_exprs = self._getExprsForSubscriptionQueries(
archive, current_only)
origin = [
ArchiveSubscriber,
Join(
TeamParticipation,
TeamParticipation.teamID == ArchiveSubscriber.subscriber_id)]
if with_active_tokens:
result_row = (ArchiveSubscriber, ArchiveAuthToken)
# We need a left join with ArchiveSubscriber as
# the origin:
origin.append(
LeftJoin(
ArchiveAuthToken,
And(
ArchiveAuthToken.archive_id ==
ArchiveSubscriber.archive_id,
ArchiveAuthToken.person_id == subscriber.id,
ArchiveAuthToken.date_deactivated == None)))
else:
result_row = ArchiveSubscriber
# Set the main expression to find all the subscriptions for
# which the subscriber is a direct subscriber OR is a member
# of a subscribed team.
# Note: the subscription to the owner itself will also be
# part of the join as there is a TeamParticipation entry
# showing that each person is a member of the "team" that
# consists of themselves.
store = Store.of(subscriber)
return store.using(*origin).find(
result_row,
TeamParticipation.personID == subscriber.id,
*extra_exprs).order_by(Desc(ArchiveSubscriber.date_created))
def getBySubscriber(self, subscriber, archive=None, current_only=True):
"""See `IArchiveSubscriberSet`."""
return self._getBySubscriber(subscriber, archive, current_only, False)
def getBySubscriberWithActiveToken(self, subscriber, archive=None):
"""See `IArchiveSubscriberSet`."""
return self._getBySubscriber(subscriber, archive, True, True)
def getByArchive(self, archive, current_only=True):
"""See `IArchiveSubscriberSet`."""
extra_exprs = self._getExprsForSubscriptionQueries(
archive, current_only)
store = Store.of(archive)
result = store.using(ArchiveSubscriber,
Join(Person, ArchiveSubscriber.subscriber_id == Person.id)).find(
(ArchiveSubscriber, Person),
*extra_exprs).order_by(Person.name)
return DecoratedResultSet(result, itemgetter(0))
def _getExprsForSubscriptionQueries(self, archive=None,
current_only=True):
"""Return the Storm expressions required for the parameters.
Just to keep the code DRY.
"""
extra_exprs = []
# Restrict the results to the specified archive if requested:
if archive:
extra_exprs.append(ArchiveSubscriber.archive == archive)
# Restrict the results to only those subscriptions that are current
# if requested:
if current_only:
extra_exprs.append(
ArchiveSubscriber.status == ArchiveSubscriberStatus.CURRENT)
return extra_exprs
| agpl-3.0 | 6,795,547,473,769,740,000 | 37.100418 | 78 | 0.639798 | false | 4.474693 | false | false | false |
pnasrat/puppet-codereview | rietveld.py | 1 | 6634 | #!/usr/bin/python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to simplify some common AppEngine actions.
Use 'rietveld help' for a list of commands.
"""
import logging
import os
import re
import shutil
import subprocess
import sys
import zipfile
APPCFG = 'appcfg.py'
DEV_APPSERVER = 'dev_appserver.py'
RELEASE = 'release'
ZIPFILE = 'django.zip'
FILES = ["app.yaml", "index.yaml",
"__init__.py", "main.py", "settings.py", "urls.py"]
DIRS = ["static", "templates", "codereview"]
IGNORED_DIR = (".svn", "gis", "admin", "localflavor", "mysql", "mysql_old",
"oracle", "postgresql", "postgresql_psycopg2", "sqlite3",
"test")
IGNORED_EXT = (".pyc", ".pyo", ".po", ".mo")
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=use_shell)
output = ""
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output += line
p.wait()
p.stdout.close()
return output
def Help():
print "Available commands:"
print "help"
print "release"
print "serve"
print "serve_email"
print "serve_remote"
print "serve_remote_email"
print "update"
print "update_indexes"
print "upload"
print "vacuum_indexes"
def CreateRelease():
""" Creates a "release" subdirectory.
This is a subdirectory containing a bunch of symlinks, from which the app can
be updated. The main reason for this is to import Django from a zipfile,
which saves dramatically in upload time: statting and computing the SHA1 for
1000s of files is slow. Even if most of those files don't actually need to
be uploaded, they still add to the work done for each update.
"""
def GetDjangoFiles():
"""Return a list of Django files to send to the server.
We prune:
- .svn subdirectories for obvious reasons.
- the other directories are huge and unneeded.
- *.po and *.mo files because they are bulky and unneeded.
- *.pyc and *.pyo because they aren't used by App Engine anyway.
"""
result = []
for root, dirs, files in os.walk("django"):
dirs[:] = [d for d in dirs if d not in IGNORED_DIR]
for file in files:
unused, extension = os.path.splitext(file)
if extension in IGNORED_EXT:
continue
result.append(os.path.join(root, file))
return result
def CopyRietveldDirectory(src, dst):
"""Copies a directory used by Rietveld.
Skips ".svn" directories and ".pyc" files.
"""
for root, dirs, files in os.walk(src):
if not os.path.exists(os.path.join(dst, root)):
os.mkdir(os.path.join(dst, root))
for file in files:
unused, extension = os.path.splitext(file)
if extension in (".pyc", ".pyo"):
continue
shutil.copyfile(os.path.join(root, file), os.path.join(dst, root, file))
dirs[:] = [d for d in dirs if d not in (".svn")]
for dir in dirs:
os.mkdir(os.path.join(dst, root, dir))
# Remove old ZIPFILE file.
if os.path.exists(ZIPFILE):
os.remove(ZIPFILE)
django_files = GetDjangoFiles()
django_zip = zipfile.ZipFile(ZIPFILE, "w")
for file in django_files:
django_zip.write(file, compress_type=zipfile.ZIP_DEFLATED)
django_zip.close()
# Remove old RELEASE directory.
if sys.platform.startswith("win"):
RunShell(["rmdir", "/s", "/q", RELEASE])
else:
RunShell(["rm", "-rf", RELEASE])
# Create new RELEASE directory.
os.mkdir(RELEASE)
if sys.platform.startswith("win"):
# No symbolic links on Windows, just copy.
for x in FILES + [ZIPFILE]:
shutil.copyfile(x, os.path.join(RELEASE, x))
for x in DIRS:
CopyRietveldDirectory(x, RELEASE)
else:
# Create symbolic links.
for x in FILES + DIRS + [ZIPFILE]:
RunShell(["ln", "-s", "../" + x, os.path.join(RELEASE, x)])
def GetApplicationName():
file = open("app.yaml", "r")
result = file.read()
file.close()
APP_REGEXP = ".*?application: ([\w\-]+)"
return re.compile(APP_REGEXP, re.DOTALL).match(result).group(1)
def Update(args):
print "Updating " + GetApplicationName()
output = RunShell(["svn", "info"])
revision = re.compile(".*?\nRevision: (\d+)",
re.DOTALL).match(output).group(1)
revision_file = os.path.join("templates", "live_revision.html")
file = open(revision_file, "w")
file.write('This is <a class="novisit" '
'href="http://code.google.com/p/rietveld/">Rietveld</a> r' +
revision)
file.close()
CreateRelease()
appcfg_args = [APPCFG, "update", RELEASE] + args
# Use os.system here because input might be required, and that doesn't work
# through subprocess.Popen.
os.system(" ".join(appcfg_args))
RunShell(["svn", "revert", revision_file])
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
Help()
return 0
command = argv[1]
if command == "help":
Help()
elif command == "serve":
RunShell([DEV_APPSERVER, "."], True)
elif command == "serve_remote":
RunShell([DEV_APPSERVER, "--address", "0.0.0.0", "."], True)
elif command == "serve_email":
RunShell([DEV_APPSERVER, "--enable_sendmail", "."], True)
elif command == "serve_remote_email":
RunShell([DEV_APPSERVER, "--enable_sendmail", "--address", "0.0.0.0", "."],
True)
elif command == "release":
CreateRelease()
elif command in ("update", "upload"):
Update(argv[2:])
elif command == "update_indexes":
RunShell([APPCFG, "update_indexes", "."], True)
elif command == "vacuum_indexes":
RunShell([APPCFG, "vacuum_indexes", "."], True)
else:
print "Unknown command: " + command
return 2
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -6,787,070,875,122,825,000 | 28.484444 | 80 | 0.641091 | false | 3.506342 | false | false | false |
F8LEFT/ART | KDE/share/ECM/find-modules/rules_engine.py | 1 | 21090 | #!/usr/bin/env python
#=============================================================================
# Copyright 2016 by Shaheed Haque ([email protected])
# Copyright 2016 Stephen Kelly <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#=============================================================================
"""SIP file generation rules engine."""
from __future__ import print_function
from abc import *
import argparse
import gettext
import inspect
import logging
import os
import re
import sys
import textwrap
import traceback
from copy import deepcopy
from clang.cindex import CursorKind
from clang.cindex import AccessSpecifier
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
logger = logging.getLogger(__name__)
gettext.install(__name__)
_SEPARATOR = "\x00"
def _parents(container):
parents = []
parent = container.semantic_parent
while parent and parent.kind != CursorKind.TRANSLATION_UNIT:
parents.append(parent.spelling)
parent = parent.semantic_parent
if parents:
parents = "::".join(reversed(parents))
else:
parents = os.path.basename(container.translation_unit.spelling)
return parents
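# Illustrative note (hypothetical example, not part of the engine): for a
# cursor representing a method such as Namespace::Klass::method, _parents()
# returns "Namespace::Klass"; for a top-level declaration it falls back to the
# basename of the translation unit.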
class Rule(object):
def __init__(self, db, rule_number, fn, pattern_zip):
self.db = db
self.rule_number = rule_number
self.fn = fn
self.usage = 0
try:
groups = ["(?P<{}>{})".format(name, pattern) for pattern, name in pattern_zip]
groups = _SEPARATOR.join(groups)
self.matcher = re.compile(groups)
except Exception as e:
groups = ["{} '{}'".format(name, pattern) for pattern, name in pattern_zip]
groups = ", ".join(groups)
raise RuntimeError(_("Bad {}: {}: {}").format(self, groups, e))
def match(self, candidate):
return self.matcher.match(candidate)
def trace_result(self, parents, item, original, modified):
fqn = parents + "::" + original["name"] + "[" + str(item.extent.start.line) + "]"
self._trace_result(fqn, original, modified)
def _trace_result(self, fqn, original, modified):
if not modified["name"]:
logger.debug(_("Rule {} suppressed {}, {}").format(self, fqn, original))
else:
delta = False
for k, v in original.iteritems():
if v != modified[k]:
delta = True
break
if delta:
logger.debug(_("Rule {} modified {}, {}->{}").format(self, fqn, original, modified))
else:
logger.warn(_("Rule {} did not modify {}, {}").format(self, fqn, original))
def __str__(self):
return "[{},{}]".format(self.rule_number, self.fn.__name__)
class AbstractCompiledRuleDb(object):
__metaclass__ = ABCMeta
def __init__(self, db, parameter_names):
self.db = db
self.compiled_rules = []
for i, raw_rule in enumerate(db()):
if len(raw_rule) != len(parameter_names) + 1:
raise RuntimeError(_("Bad raw rule {}: {}: {}").format(db.__name__, raw_rule, parameter_names))
z = zip(raw_rule[:-1], parameter_names)
self.compiled_rules.append(Rule(db, i, raw_rule[-1], z))
self.candidate_formatter = _SEPARATOR.join(["{}"] * len(parameter_names))
def _match(self, *args):
candidate = self.candidate_formatter.format(*args)
for rule in self.compiled_rules:
matcher = rule.match(candidate)
if matcher:
#
# Only use the first matching rule.
#
rule.usage += 1
return matcher, rule
return None, None
@abstractmethod
def apply(self, *args):
        raise NotImplementedError(_("Missing subclass"))
def dump_usage(self, fn):
""" Dump the usage counts."""
for rule in self.compiled_rules:
fn(self.__class__.__name__, str(rule), rule.usage)
class ContainerRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR CONTAINERS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any container (class, namespace, struct, union) to be
customised, for example to add SIP compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the container.
1. A regular expression which matches the container name.
2. A regular expression which matches any template parameters.
3. A regular expression which matches the container declaration.
4. A regular expression which matches any base specifiers.
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def container_xxx(container, sip, matcher):
'''
Return a modified declaration for the given container.
:param container: The clang.cindex.Cursor for the container.
:param sip: A dict with the following keys:
name The name of the container.
template_parameters Any template parameters.
decl The declaration.
base_specifiers Any base specifiers.
body The body, less the outer
pair of braces.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT body and annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
empty string will cause the container to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(ContainerRuleDb, self).__init__(db, ["parents", "container", "template_parameters", "decl", "base_specifiers"])
def apply(self, container, sip):
"""
Walk over the rules database for functions, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param sip: The SIP dict.
"""
parents = _parents(container)
matcher, rule = self._match(parents, sip["name"], sip["template_parameters"], sip["decl"], sip["base_specifiers"])
if matcher:
before = deepcopy(sip)
rule.fn(container, sip, matcher)
rule.trace_result(parents, container, before, sip)
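# Illustrative sketch (hypothetical names and regexes, not shipped rules): a
# bindings rules module could expose a raw container database like the one
# below and hand it to this engine with ContainerRuleDb(container_rules).
#
#   def _container_discard(container, sip, matcher):
#       sip["name"] = ""          # an empty name suppresses the container
#
#   def container_rules():
#       return [
#           # parents, container, template_parameters, decl, base_specifiers, fn
#           [".*", "MyClassPrivate", ".*", ".*", ".*", _container_discard],
#       ]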
class FunctionRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR FUNCTIONS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any function to be customised, for example to add SIP
compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the function.
1. A regular expression which matches the function name.
2. A regular expression which matches any template parameters.
3. A regular expression which matches the function result.
4. A regular expression which matches the function parameters (e.g.
"int a, void *b" for "int foo(int a, void *b)").
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def function_xxx(container, function, sip, matcher):
'''
Return a modified declaration for the given function.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param sip: A dict with the following keys:
name The name of the function.
template_parameters Any template parameters.
fn_result Result, if not a constructor.
decl The declaration.
                                    prefix               Leading keywords ("static"). Separated by space,
ends with a space.
suffix Trailing keywords ("const"). Separated by space, starts with
space.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
empty string will cause the container to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(FunctionRuleDb, self).__init__(db, ["container", "function", "template_parameters", "fn_result", "parameters"])
def apply(self, container, function, sip):
"""
Walk over the rules database for functions, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param sip: The SIP dict.
"""
parents = _parents(function)
matcher, rule = self._match(parents, sip["name"], ", ".join(sip["template_parameters"]), sip["fn_result"], ", ".join(sip["parameters"]))
if matcher:
before = deepcopy(sip)
rule.fn(container, function, sip, matcher)
rule.trace_result(parents, function, before, sip)
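# Illustrative sketch (hypothetical names and regexes): a FunctionRuleDb entry
# is five patterns followed by the rule function, here one that discards all
# assignment operators.
#
#   def _function_discard(container, function, sip, matcher):
#       sip["name"] = ""          # an empty name suppresses the function
#
#   def function_rules():
#       return [
#           # container, function, template_parameters, fn_result, parameters, fn
#           [".*", "operator=", ".*", ".*", ".*", _function_discard],
#       ]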
class ParameterRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR FUNCTION PARAMETERS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any parameter in any function to be customised, for
example to add SIP compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the function enclosing the parameter.
1. A regular expression which matches the function name enclosing the
parameter.
2. A regular expression which matches the parameter name.
3. A regular expression which matches the parameter declaration (e.g.
"int foo").
4. A regular expression which matches the parameter initialiser (e.g.
"Xyz:MYCONST + 42").
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
        def parameter_xxx(container, function, parameter, sip, matcher):
'''
Return a modified declaration and initialiser for the given parameter.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param parameter: The clang.cindex.Cursor for the parameter.
:param sip: A dict with the following keys:
                                    name                 The name of the parameter.
decl The declaration.
init Any initialiser.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(ParameterRuleDb, self).__init__(db, ["container", "function", "parameter", "decl", "init"])
def apply(self, container, function, parameter, sip):
"""
Walk over the rules database for parameters, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param parameter: The clang.cindex.Cursor for the parameter.
:param sip: The SIP dict.
"""
parents = _parents(function)
matcher, rule = self._match(parents, function.spelling, sip["name"], sip["decl"], sip["init"])
if matcher:
before = deepcopy(sip)
rule.fn(container, function, parameter, sip, matcher)
rule.trace_result(parents, parameter, before, sip)
class VariableRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR VARIABLES.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any variable to be customised, for example to add SIP
compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the variable.
1. A regular expression which matches the variable name.
2. A regular expression which matches the variable declaration (e.g.
"int foo").
3. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def variable_xxx(container, variable, sip, matcher):
'''
Return a modified declaration for the given variable.
:param container: The clang.cindex.Cursor for the container.
:param variable: The clang.cindex.Cursor for the variable.
:param sip: A dict with the following keys:
name The name of the variable.
decl The declaration.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
        :return: An updated set of sip.xxx values. Setting sip.name to the
                 empty string will cause the variable to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(VariableRuleDb, self).__init__(db, ["container", "variable", "decl"])
def apply(self, container, variable, sip):
"""
Walk over the rules database for variables, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param variable: The clang.cindex.Cursor for the variable.
:param sip: The SIP dict.
"""
parents = _parents(variable)
matcher, rule = self._match(parents, sip["name"], sip["decl"])
if matcher:
before = deepcopy(sip)
rule.fn(container, variable, sip, matcher)
rule.trace_result(parents, variable, before, sip)
class RuleSet(object):
"""
To implement your own binding, create a subclass of RuleSet, also called
RuleSet in your own Python module. Your subclass will expose the raw rules
    along with other ancillary data exposed through the subclass methods.
    You then simply run the SIP generation and SIP compilation programs, passing
    in the name of your rules file.
"""
__metaclass__ = ABCMeta
@abstractmethod
def container_rules(self):
"""
Return a compiled list of rules for containers.
:return: A ContainerRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def function_rules(self):
"""
Return a compiled list of rules for functions.
:return: A FunctionRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def parameter_rules(self):
"""
Return a compiled list of rules for function parameters.
:return: A ParameterRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def variable_rules(self):
"""
Return a compiled list of rules for variables.
:return: A VariableRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
def dump_unused(self):
"""Usage statistics, to identify unused rules."""
def dumper(db_name, rule, usage):
if usage:
logger.info(_("Rule {}::{} used {} times".format(db_name, rule, usage)))
else:
logger.warn(_("Rule {}::{} unused".format(db_name, rule)))
for db in [self.container_rules(), self.function_rules(), self.parameter_rules(),
self.variable_rules()]:
db.dump_usage(dumper)
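# A minimal, hypothetical RuleSet subclass (a sketch only: it assumes this module is
# importable as "rules_engine" and that each *RuleDb constructor takes a zero-argument
# callable returning the raw rule list; adjust to the real AbstractCompiledRuleDb
# signature if it differs):
#
#   class RuleSet(rules_engine.RuleSet):
#       def __init__(self):
#           self._container_db = rules_engine.ContainerRuleDb(lambda: [])
#           self._function_db = rules_engine.FunctionRuleDb(lambda: [])
#           self._parameter_db = rules_engine.ParameterRuleDb(lambda: [])
#           self._variable_db = rules_engine.VariableRuleDb(lambda: [])
#
#       def container_rules(self):
#           return self._container_db
#
#       def function_rules(self):
#           return self._function_db
#
#       def parameter_rules(self):
#           return self._parameter_db
#
#       def variable_rules(self):
#           return self._variable_db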
def container_discard(container, sip, matcher):
sip["name"] = ""
def function_discard(container, function, sip, matcher):
sip["name"] = ""
def parameter_transfer_to_parent(container, function, parameter, sip, matcher):
if function.is_static_method():
sip["annotations"].add("Transfer")
else:
sip["annotations"].add("TransferThis")
def param_rewrite_mode_t_as_int(container, function, parameter, sip, matcher):
sip["decl"] = sip["decl"].replace("mode_t", "unsigned int")
def return_rewrite_mode_t_as_int(container, function, sip, matcher):
sip["fn_result"] = "unsigned int"
def variable_discard(container, variable, sip, matcher):
sip["name"] = ""
def parameter_strip_class_enum(container, function, parameter, sip, matcher):
sip["decl"] = sip["decl"].replace("class ", "").replace("enum ", "")
def function_discard_impl(container, function, sip, matcher):
if function.extent.start.column == 1:
sip["name"] = ""
def rules(project_rules):
"""
Constructor.
:param project_rules: The rules file for the project.
"""
import imp
imp.load_source("project_rules", project_rules)
#
# Statically prepare the rule logic. This takes the rules provided by the user and turns them into code.
#
return getattr(sys.modules["project_rules"], "RuleSet")()
| gpl-3.0 | 3,596,388,408,542,231,000 | 39.095057 | 144 | 0.600853 | false | 4.81947 | false | false | false |
bsmithyman/pymatsolver | pymatsolver/Tests/test_Triangle.py | 1 | 1715 | import unittest
import numpy as np, scipy.sparse as sp
TOL = 1e-12
class TestMumps(unittest.TestCase):
def setUp(self):
n = 50
nrhs = 20
self.A = sp.rand(n, n, 0.4) + sp.identity(n)
self.sol = np.ones((n, nrhs))
self.rhsU = sp.triu(self.A) * self.sol
self.rhsL = sp.tril(self.A) * self.sol
def test_directLower(self):
from pymatsolver import ForwardSolver
ALinv = ForwardSolver(sp.tril(self.A))
X = ALinv * self.rhsL
x = ALinv * self.rhsL[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_1(self):
from pymatsolver import BackwardSolver
AUinv = BackwardSolver(sp.triu(self.A))
X = AUinv * self.rhsU
x = AUinv * self.rhsU[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_python(self):
from pymatsolver import _ForwardSolver
ALinv = _ForwardSolver(sp.tril(self.A))
X = ALinv * self.rhsL
x = ALinv * self.rhsL[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_1_python(self):
from pymatsolver import _BackwardSolver
AUinv = _BackwardSolver(sp.triu(self.A))
X = AUinv * self.rhsU
x = AUinv * self.rhsU[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
if __name__ == '__main__':
unittest.main()
| mit | -503,422,981,200,351,500 | 34 | 68 | 0.602915 | false | 2.936644 | true | false | false |
ric2b/Vivaldi-browser | chromium/components/policy/tools/make_policy_zip.py | 1 | 2188 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive with policy template files.
"""
import optparse
import sys
import zipfile
def add_files_to_zip(zip_file, base_dir, file_list):
"""Pack a list of files into a zip archive, that is already opened for
writing.
Args:
zip_file: An object representing the zip archive.
base_dir: Base path of all the files in the real file system.
file_list: List of absolute file paths to add. Must start with base_dir.
The base_dir is stripped in the zip file entries.
"""
if (base_dir[-1] != '/'):
base_dir += '/'
for file_path in file_list:
assert file_path.startswith(base_dir)
zip_file.write(file_path, file_path[len(base_dir):])
return 0
def main(argv):
"""Pack a list of files into a zip archive.
Args:
output: The file path of the zip archive.
base_dir: Base path of input files.
languages: Comma-separated list of languages, e.g. en-US,de.
add: List of files to include in the archive. The language placeholder
${lang} is expanded into one file for each language.
"""
parser = optparse.OptionParser()
parser.add_option("--output", dest="output")
parser.add_option("--base_dir", dest="base_dir")
parser.add_option("--languages", dest="languages")
parser.add_option("--add", action="append", dest="files", default=[])
options, args = parser.parse_args(argv[1:])
# Process file list, possibly expanding language placeholders.
_LANG_PLACEHOLDER = "${lang}"
languages = filter(bool, options.languages.split(','))
file_list = []
for file_to_add in options.files:
if (_LANG_PLACEHOLDER in file_to_add):
for lang in languages:
file_list.append(file_to_add.replace(_LANG_PLACEHOLDER, lang))
else:
file_list.append(file_to_add)
zip_file = zipfile.ZipFile(options.output, 'w', zipfile.ZIP_DEFLATED)
try:
return add_files_to_zip(zip_file, options.base_dir, file_list)
finally:
zip_file.close()
if '__main__' == __name__:
sys.exit(main(sys.argv))
| bsd-3-clause | -7,656,171,030,158,575,000 | 31.656716 | 76 | 0.679159 | false | 3.523349 | false | false | false |
Solewer/vino-cave | vinocave/settings.py | 1 | 3114 | """
Django settings for vinocave project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vj#grw^2zlpbh^w&0aed2ac_q51p_s#kiw-f*x6(^u_xy_f-jk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vinocave.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vinocave.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit | -9,186,155,754,209,220,000 | 24.735537 | 91 | 0.685613 | false | 3.475446 | false | false | false |
toothris/toothris | src/bprofile.py | 1 | 5189 | # Copyright 2008, 2015 Oleg Plakhotniuk
#
# This file is part of Toothris.
#
# Toothris is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Toothris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Toothris. If not, see <http://www.gnu.org/licenses/>.
# LIBS
import pygame
# CONSTS
MEASURES_MIN_COUNT = 200
class Profiler :
def __init__ ( self, name ) :
self.name = name
self.time_min = 999999
self.time_max = 0
self.time_total = 0
self.measures = - MEASURES_MIN_COUNT
self.clock = pygame.time.Clock ()
self.measuring = False
self.profilers = {}
def begin ( self ) :
if self.measuring :
raise RuntimeError ( "trying to start already started profiler" )
self.clock.tick ()
self.measuring = True
def end ( self ) :
if not self.measuring :
raise RuntimeError ( "trying to stop not started profiler" )
self.clock.tick ()
self.measuring = False
self.measures += 1
if self.measures > 0 :
self.time_total += self.clock.get_time ()
self.time_min = min ( self.time_min, self.clock.get_time () )
self.time_max = max ( self.time_max, self.clock.get_time () )
def time_avg ( self ) :
return float ( self.time_total ) / max ( self.measures, 1 )
root_profilers = {}
stack_profilers = []
def begin ( name ) :
    global stack_profilers
global root_profilers
if not isinstance ( name, type ( "" ) ) :
raise RuntimeError ( "string name expected" )
if name == "" :
raise RuntimeError ( "name must not be empty" )
if len ( stack_profilers ) > 0 :
profilers = stack_profilers [ len ( stack_profilers ) - 1 ].profilers
else :
profilers = root_profilers
if name in profilers :
profiler = profilers [ name ]
else :
profiler = Profiler ( name )
profilers [ name ] = profiler
profiler.begin ()
stack_profilers.append ( profiler )
def end ( name ) :
global stack_profilers
if not isinstance ( name, type ( "" ) ) :
raise RuntimeError ( "string name expected" )
if len ( stack_profilers ) == 0 :
raise RuntimeError ( "no profiler currently running" )
if name == "" :
raise RuntimeError ( "name must not be empty" )
last_profiler = stack_profilers [ len ( stack_profilers ) - 1 ]
if name != last_profiler.name :
raise RuntimeError ( "trying to stop profiler " + name + \
" before profiler " + last_profiler.name )
stack_profilers.pop ().end ()
def stats_profilers ( profilers, indent = 0 ) :
if len ( profilers ) == 0 :
return
def padded_str ( value, max_len = 0, left_padding = True ) :
if isinstance ( value, type ( "" ) ) :
str_value = value
elif isinstance ( value, type ( 0 ) ) :
str_value = str ( value )
elif isinstance ( value, type ( 0.0 ) ) :
str_value = "%(number).2f" % { "number" : value }
spaces = max ( 0, max_len - len ( str_value ) )
if left_padding :
return " " * spaces + str_value
else :
return str_value + " " * spaces
longest_name = max ( [ len ( padded_str ( p.name ) ) for p in profilers.values () ] )
longest_min = max ( [ len ( padded_str ( p.time_min ) ) for p in profilers.values () ] )
longest_max = max ( [ len ( padded_str ( p.time_max ) ) for p in profilers.values () ] )
longest_avg = max ( [ len ( padded_str ( p.time_avg() ) ) for p in profilers.values () ] )
longest_msr = max ( [ len ( padded_str ( p.measures ) ) for p in profilers.values () ] )
names = profilers.keys ()
names.sort ()
for name in names :
profiler = profilers [ name ]
if profiler.measures > 0 :
print " " * 4 * indent + padded_str ( profiler.name , longest_name, False ) + \
" : min = " + padded_str ( profiler.time_min , longest_min ) + \
" max = " + padded_str ( profiler.time_max , longest_max ) + \
" avg = " + padded_str ( profiler.time_avg(), longest_avg ) + \
" frames = " + padded_str ( profiler.measures , longest_msr )
else :
print " " * 4 * indent + padded_str ( profiler.name , longest_name, False ) + \
" : not enough frames to profile ( " + str ( -profiler.measures ) + " left )"
stats_profilers ( profiler.profilers, indent + 1 )
def stats () :
print "profilers stats:"
stats_profilers ( root_profilers )
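# Typical usage of this module's API (a sketch; the profiler names are arbitrary,
# and end() must be called in reverse order of begin()):
#
#   import bprofile
#   bprofile.begin ( "frame" )
#   bprofile.begin ( "update" )
#   # ... work to be measured ...
#   bprofile.end ( "update" )
#   bprofile.end ( "frame" )
#   bprofile.stats ()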
| gpl-3.0 | 8,399,911,232,193,724,000 | 31.841772 | 99 | 0.56639 | false | 3.703783 | false | false | false |
bearops/ebzl | ebzl/modules/ecs.py | 1 | 4547 | from .. lib import (
ecs,
format as fmt,
parameters
)
from . import (
version
)
import os
import json
import argparse
def get_argument_parser():
parser = argparse.ArgumentParser("ebzl ecs")
    parameters.add_profile(parser, required=False)
parameters.add_region(parser, required=False)
subparsers = parser.add_subparsers()
# ebzl ecs create
create_parser = subparsers.add_parser(
"create",
help="register a new task")
create_parser.set_defaults(func=create_task)
create_parser.add_argument("--family", required=True)
create_parser.add_argument("--name", required=True)
create_parser.add_argument("--image", required=True)
create_parser.add_argument("--version", default=version.get_version())
create_parser.add_argument("--command", default="")
create_parser.add_argument("--entrypoint", default=[])
create_parser.add_argument("--cpu", default=0)
create_parser.add_argument("--memory", default=250)
create_parser.add_argument("-v", "--var", action="append")
create_parser.add_argument("-f", "--var-file")
# ebzl ecs run
run_parser = subparsers.add_parser(
"run",
help="run registered task")
run_parser.set_defaults(func=run_task)
run_parser.add_argument("--task", required=True)
run_parser.add_argument("--cluster", default="default")
run_parser.add_argument("--command")
run_parser.add_argument("-v", "--var", action="append")
run_parser.add_argument("-f", "--var-file")
# ebzl ecs tasks
tasks_parser = subparsers.add_parser(
"tasks",
help="list available tasks")
tasks_parser.set_defaults(func=list_tasks)
# ebzl ecs clusters
clusters_parser = subparsers.add_parser(
"clusters",
help="list available clusters")
clusters_parser.set_defaults(func=list_clusters)
return parser
def parse_var_entry(var_entry):
parts = var_entry.strip().split("=")
return {"name": parts[0],
"value": "=".join(parts[1:])}
def parse_var_file(fpath):
if not fpath or not os.path.isfile(fpath):
return []
with open(os.path.expanduser(fpath), "rb") as f:
return map(parse_var_entry, f.readlines())
def get_env_options(args):
env_options = []
env_options.extend(parse_var_file(args.var_file))
if args.var:
env_options.extend(map(parse_var_entry, args.var))
return env_options
def get_container_definition(args):
return {
"name": args.name,
"image": "%s:%s" % (args.image, args.version),
"mountPoints": [],
"volumesFrom": [],
"portMappings": [],
"command": map(str.strip, args.command.split()),
"essential": True,
"entryPoint": args.entrypoint,
"links": [],
"cpu": int(args.cpu),
"memory": int(args.memory),
"environment": get_env_options(args)
}
def create_task(args):
conn = ecs.get_conn(profile=args.profile)
conn.register_task_definition(
family="AtlasCron",
containerDefinitions=[get_container_definition(args)])
def run_task(args):
conn = ecs.get_conn(profile=args.profile)
kwargs = {
"cluster": args.cluster,
"taskDefinition": args.task,
"count": 1,
}
if args.command or args.var or args.var_file:
overrides = {}
task = conn.describe_task_definition(taskDefinition=args.task)
overrides["name"] = (task["taskDefinition"]
["containerDefinitions"]
[0]
["name"])
if args.command:
overrides["command"] = map(str.strip, args.command.split())
env_options = get_env_options(args)
if env_options:
overrides["environment"] = env_options
kwargs["overrides"] = {"containerOverrides": [overrides]}
print conn.run_task(**kwargs)
def list_tasks(args):
conn = ecs.get_conn(profile=args.profile)
tasks = conn.list_task_definitions()
fmt.print_list([arn.split("/")[-1]
for arn in tasks["taskDefinitionArns"]])
def list_clusters(args):
conn = ecs.get_conn(profile=args.profile)
clusters = conn.list_clusters()
fmt.print_list([arn.split("/")[-1]
for arn in clusters["clusterArns"]])
def run(argv):
args = parameters.parse(
parser=get_argument_parser(),
argv=argv,
postprocessors=[parameters.add_default_region])
args.func(args)
| bsd-3-clause | -6,729,863,579,245,356,000 | 26.72561 | 74 | 0.607653 | false | 3.687753 | false | false | false |
rjungbeck/rasterizer | mupdf12.py | 1 | 2411 | from mupdfbase import MuPdfBase, Matrix, Rect, BBox
from ctypes import cdll,c_float, c_int, c_void_p, Structure, c_char_p,POINTER
FZ_STORE_UNLIMITED=0
class MuPdf(MuPdfBase):
def __init__(self):
self.dll=cdll.libmupdf
self.dll.fz_bound_page.argtypes=[c_void_p, c_void_p, POINTER(Rect)]
self.dll.fz_bound_page.restype=POINTER(Rect)
self.dll.fz_new_pixmap_with_bbox.argtypes=[c_void_p, c_void_p, POINTER(BBox)]
self.dll.fz_new_pixmap_with_bbox.restype=c_void_p
self.dll.fz_run_page.argtypes=[c_void_p, c_void_p, c_void_p, POINTER(Matrix), c_void_p]
self.dll.fz_run_page.restype=None
self.dll.fz_write_pam.argtypes=[c_void_p, c_void_p, c_char_p, c_int]
self.dll.fz_write_pam.restype=None
self.dll.fz_write_pbm.argtypes=[c_void_p, c_void_p, c_char_p]
self.dll.fz_write_pbm.restype=None
self.dll.fz_count_pages.argtypes=[c_void_p]
self.dll.fz_count_pages.restype=c_int
self.dll.fz_open_document_with_stream.argtypes=[c_void_p, c_char_p, c_void_p]
self.dll.fz_open_document_with_stream.restype=c_void_p
self.dll.fz_close_document.argtypes=[c_void_p]
self.dll.fz_close_document.restype=None
self.dll.fz_free_page.argtypes=[c_void_p, c_void_p]
self.dll.fz_free_page.restype=None
self.dll.fz_find_device_colorspace.argtypes=[c_void_p, c_char_p]
self.dll.fz_find_device_colorspace.restype=c_void_p
MuPdfBase.__init__(self)
def getSize(self):
rect=Rect()
self.dll.fz_bound_page(self.doc, self.page,rect)
return rect.x0, rect.y0, rect.x1, rect.y1
def getPageCount(self):
return self.dll.fz_count_pages(self.doc)
def loadPage(self, num):
self.page=self.dll.fz_load_page(self.doc, num-1)
def runPage(self, dev, transform):
self.dll.fz_run_page(self.doc, self.page, dev, transform, None)
def freePage(self):
self.dll.fz_free_page(self.doc, self.page)
self.page=None
def loadDocument(self, context, stream):
self.doc=self.dll.fz_open_document_with_stream(self.context, "application/pdf", self.stream)
def closeDocument(self):
if self.doc:
self.dll.fz_close_document(self.doc)
self.doc=None
def findColorspace(self, colorSpace):
return self.dll.fz_find_device_colorspace(self.context, colorSpace)
def setContext(self):
self.context=self.dll.fz_new_context(None, None, FZ_STORE_UNLIMITED)
| agpl-3.0 | -1,224,500,625,628,163,300 | 28.935897 | 94 | 0.68229 | false | 2.425553 | false | false | false |
MaisamArif/NEST | backend/tmp_markov_framework/markov_script.py | 1 | 1912 | import numpy as np
import random
def normalize(arr):
s = sum(arr)
if s == 0:
s = 1
arr[0] = 1
for i, val in enumerate(arr):
arr[i] = val/s
def generate(width, height):
matrix = []
for i in range(height):
matrix.append([])
for j in range(width):
matrix[i].append(float(random.randint(0, 1000))/1000)
normalize(matrix[i])
matrix[i] = [round(x, 3) for x in matrix[i]]
return np.matrix(matrix)
def initialize(soc0, soc1):
matricies = []
for i in range(4):
matricies.append(generate(4,4))
#format is as follows [P0, IM0, P1, IM1]
P0, IM0, P1, IM1 = matricies
vm1 = IM1[0:,0] * IM0[0,0:] + IM1[0:,1] * IM0[1,0:] + IM1[0:,2] * IM0[2,0:] +IM1[0:,3] * IM0[3,0:]
vm2 = IM0[0:,0] * IM1[0,0:] + IM0[0:,1] * IM1[1,0:] + IM0[0:,2] * IM1[2,0:] +IM0[0:,3] * IM1[3,0:]
c0_to_c1 = ((1-soc0) * P0)+ (soc0 * vm1)
c1_to_c0 = ((1-soc1) * P1)+ (soc1 * vm2)
matricies.append(c0_to_c1)
matricies.append(c1_to_c0)
if random.randint(0,1) == 1:
position_and_direction = ['right', 'left', 'left', 'right']
else:
position_and_direction = ['left', 'right', 'right', 'left']
return matricies#, position_and_direction
def traverse(matrix):
rand = float(random.randint(0,1000))/1000
count = 0
for i, elem in enumerate(matrix):
if rand > count and rand < count + elem:
return i
count += elem
return len(matrix) - 1
def continue_chain(emo1, emo2, matricies):
T1, T2 = matricies
if random.randint(0,1) == 1:
position_and_direction = ['right', 'left', 'left', 'right']
else:
position_and_direction = ['left', 'right', 'right', 'left']
return (traverse(T1.A[emo1]), traverse(T2.A[emo2]))#, position_and_direction
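# Example of driving the chain (illustrative values; indices 4 and 5 of the
# initialize() result are the combined c0_to_c1 / c1_to_c0 transition matrices):
#
#   matricies = initialize(0.4, 0.7)
#   emo1, emo2 = 0, 2
#   emo1, emo2 = continue_chain(emo1, emo2, matricies[4:6])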
if __name__ == "__main__":
pass
def main():
pass
| gpl-3.0 | -5,248,547,207,103,633,000 | 23.202532 | 103 | 0.544979 | false | 2.708215 | false | false | false |
tbjoern/adventofcode | One/script.py | 1 | 1159 | file = open("input.txt", "r")
input = file.next()
sequence = input.split(", ")
class walker:
def __init__(self):
self.east = 0
self.south = 0
self.facing = 0
self.tiles = {}
def turnL(self):
if self.facing == 0:
self.facing = 3
else:
self.facing -= 1
def turnR(self):
if self.facing == 3:
self.facing = 0
else:
self.facing += 1
def walk(self,dist):
for i in range(0, dist):
if self.facing == 0:
self.south -= 1
elif self.facing == 1:
self.east += 1
elif self.facing == 2:
self.south += 1
else:
self.east -= 1
if self.kek():
return True
self.addTile(self.east,self.south)
return False
def totalDist(self):
return abs(self.east) + abs(self.south)
def addTile(self, x, y):
if x in self.tiles:
self.tiles[x].append(y)
else:
self.tiles[x] = [y]
def kek(self):
if self.east in self.tiles:
if self.south in self.tiles[self.east]:
return True
return False
w = walker()
for s in sequence:
if s[0] == "R":
w.turnR()
else:
w.turnL()
if w.walk(int(s[1:])):
break
print w.totalDist() | mit | 5,358,361,581,108,001,000 | 15.328358 | 42 | 0.559103 | false | 2.670507 | false | false | false |
CLVsol/odoo_addons | clv_seedling/batch_history/clv_seedling_batch_history.py | 1 | 2387 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
from openerp.osv import osv
from datetime import *
class clv_seedling_batch_history(osv.Model):
_name = 'clv_seedling.batch_history'
seedling_id = fields.Many2one('clv_seedling', 'Seedling', required=False)
batch_id = fields.Many2one('clv_batch', 'Batch', required=False)
incoming_date = fields.Datetime('Incoming Date', required=False,
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
outgoing_date = fields.Datetime('Outgoing Date', required=False)
notes = fields.Text(string='Notes')
_order = "incoming_date desc"
class clv_seedling(osv.Model):
_inherit = 'clv_seedling'
batch_history_ids = fields.One2many('clv_seedling.batch_history', 'seedling_id', 'Batch History')
class clv_batch(osv.Model):
_inherit = 'clv_batch'
seedling_batch_history_ids = fields.One2many('clv_seedling.batch_history', 'batch_id', 'Seedling Batch History')
| agpl-3.0 | 3,222,787,404,884,915,000 | 53.25 | 116 | 0.514872 | false | 4.564054 | false | false | false |
what-studio/profiling | test/test_tracing.py | 1 | 1802 | # -*- coding: utf-8 -*-
import sys
import pytest
from _utils import factorial, find_stats, foo
from profiling.stats import RecordingStatistics
from profiling.tracing import TracingProfiler
def test_setprofile():
profiler = TracingProfiler()
assert sys.getprofile() is None
with profiler:
assert sys.getprofile() == profiler._profile
assert sys.getprofile() is None
sys.setprofile(lambda *x: x)
with pytest.raises(RuntimeError):
profiler.start()
sys.setprofile(None)
def test_profile():
profiler = TracingProfiler()
frame = foo()
profiler._profile(frame, 'call', None)
profiler._profile(frame, 'return', None)
assert len(profiler.stats) == 1
stats1 = find_stats(profiler.stats, 'foo')
stats2 = find_stats(profiler.stats, 'bar')
stats3 = find_stats(profiler.stats, 'baz')
assert stats1.own_hits == 0
assert stats2.own_hits == 0
assert stats3.own_hits == 1
assert stats1.deep_hits == 1
assert stats2.deep_hits == 1
assert stats3.deep_hits == 1
def test_profiler():
profiler = TracingProfiler(base_frame=sys._getframe())
assert isinstance(profiler.stats, RecordingStatistics)
stats, cpu_time, wall_time = profiler.result()
assert len(stats) == 0
with profiler:
factorial(1000)
factorial(10000)
stats1 = find_stats(profiler.stats, 'factorial')
stats2 = find_stats(profiler.stats, '__enter__')
stats3 = find_stats(profiler.stats, '__exit__')
assert stats1.deep_time != 0
assert stats1.deep_time == stats1.own_time
assert stats1.own_time > stats2.own_time
assert stats1.own_time > stats3.own_time
assert stats1.own_hits == 2
assert stats2.own_hits == 0 # entering to __enter__() wasn't profiled.
assert stats3.own_hits == 1
| bsd-3-clause | 2,869,637,923,411,800,600 | 30.614035 | 75 | 0.671476 | false | 3.575397 | false | false | false |
AstroHuntsman/POCS | pocs/focuser/birger.py | 1 | 16549 | import io
import re
import serial
import time
import glob
from pocs.focuser.focuser import AbstractFocuser
# Birger adaptor serial numbers should be 5 digits
serial_number_pattern = re.compile('^\d{5}$')
# Error codes should be 'ERR' followed by 1-2 digits
error_pattern = re.compile('(?<=ERR)\d{1,2}')
error_messages = ('No error',
'Unrecognised command',
'Lens is in manual focus mode',
'No lens connected',
'Lens distance stop error',
'Aperture not initialised',
'Invalid baud rate specified',
'Reserved',
'Reserved',
'A bad parameter was supplied to the command',
'XModem timeout',
'XModem error',
'XModem unlock code incorrect',
'Not used',
'Invalid port',
'Licence unlock failure',
'Invalid licence file',
'Invalid library file',
'Reserved',
'Reserved',
'Not used',
'Library not ready for lens communications',
'Library not ready for commands',
'Command not licensed',
'Invalid focus range in memory. Try relearning the range',
'Distance stops not supported by the lens')
class Focuser(AbstractFocuser):
"""
Focuser class for control of a Canon DSLR lens via a Birger Engineering Canon EF-232 adapter
"""
# Class variable to cache the device node scanning results
_birger_nodes = None
# Class variable to store the device nodes already in use. Prevents scanning known Birgers &
# acts as a check against Birgers assigned to incorrect ports.
_assigned_nodes = []
def __init__(self,
name='Birger Focuser',
model='Canon EF-232',
initial_position=None,
dev_node_pattern='/dev/tty.USA49WG*.?',
*args, **kwargs):
super().__init__(name=name, model=model, *args, **kwargs)
self.logger.debug('Initialising Birger focuser')
if serial_number_pattern.match(self.port):
# Have been given a serial number
if self._birger_nodes is None:
# No cached device nodes scanning results, need to scan.
self._birger_nodes = {}
# Find nodes matching pattern
device_nodes = glob.glob(dev_node_pattern)
# Remove nodes already assigned to other Birger objects
device_nodes = [node for node in device_nodes if node not in self._assigned_nodes]
for device_node in device_nodes:
try:
serial_number = self.connect(device_node)
self._birger_nodes[serial_number] = device_node
except (serial.SerialException, serial.SerialTimeoutException, AssertionError):
# No birger on this node.
pass
finally:
self._serial_port.close()
# Search in cached device node scanning results for serial number
try:
device_node = self._birger_nodes[self.port]
except KeyError:
self.logger.critical("Could not find {} ({})!".format(self.name, self.port))
return
self.port = device_node
# Check that this node hasn't already been assigned to another Birgers
if self.port in self._assigned_nodes:
self.logger.critical("Device node {} already in use!".format(self.port))
return
self.connect(self.port)
self._assigned_nodes.append(self.port)
        self._initialise()
if initial_position:
self.position = initial_position
##################################################################################################
# Properties
##################################################################################################
@property
def is_connected(self):
"""
Checks status of serial port to determine if connected.
"""
connected = False
if self._serial_port:
connected = self._serial_port.isOpen()
return connected
@AbstractFocuser.position.getter
def position(self):
"""
Returns current focus position in the lens focus encoder units
"""
response = self._send_command('pf', response_length=1)
return int(response[0].rstrip())
@property
def min_position(self):
"""
Returns position of close limit of focus travel, in encoder units
"""
return self._min_position
@property
def max_position(self):
"""
Returns position of far limit of focus travel, in encoder units
"""
return self._max_position
@property
def lens_info(self):
"""
Return basic lens info (e.g. '400mm,f28' for a 400 mm f/2.8 lens)
"""
return self._lens_info
@property
def library_version(self):
"""
Returns the version string of the Birger adaptor library (firmware).
"""
return self._library_version
@property
def hardware_version(self):
"""
Returns the hardware version of the Birger adaptor
"""
return self._hardware_version
##################################################################################################
# Public Methods
##################################################################################################
def connect(self, port):
try:
# Configure serial port.
# Settings copied from Bob Abraham's birger.c
self._serial_port = serial.Serial()
self._serial_port.port = port
self._serial_port.baudrate = 115200
self._serial_port.bytesize = serial.EIGHTBITS
self._serial_port.parity = serial.PARITY_NONE
self._serial_port.stopbits = serial.STOPBITS_ONE
self._serial_port.timeout = 2.0
self._serial_port.xonxoff = False
self._serial_port.rtscts = False
self._serial_port.dsrdtr = False
self._serial_port.write_timeout = None
            self._serial_port.inter_byte_timeout = None
# Establish connection
self._serial_port.open()
except serial.SerialException as err:
self._serial_port = None
self.logger.critical('Could not open {}!'.format(port))
raise err
time.sleep(2)
# Want to use a io.TextWrapper in order to have a readline() method with universal newlines
# (Birger sends '\r', not '\n'). The line_buffering option causes an automatic flush() when
# a write contains a newline character.
self._serial_io = io.TextIOWrapper(io.BufferedRWPair(self._serial_port, self._serial_port),
newline='\r', encoding='ascii', line_buffering=True)
self.logger.debug('Established serial connection to {} on {}.'.format(self.name, port))
# Set 'verbose' and 'legacy' response modes. The response from this depends on
# what the current mode is... but after a power cycle it should be 'rm1,0', 'OK'
try:
self._send_command('rm1,0', response_length=0)
except AssertionError as err:
self.logger.critical('Error communicating with {} on {}!'.format(self.name, port))
raise err
# Return serial number
        return self._send_command('sn', response_length=1)[0].rstrip()
def move_to(self, position):
"""
Move the focus to a specific position in lens encoder units.
Does not do any checking of the requested position but will warn if the lens reports hitting a stop.
Returns the actual position moved to in lens encoder units.
"""
response = self._send_command('fa{:d}'.format(int(position)), response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,N'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved to {} encoder units".format(r[:-2]))
if r[-1] == '1':
self.logger.warning('{} reported hitting a focus stop'.format(self))
return int(r[:-2])
def move_by(self, increment):
"""
Move the focus to a specific position in lens encoder units.
Does not do any checking of the requested increment but will warn if the lens reports hitting a stop.
Returns the actual distance moved in lens encoder units.
"""
response = self._send_command('mf{:d}'.format(increment), response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,N'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved by {} encoder units".format(r[:-2]))
if r[-1] == '1':
self.logger.warning('{} reported hitting a focus stop'.format(self))
return int(r[:-2])
##################################################################################################
# Private Methods
##################################################################################################
def _send_command(self, command, response_length=None, ignore_response=False):
"""
Sends a command to the Birger adaptor and retrieves the response.
Args:
command (string): command string to send (without newline), e.g. 'fa1000', 'pf'
response length (integer, optional, default=None): number of lines of response expected.
For most commands this should be 0 or 1. If None readlines() will be called to
capture all responses. As this will block until the timeout expires it should only
be used if the number of lines expected is not known (e.g. 'ds' command).
Returns:
list: possibly empty list containing the '\r' terminated lines of the response from the adaptor.
"""
if not self.is_connected:
self.logger.critical("Attempt to send command to {} when not connected!".format(self))
return
# Clear the input buffer in case there's anything left over in there.
self._serial_port.reset_input_buffer()
# Send command
self._serial_io.write(command + '\r')
if ignore_response:
return
# In verbose mode adaptor will first echo the command
echo = self._serial_io.readline().rstrip()
assert echo == command, self.logger.warning("echo != command: {} != {}".format(echo, command))
# Adaptor should then send 'OK', even if there was an error.
ok = self._serial_io.readline().rstrip()
assert ok == 'OK'
# Depending on which command was sent there may or may not be any further
# response.
response = []
if response_length == 0:
# Not expecting any further response. Should check the buffer anyway in case an error
# message has been sent.
if self._serial_port.in_waiting:
response.append(self._serial_io.readline())
elif response_length > 0:
# Expecting some number of lines of response. Attempt to read that many lines.
for i in range(response_length):
response.append(self._serial_io.readline())
else:
# Don't know what to expect. Call readlines() to get whatever is there.
response.append(self._serial_io.readlines())
# Check for an error message in response
if response:
# Not an empty list.
error_match = error_pattern.match(response[0])
if error_match:
# Got an error message! Translate it.
try:
error_message = error_messages[int(error_match.group())]
self.logger.error("{} returned error message '{}'!".format(self, error_message))
except Exception:
self.logger.error("Unknown error '{}' from {}!".format(error_match.group(), self))
return response
def _initialise(self):
# Get serial number. Note, this is the serial number of the Birger adaptor,
# *not* the attached lens (which would be more useful). Accessible as self.uid
self._get_serial_number()
# Get the version string of the adaptor software libray. Accessible as self.library_version
self._get_library_version()
# Get the hardware version of the adaptor. Accessible as self.hardware_version
self._get_hardware_version()
# Get basic lens info (e.g. '400mm,f28' for a 400 mm, f/2.8 lens). Accessible as self.lens_info
self._get_lens_info()
# Initialise the aperture motor. This also has the side effect of fully opening the iris.
self._initialise_aperture()
# Initalise focus. First move the focus to the close stop.
self._move_zero()
# Then reset the focus encoder counts to 0
self._zero_encoder()
self._min_position = 0
# Calibrate the focus with the 'Learn Absolute Focus Range' command
self._learn_focus_range()
# Finally move the focus to the far stop (close to where we'll want it) and record position
self._max_position = self._move_inf()
self.logger.info('\t\t\t {} initialised'.format(self))
def _get_serial_number(self):
response = self._send_command('sn', response_length=1)
self._serial_number = response[0].rstrip()
self.logger.debug("Got serial number {} for {} on {}".format(self.uid, self.name, self.port))
def _get_library_version(self):
response = self._send_command('lv', response_length=1)
self._library_version = response[0].rstrip()
self.logger.debug("Got library version '{}' for {} on {}".format(self.library_version, self.name, self.port))
def _get_hardware_version(self):
response = self._send_command('hv', response_length=1)
self._hardware_version = response[0].rstrip()
self.logger.debug("Got hardware version {} for {} on {}".format(self.hardware_version, self.name, self.port))
def _get_lens_info(self):
response = self._send_command('id', response_length=1)
self._lens_info = response[0].rstrip()
self.logger.debug("Got lens info '{}' for {} on {}".format(self.lens_info, self.name, self.port))
def _initialise_aperture(self):
self.logger.debug('Initialising aperture motor')
response = self._send_command('in', response_length=1)
if response[0].rstrip() != 'DONE':
self.logger.error("{} got response '{}', expected 'DONE'!".format(self, response[0].rstrip()))
def _move_zero(self):
response = self._send_command('mz', response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,1'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved {} encoder units to close stop".format(r[:-2]))
return int(r[:-2])
def _zero_encoder(self):
self.logger.debug('Setting focus encoder zero point')
self._send_command('sf0', response_length=0)
def _learn_focus_range(self):
self.logger.debug('Learning absolute focus range')
response = self._send_command('la', response_length=1)
if response[0].rstrip() != 'DONE:LA':
self.logger.error("{} got response '{}', expected 'DONE:LA'!".format(self, response[0].rstrip()))
def _move_inf(self):
response = self._send_command('mi', response_length=1)
if response[0][:4] != 'DONE':
self.logger.error("{} got response '{}', expected 'DONENNNNN,1'!".format(self, response[0].rstrip()))
else:
r = response[0][4:].rstrip()
self.logger.debug("Moved {} encoder units to far stop".format(r[:-2]))
return int(r[:-2])
| mit | 46,177,568,672,669,770 | 40.580402 | 117 | 0.563236 | false | 4.38268 | false | false | false |
linuxmidhun/0install | zeroinstall/cmd/remove_feed.py | 1 | 1066 | """
The B{0install remove-feed} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
syntax = "[INTERFACE] FEED"
from zeroinstall import SafeException, _
from zeroinstall.injector import model, writer
from zeroinstall.cmd import add_feed, UsageError
add_options = add_feed.add_options
def handle(config, options, args):
"""@type args: [str]"""
if len(args) == 2:
iface = config.iface_cache.get_interface(model.canonical_iface_uri(args[0]))
try:
feed_url = model.canonical_iface_uri(args[1])
except SafeException:
feed_url = args[1] # File might not exist any longer
feed_import = add_feed.find_feed_import(iface, feed_url)
if not feed_import:
raise SafeException(_('Interface %(interface)s has no feed %(feed)s') %
{'interface': iface.uri, 'feed': feed_url})
iface.extra_feeds.remove(feed_import)
writer.save_interface(iface)
elif len(args) == 1:
add_feed.handle(config, options, args, add_ok = False, remove_ok = True)
else:
raise UsageError()
| lgpl-2.1 | 7,947,182,784,579,347,000 | 30.352941 | 78 | 0.707317 | false | 3.072046 | false | false | false |
catapult-project/catapult-csm | telemetry/telemetry/internal/browser/browser_unittest.py | 1 | 11913 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import tempfile
import unittest
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.browser import browser as browser_module
from telemetry.internal.browser import browser_finder
from telemetry.internal.platform import gpu_device
from telemetry.internal.platform import gpu_info
from telemetry.internal.platform import system_info
from telemetry.internal.util import path
from telemetry.testing import browser_test_case
from telemetry.testing import options_for_unittests
from telemetry.timeline import tracing_config
from devil.android import app_ui
import mock
import py_utils
class IntentionalException(Exception):
pass
class BrowserTest(browser_test_case.BrowserTestCase):
def testBrowserCreation(self):
self.assertEquals(1, len(self._browser.tabs))
# Different browsers boot up to different things.
assert self._browser.tabs[0].url
@decorators.Enabled('has tabs')
def testNewCloseTab(self):
existing_tab = self._browser.tabs[0]
self.assertEquals(1, len(self._browser.tabs))
existing_tab_url = existing_tab.url
new_tab = self._browser.tabs.New()
self.assertEquals(2, len(self._browser.tabs))
self.assertEquals(existing_tab.url, existing_tab_url)
self.assertEquals(new_tab.url, 'about:blank')
new_tab.Close()
self.assertEquals(1, len(self._browser.tabs))
self.assertEquals(existing_tab.url, existing_tab_url)
def testMultipleTabCalls(self):
self._browser.tabs[0].Navigate(self.UrlOfUnittestFile('blank.html'))
self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
def testTabCallByReference(self):
tab = self._browser.tabs[0]
tab.Navigate(self.UrlOfUnittestFile('blank.html'))
self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
@decorators.Enabled('has tabs')
def testCloseReferencedTab(self):
self._browser.tabs.New()
tab = self._browser.tabs[0]
tab.Navigate(self.UrlOfUnittestFile('blank.html'))
tab.Close()
self.assertEquals(1, len(self._browser.tabs))
@decorators.Enabled('has tabs')
def testForegroundTab(self):
# Should be only one tab at this stage, so that must be the foreground tab
original_tab = self._browser.tabs[0]
self.assertEqual(self._browser.foreground_tab, original_tab)
new_tab = self._browser.tabs.New()
    # New tab should be the foreground tab
self.assertEqual(self._browser.foreground_tab, new_tab)
# Make sure that activating the background tab makes it the foreground tab
original_tab.Activate()
self.assertEqual(self._browser.foreground_tab, original_tab)
# Closing the current foreground tab should switch the foreground tab to the
# other tab
original_tab.Close()
self.assertEqual(self._browser.foreground_tab, new_tab)
# This test uses the reference browser and doesn't have access to
# helper binaries like crashpad_database_util.
@decorators.Enabled('linux')
def testGetMinidumpPathOnCrash(self):
tab = self._browser.tabs[0]
with self.assertRaises(exceptions.AppCrashException):
tab.Navigate('chrome://crash', timeout=5)
crash_minidump_path = self._browser.GetMostRecentMinidumpPath()
self.assertIsNotNone(crash_minidump_path)
def testGetSystemInfo(self):
if not self._browser.supports_system_info:
logging.warning(
'Browser does not support getting system info, skipping test.')
return
info = self._browser.GetSystemInfo()
self.assertTrue(isinstance(info, system_info.SystemInfo))
self.assertTrue(hasattr(info, 'model_name'))
self.assertTrue(hasattr(info, 'gpu'))
self.assertTrue(isinstance(info.gpu, gpu_info.GPUInfo))
self.assertTrue(hasattr(info.gpu, 'devices'))
self.assertTrue(len(info.gpu.devices) > 0)
for g in info.gpu.devices:
self.assertTrue(isinstance(g, gpu_device.GPUDevice))
def testGetSystemInfoNotCachedObject(self):
if not self._browser.supports_system_info:
logging.warning(
'Browser does not support getting system info, skipping test.')
return
info_a = self._browser.GetSystemInfo()
info_b = self._browser.GetSystemInfo()
self.assertFalse(info_a is info_b)
def testGetSystemTotalMemory(self):
self.assertTrue(self._browser.memory_stats['SystemTotalPhysicalMemory'] > 0)
def testSystemInfoModelNameOnMac(self):
if self._browser.platform.GetOSName() != 'mac':
self.skipTest('This test is only run on macOS')
return
if not self._browser.supports_system_info:
logging.warning(
'Browser does not support getting system info, skipping test.')
return
info = self._browser.GetSystemInfo()
model_name_re = r"[a-zA-Z]* [0-9.]*"
self.assertNotEqual(re.match(model_name_re, info.model_name), None)
# crbug.com/628836 (CrOS, where system-guest indicates ChromeOS guest)
# github.com/catapult-project/catapult/issues/3130 (Windows)
@decorators.Disabled('cros-chrome-guest', 'system-guest', 'chromeos', 'win')
def testIsTracingRunning(self):
tracing_controller = self._browser.platform.tracing_controller
if not tracing_controller.IsChromeTracingSupported():
return
self.assertFalse(tracing_controller.is_tracing_running)
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
tracing_controller.StartTracing(config)
self.assertTrue(tracing_controller.is_tracing_running)
tracing_controller.StopTracing()
self.assertFalse(tracing_controller.is_tracing_running)
@decorators.Enabled('android')
def testGetAppUi(self):
self.assertTrue(self._browser.supports_app_ui_interactions)
ui = self._browser.GetAppUi()
self.assertTrue(isinstance(ui, app_ui.AppUi))
self.assertIsNotNone(ui.WaitForUiNode(resource_id='action_bar_root'))
class CommandLineBrowserTest(browser_test_case.BrowserTestCase):
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs('--user-agent=telemetry')
def testCommandLineOverriding(self):
# This test starts the browser with --user-agent=telemetry. This tests
# whether the user agent is then set.
t = self._browser.tabs[0]
t.Navigate(self.UrlOfUnittestFile('blank.html'))
t.WaitForDocumentReadyStateToBeInteractiveOrBetter()
self.assertEquals(t.EvaluateJavaScript('navigator.userAgent'),
'telemetry')
class DirtyProfileBrowserTest(browser_test_case.BrowserTestCase):
@classmethod
def CustomizeBrowserOptions(cls, options):
options.profile_type = 'small_profile'
@decorators.Disabled('chromeos') # crbug.com/243912
def testDirtyProfileCreation(self):
self.assertEquals(1, len(self._browser.tabs))
class BrowserLoggingTest(browser_test_case.BrowserTestCase):
@classmethod
def CustomizeBrowserOptions(cls, options):
options.logging_verbosity = options.VERBOSE_LOGGING
@decorators.Disabled('chromeos', 'android')
def testLogFileExist(self):
self.assertTrue(
os.path.isfile(self._browser._browser_backend.log_file_path))
def _GenerateBrowserProfile(number_of_tabs):
""" Generate a browser profile which browser had |number_of_tabs| number of
tabs opened before it was closed.
Returns:
profile_dir: the directory of profile.
"""
profile_dir = tempfile.mkdtemp()
options = options_for_unittests.GetCopy()
options.browser_options.output_profile_path = profile_dir
browser_to_create = browser_finder.FindBrowser(options)
browser_to_create.platform.network_controller.InitializeIfNeeded()
try:
with browser_to_create.Create(options) as browser:
browser.platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
blank_file_path = os.path.join(path.GetUnittestDataDir(), 'blank.html')
blank_url = browser.platform.http_server.UrlOf(blank_file_path)
browser.foreground_tab.Navigate(blank_url)
browser.foreground_tab.WaitForDocumentReadyStateToBeComplete()
for _ in xrange(number_of_tabs - 1):
tab = browser.tabs.New()
tab.Navigate(blank_url)
tab.WaitForDocumentReadyStateToBeComplete()
return profile_dir
finally:
browser_to_create.platform.network_controller.Close()
class BrowserCreationTest(unittest.TestCase):
def setUp(self):
self.mock_browser_backend = mock.MagicMock()
self.mock_platform_backend = mock.MagicMock()
def testCleanedUpCalledWhenExceptionRaisedInBrowserCreation(self):
self.mock_platform_backend.platform.FlushDnsCache.side_effect = (
IntentionalException('Boom!'))
with self.assertRaises(IntentionalException):
browser_module.Browser(
self.mock_browser_backend, self.mock_platform_backend,
credentials_path=None)
self.assertTrue(self.mock_platform_backend.WillCloseBrowser.called)
def testOriginalExceptionNotSwallow(self):
self.mock_platform_backend.platform.FlushDnsCache.side_effect = (
IntentionalException('Boom!'))
self.mock_platform_backend.WillCloseBrowser.side_effect = (
IntentionalException('Cannot close browser!'))
with self.assertRaises(IntentionalException) as context:
browser_module.Browser(
self.mock_browser_backend, self.mock_platform_backend,
credentials_path=None)
self.assertIn('Boom!', context.exception.message)
class BrowserRestoreSessionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._number_of_tabs = 4
cls._profile_dir = _GenerateBrowserProfile(cls._number_of_tabs)
cls._options = options_for_unittests.GetCopy()
cls._options.browser_options.AppendExtraBrowserArgs(
['--restore-last-session'])
cls._options.browser_options.profile_dir = cls._profile_dir
cls._browser_to_create = browser_finder.FindBrowser(cls._options)
cls._browser_to_create.platform.network_controller.InitializeIfNeeded()
@decorators.Enabled('has tabs')
@decorators.Disabled('chromeos', 'win', 'mac')
# TODO(nednguyen): Enable this test on windowsn platform
def testRestoreBrowserWithMultipleTabs(self):
with self._browser_to_create.Create(self._options) as browser:
# The number of tabs will be self._number_of_tabs + 1 as it includes the
# old tabs and a new blank tab.
expected_number_of_tabs = self._number_of_tabs + 1
try:
py_utils.WaitFor(
lambda: len(browser.tabs) == expected_number_of_tabs, 10)
except:
logging.error('Number of tabs is %s' % len(browser.tabs))
raise
self.assertEquals(expected_number_of_tabs, len(browser.tabs))
@classmethod
def tearDownClass(cls):
cls._browser_to_create.platform.network_controller.Close()
shutil.rmtree(cls._profile_dir)
class TestBrowserOperationDoNotLeakTempFiles(unittest.TestCase):
@decorators.Enabled('win', 'linux')
# TODO(ashleymarie): Re-enable on mac
# BUG=catapult:#3523
@decorators.Isolated
def testBrowserNotLeakingTempFiles(self):
options = options_for_unittests.GetCopy()
browser_to_create = browser_finder.FindBrowser(options)
self.assertIsNotNone(browser_to_create)
before_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)
browser_to_create.platform.network_controller.InitializeIfNeeded()
try:
with browser_to_create.Create(options) as browser:
tab = browser.tabs.New()
tab.Navigate('about:blank')
self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))
after_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)
self.assertEqual(before_browser_run_temp_dir_content,
after_browser_run_temp_dir_content)
finally:
browser_to_create.platform.network_controller.Close()
| bsd-3-clause | -8,673,220,361,794,257,000 | 37.553398 | 80 | 0.732645 | false | 3.784307 | true | false | false |
helixyte/TheLMA | thelma/repositories/rdb/mappers/experimentjob.py | 1 | 1080 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Experiment job mapper.
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import mapper
from thelma.entities.experiment import Experiment
from thelma.entities.job import ExperimentJob
from thelma.entities.job import JOB_TYPES
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(job_mapper, job_tbl, experiment_tbl):
"Mapper factory."
m = mapper(ExperimentJob, job_tbl,
inherits=job_mapper,
properties=dict(
experiments=relationship(Experiment,
order_by=experiment_tbl.c.experiment_id,
back_populates='job',
cascade='save-update, merge, delete'
)
),
polymorphic_identity=JOB_TYPES.EXPERIMENT
)
return m
| mit | -7,348,472,988,197,110,000 | 32.75 | 80 | 0.607407 | false | 4.576271 | false | false | false |
SeanEstey/Bravo | app/notify/tasks.py | 1 | 7873 | '''app.notify.tasks'''
import json, os, pytz
from os import environ as env
from datetime import datetime, date, time, timedelta
from dateutil.parser import parse
from bson import ObjectId as oid
from flask import g, render_template
from app import get_keys, celery #, smart_emit
from app.lib.dt import to_local
from app.lib import mailgun
from app.main import schedule
from app.main.parser import is_bus
from app.main.etapestry import call, EtapError
from . import email, events, sms, voice, pickups, triggers
from logging import getLogger
log = getLogger(__name__)
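# The trigger documents queried below look roughly like this (field names are
# inferred from the queries in this module; the shape shown is illustrative only):
#   {'status': 'pending', 'fire_dt': <datetime>, 'evnt_id': <ObjectId>,
#    'type': <str>, 'task_id': <celery task id>}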
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def monitor_triggers(self, **kwargs):
ready = g.db.triggers.find({
'status':'pending',
'fire_dt':{
'$lt':datetime.utcnow()}})
for trigger in ready:
evnt = g.db.events.find_one({'_id':trigger['evnt_id']})
g.group = evnt['agency']
log.debug('Firing event trigger for %s', evnt['name'], extra={'trigger_id':str(trigger['_id'])})
try:
fire_trigger(trigger['_id'])
except Exception as e:
log.exception('Error firing event trigger for %s', evnt['name'])
pending = g.db.triggers.find({
'status':'pending',
'fire_dt': {
'$gt':datetime.utcnow()}}).sort('fire_dt', 1)
output = []
if pending.count() > 0:
tgr = pending.next()
delta = tgr['fire_dt'] - datetime.utcnow().replace(tzinfo=pytz.utc)
to_str = str(delta)[:-7]
return 'next trigger pending in %s' % to_str
else:
return '0 pending'
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def fire_trigger(self, _id=None, **rest):
'''Sends out all dependent sms/voice/email notifics messages
'''
status = ''
n_errors = 0
trig = g.db.triggers.find_one({'_id':oid(_id)})
event = g.db.events.find_one({'_id':trig['evnt_id']})
g.group = event['agency']
g.db.triggers.update_one(
{'_id':oid(_id)},
{'$set': {'task_id':self.request.id, 'status':'in-progress'}})
events.update_status(trig['evnt_id'])
ready = g.db.notifics.find(
{'trig_id':oid(_id), 'tracking.status':'pending'})
count = ready.count()
log.info('Sending notifications for event %s...', event['name'],
extra={'type':trig['type'], 'n_total':count})
#smart_emit('trigger_status',{
# 'trig_id': str(_id), 'status': 'in-progress'})
if env['BRV_SANDBOX'] == 'True':
log.info('sandbox: simulating voice/sms, rerouting emails')
for n in ready:
try:
if n['type'] == 'voice':
status = voice.call(n, get_keys('twilio'))
elif n['type'] == 'sms':
status = sms.send(n, get_keys('twilio'))
elif n['type'] == 'email':
status = email.send(n, get_keys('mailgun'))
except Exception as e:
n_errors +=1
status = 'error'
log.exception('Error sending notification to %s', n['to'],
extra={'type':n['type']})
else:
if status == 'failed':
n_errors += 1
finally:
pass
#smart_emit('notific_status', {
# 'notific_id':str(n['_id']), 'status':status})
g.db.triggers.update_one({'_id':oid(_id)}, {'$set': {'status': 'fired'}})
'''smart_emit('trigger_status', {
'trig_id': str(_id),
'status': 'fired',
'sent': count - n_errors,
'errors': n_errors})'''
log.info('%s/%s notifications sent for event %s', count - n_errors, count, event['name'],
extra={'type':trig['type'], 'n_total':count, 'n_errors':n_errors})
return 'success'
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def schedule_reminders(self, group=None, for_date=None, **rest):
if for_date:
for_date = parse(for_date).date()
groups = [g.db['groups'].find_one({'name':group})] if group else g.db['groups'].find()
evnt_ids = []
for group_ in groups:
n_success = n_fails = 0
g.group = group_['name']
log.info('Scheduling notification events...')
days_ahead = int(group_['notify']['sched_delta_days'])
on_date = date.today() + timedelta(days=days_ahead) if not for_date else for_date
date_str = on_date.strftime('%m-%d-%Y')
blocks = []
for key in group_['cal_ids']:
blocks += schedule.get_blocks(
group_['cal_ids'][key],
datetime.combine(on_date,time(8,0)),
datetime.combine(on_date,time(9,0)),
get_keys('google')['oauth'])
if len(blocks) == 0:
log.debug('no blocks on %s', date_str)
continue
else:
log.debug('%s events on %s: %s',
len(blocks), date_str, ", ".join(blocks))
for block in blocks:
if is_bus(block) and group_['notify']['sched_business'] == False:
continue
try:
evnt_id = pickups.create_reminder(g.group, block, on_date)
except EtapError as e:
n_fails +=1
log.exception('Error creating notification event %s', block)
continue
else:
n_success +=1
evnt_ids.append(str(evnt_id))
log.info('Created notification event %s', block)
log.info('Created %s/%s scheduled notification events',
n_success, n_success + n_fails)
return json.dumps(evnt_ids)
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def skip_pickup(self, evnt_id=None, acct_id=None, **rest):
    '''User has opted out of a pickup via sms/voice/email notification.
Run is_valid() before calling this function.
@acct_id: _id from db.accounts, not eTap account id
'''
# Cancel any pending parent notifications
result = g.db.notifics.update_many(
{'acct_id':oid(acct_id), 'evnt_id':oid(evnt_id), 'tracking.status':'pending'},
{'$set':{'tracking.status':'cancelled'}})
acct = g.db.accounts.find_one_and_update(
{'_id':oid(acct_id)},
{'$set': {'opted_out': True}})
evnt = g.db.events.find_one({'_id':oid(evnt_id)})
if not evnt or not acct:
msg = 'evnt/acct not found (evnt_id=%s, acct_id=%s' %(evnt_id,acct_id)
log.error(msg)
raise Exception(msg)
g.group = evnt['agency']
log.info('%s opted out of pickup',
acct.get('name') or acct.get('email'),
extra={'event_name':evnt['name'], 'account_id':acct['udf']['etap_id']})
try:
call('skip_pickup', data={
'acct_id': acct['udf']['etap_id'],
'date': acct['udf']['pickup_dt'].strftime('%d/%m/%Y'),
'next_pickup': to_local(
acct['udf']['future_pickup_dt'],
to_str='%d/%m/%Y')})
    except Exception as e:
        log.exception('Error calling skip_pickup')
        log.exception("Error updating account %s",
            acct.get('name') or acct.get('email'),
            extra={'account_id': acct['udf']['etap_id']})
if not acct.get('email'):
return 'success'
try:
body = render_template(
'email/%s/no_pickup.html' % g.group,
to=acct['email'],
account=to_local(obj=acct, to_str='%B %d %Y'))
except Exception as e:
log.exception('Error rendering no_pickup template')
raise
else:
mailgun.send(
acct['email'],
'Thanks for Opting Out',
body,
get_keys('mailgun'),
v={'type':'opt_out', 'group':g.group})
return 'success'
| gpl-2.0 | -6,240,724,140,937,675,000 | 32.7897 | 104 | 0.526737 | false | 3.61977 | false | false | false |
tuxofil/Gps2Udp | misc/server/gps2udp.py | 1 | 5891 | #!/usr/bin/env python
"""
Receive Geo location data from the Gps2Udp Android application
via UDP/IP and forward them to the stdout line by line.
There is some requirements to a valid incoming packet:
- it must be of form: TIMESTAMP LATITUDE LONGITUDE ACCURACY [other fields];
- TIMESTAMP is a Unix timestamp (seconds since 1 Jan 1970);
- the diff between TIMESTAMP and local time must be less
than MAX_TIME_DIFF (definition of the MAX_TIME_DIFF variable see below);
- TIMESTAMP must be greater than timestamp of a previous valid packet;
- LATITUDE is a float between [-90.0..90.0];
- LONGITUDE is a float between [-180.0..180.0];
- ACCURACY is an integer between [0..MAX_ACCURACY] (definition of
MAX_ACCURACY variable see below).
If any of the requirements are not met, the packet will be silently ignored.
When started with --signed command line option, an extra field must
be defined in each incoming UDP packet - DIGEST. With the field common
packet format must be of form:
TIMESTAMP LATITUDE LONGITUDE ACCURACY DIGEST
DIGEST - is a SHA1 from "TIMESTAMP LATITUDE LONGITUDE ACCURACY" + secret
string known only by Gps2Udp client (Android app) and the server. The
server reads the secret from GPS2UDP_SECRET environment variable.
Important notes. When in --signed mode:
- any packet without the digest will be ignored;
- any packet with digest not matched with digest calculated on the
server side, will be ignored;
- if the secret is not defined (GPS2UDP_SECRET environment variable is not
set or empty), no packets will be matched as valid.
"""
import getopt
import hashlib
import os
import os.path
import socket
import sys
import time
DEFAULT_PORT = 5000
# Maximum time difference between a timestamp in a packet and
# the local Unix timestamp (in seconds).
MAX_TIME_DIFF = 60 * 5
# Maximum valid accuracy value (in meters).
MAX_ACCURACY = 10000 # 10km
# Here will be stored the timestamp of the last valid packet received.
# The timestamp will be used later to avoid receiving data from the past.
LAST_TIMESTAMP = None
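# Illustrative sketch only (nothing in this script calls it): how a client
# could build a packet in the signed format that parse_packet() below expects.
# The function name and its arguments are hypothetical; only the
# payload/digest layout is taken from the module docstring.
def _example_signed_packet(latitude, longitude, accuracy, secret):
    payload = '%d %.7f %.7f %d' % (int(time.time()), latitude, longitude, accuracy)
    digest = hashlib.sha1(payload + secret).hexdigest()
    return '%s %s' % (payload, digest)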
def usage(exitcode = 1):
"""
Show usage info and exit.
"""
argv0 = os.path.basename(sys.argv[0])
print 'Usage: {0} [options]'.format(argv0)
print ' Options:'
print ' --signed check every UDP packet for digital signature;'
print ' --port=N UDP port number to listen. Default is 5000.'
sys.exit(exitcode)
def main():
"""
Entry point.
"""
try:
cmd_opts, _cmd_args = getopt.getopt(
sys.argv[1:], '', ['port=', 'signed'])
except getopt.GetoptError as exc:
sys.stderr.write('Error: ' + str(exc) + '\n')
usage()
cmd_opts = dict(cmd_opts)
port = int(cmd_opts.get('--port', str(DEFAULT_PORT)))
signed = '--signed' in cmd_opts
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', port))
while True:
data, _addr = sock.recvfrom(100)
try:
result = parse_packet(data, signed)
except PacketParseError:
continue
sys.stdout.write(format_packet(result))
sys.stdout.flush()
class PacketParseError(Exception):
"""Bad packet received."""
pass
def parse_packet(data, signed = False):
"""
Parse and check incoming packet.
The packet must be of form:
TIMESTAMP LATITUDE LONGITUDE ACCURACY
:param data: packet body
:type data: string
:param signed: if True, the packet will be checked for a
valid digital signature
:type signed: boolean
:rtype: dict
"""
global LAST_TIMESTAMP
result = {}
tokens = [elem for elem in data.strip().split(' ') if elem]
if signed:
# check the signature
if len(tokens) < 5:
raise PacketParseError
payload = ' '.join(tokens[:4])
digest = tokens[4]
secret = os.environ.get('GPS2UDP_SECRET')
if secret is None or len(secret) == 0:
# secret is not defined => unable to check
raise PacketParseError
hasher = hashlib.sha1()
hasher.update(payload + secret)
if hasher.hexdigest() != digest:
# digital signature mismatch
raise PacketParseError
else:
# check tokens count
if len(tokens) < 4:
raise PacketParseError
# parse the tokens
try:
result['timestamp'] = int(tokens[0])
result['latitude'] = float(tokens[1])
result['longitude'] = float(tokens[2])
result['accuracy'] = int(tokens[3])
except ValueError:
raise PacketParseError
# check timestamp
time_diff = abs(result['timestamp'] - int(time.time()))
if time_diff > MAX_TIME_DIFF:
# the timestamp differs from NOW for more than 5 minutes
raise PacketParseError
if LAST_TIMESTAMP is not None:
if result['timestamp'] <= LAST_TIMESTAMP:
# the timestamp is not greater than the previous timestamp
raise PacketParseError
# check lat&long values
if not (-90.0 <= result['latitude'] <= 90.0):
raise PacketParseError
if not (-180.0 <= result['longitude'] <= 180.0):
raise PacketParseError
# check accuracy value
if result['accuracy'] < 0 or result['accuracy'] > MAX_ACCURACY:
raise PacketParseError
# All checks is passed => packet is valid.
# Save the timestamp in global var:
LAST_TIMESTAMP = result['timestamp']
return result
def format_packet(data):
"""
Format received packet for the stdout.
:param data: packet data
:type data: dict
:rtype: string
"""
return (str(data['timestamp']) + ' ' +
format(data['latitude'], '.7f') + ' ' +
format(data['longitude'], '.7f') + ' ' +
str(data['accuracy']) + '\n')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)
| bsd-2-clause | -6,502,868,409,911,242,000 | 31.016304 | 76 | 0.647938 | false | 3.983097 | false | false | false |
Puppet-Finland/trac | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | 1 | 4322 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Dirk Stöcker <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
import hashlib
import random
import urllib2
from trac.config import Option
from trac.core import Component, implements
from trac.util.html import tag
from tracspamfilter.api import user_agent
from tracspamfilter.captcha import ICaptchaMethod
class KeycaptchaCaptcha(Component):
"""KeyCaptcha implementation"""
implements(ICaptchaMethod)
private_key = Option('spam-filter', 'captcha_keycaptcha_private_key', '',
"""Private key for KeyCaptcha usage.""", doc_domain="tracspamfilter")
user_id = Option('spam-filter', 'captcha_keycaptcha_user_id', '',
"""User id for KeyCaptcha usage.""", doc_domain="tracspamfilter")
def generate_captcha(self, req):
session_id = "%d-3.4.0.001" % random.randint(1, 10000000)
sign1 = hashlib.md5(session_id + req.remote_addr +
self.private_key).hexdigest()
sign2 = hashlib.md5(session_id + self.private_key).hexdigest()
varblock = "var s_s_c_user_id = '%s';\n" % self.user_id
varblock += "var s_s_c_session_id = '%s';\n" % session_id
varblock += "var s_s_c_captcha_field_id = 'keycaptcha_response_field';\n"
varblock += "var s_s_c_submit_button_id = 'keycaptcha_response_button';\n"
varblock += "var s_s_c_web_server_sign = '%s';\n" % sign1
varblock += "var s_s_c_web_server_sign2 = '%s';\n" % sign2
varblock += "document.s_s_c_debugmode=1;\n"
fragment = tag(tag.script(varblock, type='text/javascript'))
fragment.append(
tag.script(type='text/javascript',
src='http://backs.keycaptcha.com/swfs/cap.js')
)
fragment.append(
tag.input(type='hidden', id='keycaptcha_response_field',
name='keycaptcha_response_field')
)
fragment.append(
tag.input(type='submit', id='keycaptcha_response_button',
name='keycaptcha_response_button')
)
req.session['captcha_key_session'] = session_id
return None, fragment
def verify_key(self, private_key, user_id):
if private_key is None or user_id is None:
return False
# FIXME - Not yet implemented
return True
def verify_captcha(self, req):
session = None
if 'captcha_key_session' in req.session:
session = req.session['captcha_key_session']
del req.session['captcha_key_session']
response_field = req.args.get('keycaptcha_response_field')
val = response_field.split('|')
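        # The field appears to be pipe-separated (inferred from the checks
        # below): val[0] a signature, val[1] a nonce-like value, val[2] a
        # verification URL, val[3] the session id read above.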
s = hashlib.md5('accept' + val[1] + self.private_key +
val[2]).hexdigest()
self.log.debug("KeyCaptcha response: %s .. %s .. %s",
response_field, s, session)
if s == val[0] and session == val[3]:
try:
request = urllib2.Request(
url=val[2],
headers={"User-agent": user_agent}
)
response = urllib2.urlopen(request)
return_values = response.read()
response.close()
except Exception, e:
self.log.warning("Exception in KeyCaptcha handling (%s)", e)
else:
self.log.debug("KeyCaptcha check result: %s", return_values)
if return_values == '1':
return True
self.log.warning("KeyCaptcha returned invalid check result: "
"%s (%s)", return_values, response_field)
else:
self.log.warning("KeyCaptcha returned invalid data: "
"%s (%s,%s)", response_field, s, session)
return False
def is_usable(self, req):
return self.private_key and self.user_id
| bsd-2-clause | -6,845,159,761,740,019,000 | 37.580357 | 82 | 0.588753 | false | 3.823894 | false | false | false |
thiagopena/PySIGNFe | pysignfe/nfse/bhiss/v10/SubstituicaoNfse.py | 1 | 1459 | # -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
class InfSubstituicaoNfse(XMLNFe):
def __init__(self):
super(InfSubstituicaoNfse, self).__init__()
self.Id = TagCaracter(nome=u'InfSubstituicaoNfse', propriedade=u'Id', raiz=u'/')
self.NfseSubstituidora = TagInteiro(nome=u'NfseSubstituidora', tamanho=[1,15], raiz=u'/')
def get_xml(self):
self.Id.valor = u'substituicao:'+str(self.NfseSubstituidora.valor)
xml = XMLNFe.get_xml(self)
xml += self.Id.xml
xml += self.NfseSubstituidora.xml
xml += u'</InfSubstituicaoNfse>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.NfseSubstituidora.xml = arquivo
xml = property(get_xml, set_xml)
class SubstituicaoNfse(XMLNFe):
def __init__(self):
super(SubstituicaoNfse, self).__init__()
self.InfSubstituicaoNfse = InfSubstituicaoNfse()
self.Signature = Signature()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<SubstituicaoNfse>'
xml += self.InfSubstituicaoNfse.xml
xml += self.Signature.xml
xml += u'</SubstituicaoNfse>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.InfSubstituicaoNfse.xml = arquivo
self.Signature.xml = self._le_noh('//Rps/sig:Signature')
xml = property(get_xml, set_xml) | lgpl-2.1 | 7,768,406,906,340,372,000 | 31.444444 | 97 | 0.601782 | false | 2.844055 | false | false | false |
panyam/libgraph | libgraph/graphs.py | 1 | 2606 |
class Edge(object):
def __init__(self, source, target, data = None):
self._source, self._target, self.data = source, target, data
def __repr__(self):
return "Edge<%s <-> %s>" % (repr(self.source), repr(self.target))
@property
def source(self): return self._source
@property
def target(self): return self._target
class Graph(object):
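    """Adjacency-map graph of hashable nodes joined by Edge objects.
    A brief usage sketch (based only on the methods defined below):
        g = Graph()
        g.add_edge(1, 2)
        g.add_edge(2, 3)
        for neighbor_key, edge in g.iter_neighbors(2):
            pass  # neighbor_key is 1 or 3, edge is the connecting Edge
    """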
def __init__(self, multi = False, directed = False, key_func = None, neighbors_func = None):
self.nodes = {}
self._is_directed = directed
self._is_multi = multi
self.neighbors_func = neighbors_func
self.key_func = key_func or (lambda x: x)
@property
def is_directed(self): return self._is_directed
@property
def is_multi(self): return self._is_multi
def get_edge(self, source, target):
return self.nodes.get(self.key_func(source), {}).get(self.key_func(target), None)
def add_nodes(self, *nodes):
return [self.add_node(node) for node in nodes]
def add_node(self, node):
"""
        Add or update a node (any hashable) in the graph.
"""
if node not in self.nodes: self.nodes[self.key_func(node)] = {}
return self.nodes[self.key_func(node)]
def neighbors(self, node):
"""Return the neighbors of a node."""
if self.neighbors_func:
return self.neighbors_func(node)
else:
return self.nodes.get(self.key_func(node), {})
def iter_neighbors(self, node, reverse = False):
"""
Return an iterator of neighbors (along with any edge data) for a particular node.
Override this method for custom node storage and inspection strategies.
"""
neighbors = self.neighbors(node)
if type(neighbors) is dict:
if reverse: return reversed(self.neighbors(node).items())
else: return self.neighbors(node).iteritems()
else:
if reverse: return reversed(neighbors)
else: return neighbors
def add_raw_edge(self, edge):
self.add_nodes(edge.source,edge.target)
source,target = edge.source,edge.target
source_key = self.key_func(source)
target_key = self.key_func(target)
self.nodes[source_key][target_key] = edge
if not self.is_directed and source_key != target_key:
self.nodes[target_key][source_key] = edge
return edge
def add_edge(self, source, target):
return self.add_raw_edge(Edge(source, target))
def add_edges(self, *edges):
return [self.add_edge(*e) for e in edges]
| apache-2.0 | 1,825,467,902,488,189,400 | 33.746667 | 96 | 0.608212 | false | 3.809942 | false | false | false |
akarol/cfme_tests | cfme/tests/cloud_infra_common/test_html5_vm_console.py | 1 | 8156 | # -*- coding: utf-8 -*-
"""Test for HTML5 Remote Consoles of VMware/RHEV/RHOSP Providers."""
import pytest
import imghdr
import time
import re
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.provider import CloudInfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.common.vm import VM
from cfme.utils import ssh
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.conf import credentials
from cfme.utils.providers import ProviderFilter
from wait_for import wait_for
from markers.env_markers.provider import providers
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter(classes=[CloudInfraProvider],
required_flags=['html5_console'])],
scope='module'),
]
@pytest.fixture(scope="function")
def vm_obj(request, provider, setup_provider, console_template, vm_name):
"""
Create a VM on the provider with the given template, and return the vm_obj.
Also, it will remove VM from provider using nested function _delete_vm
after the test is completed.
"""
vm_obj = VM.factory(vm_name, provider, template_name=console_template.name)
@request.addfinalizer
def _delete_vm():
try:
vm_obj.delete_from_provider()
except Exception:
logger.warning("Failed to delete vm `{}`.".format(vm_obj.name))
vm_obj.create_on_provider(timeout=2400, find_in_cfme=True, allow_skip="default")
if provider.one_of(OpenStackProvider):
# Assign FloatingIP to Openstack Instance from pool
# so that we can SSH to it
public_net = provider.data['public_network']
provider.mgmt.assign_floating_ip(vm_obj.name, public_net)
return vm_obj
@pytest.mark.rhv1
def test_html5_vm_console(appliance, provider, configure_websocket, vm_obj,
configure_console_vnc, take_screenshot):
"""
Test the HTML5 console support for a particular provider.
The supported providers are:
VMware
Openstack
RHV
For a given provider, and a given VM, the console will be opened, and then:
- The console's status will be checked.
- A command that creates a file will be sent through the console.
- Using ssh we will check that the command worked (i.e. that the file
was created.
"""
console_vm_username = credentials[provider.data.templates.get('console_template')
['creds']].get('username')
console_vm_password = credentials[provider.data.templates.get('console_template')
['creds']].get('password')
vm_obj.open_console(console='VM Console')
assert vm_obj.vm_console, 'VMConsole object should be created'
vm_console = vm_obj.vm_console
try:
# If the banner/connection-status element exists we can get
# the connection status text and if the console is healthy, it should connect.
assert vm_console.wait_for_connect(180), "VM Console did not reach 'connected' state"
# Get the login screen image, and make sure it is a jpeg file:
screen = vm_console.get_screen()
assert imghdr.what('', screen) == 'jpeg'
assert vm_console.wait_for_text(text_to_find="login:", timeout=200), ("VM Console"
" didn't prompt for Login")
# Enter Username:
vm_console.send_keys(console_vm_username)
assert vm_console.wait_for_text(text_to_find="Password", timeout=200), ("VM Console"
" didn't prompt for Password")
# Enter Password:
vm_console.send_keys("{}\n".format(console_vm_password))
time.sleep(5) # wait for login to complete
# This regex can find if there is a word 'login','password','incorrect' present in
# text, irrespective of its case
regex_for_login_password = re.compile(r'\blogin\b | \bpassword\b| \bincorrect\b',
flags=re.I | re.X)
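        # e.g. (illustrative) regex_for_login_password.findall("Password: ")
        # returns ['Password']; re.X makes the literal spaces in the pattern
        # insignificant, so it is effectively login|password|incorrect.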
def _validate_login():
"""
Try to read what is on present on the last line in console.
If it is word 'login', enter username, if 'password' enter password, in order
to make the login successful
"""
if vm_console.find_text_on_screen(text_to_find='login', current_line=True):
vm_console.send_keys(console_vm_username)
if vm_console.find_text_on_screen(text_to_find='Password', current_line=True):
vm_console.send_keys("{}\n".format(console_vm_password))
# if the login attempt failed for some reason (happens with RHOS-cirros),
# last line of the console will contain one of the following words:
# [login, password, incorrect]
# if so, regex_for_login_password will find it and result will not be []
# .split('\n')[-1] splits the console text on '\n' & picks last item of resulting list
result = regex_for_login_password.findall(vm_console.get_screen_text().split('\n')[-1])
return result == []
# if _validate_login() returns True, it means we did not find any of words
# [login, password, incorrect] on last line of console text, which implies login success
wait_for(func=_validate_login, timeout=300, delay=5)
logger.info("Wait to get the '$' prompt")
if provider.one_of(VMwareProvider):
vm_console.wait_for_text(text_to_find=provider.data.templates.get('console_template')
['prompt_text'], timeout=200)
else:
time.sleep(15)
# create file on system
vm_console.send_keys("touch blather")
if not (BZ.bugzilla.get_bug(1491387).is_opened):
# Test pressing ctrl-alt-delete...we should be able to get a new login prompt:
vm_console.send_ctrl_alt_delete()
assert vm_console.wait_for_text(text_to_find="login:", timeout=200,
to_disappear=True), ("Text 'login:' never disappeared, indicating failure"
" of CTRL+ALT+DEL button functionality, please check if OS reboots on "
"CTRL+ALT+DEL key combination and CTRL+ALT+DEL button on HTML5 Console is working.")
assert vm_console.wait_for_text(text_to_find="login:", timeout=200), ("VM Console"
" didn't prompt for Login")
if not provider.one_of(OpenStackProvider):
assert vm_console.send_fullscreen(), ("VM Console Toggle Full Screen button does"
" not work")
with ssh.SSHClient(hostname=vm_obj.ip_address, username=console_vm_username,
password=console_vm_password) as ssh_client:
# if file was created in previous steps it will be removed here
# we will get instance of SSHResult
# Sometimes Openstack drops characters from word 'blather' hence try to remove
# file using partial file name. Known issue, being worked on.
command_result = ssh_client.run_command("rm blather", ensure_user=True)
assert command_result
except Exception as e:
# Take a screenshot if an exception occurs
vm_console.switch_to_console()
take_screenshot("ConsoleScreenshot")
vm_console.switch_to_appliance()
raise e
finally:
vm_console.close_console_window()
# Logout is required because when running the Test back 2 back against RHV and VMware
# Providers, following issue would arise:
# If test for RHV is just finished, code would proceed to adding VMware Provider and once it
# is added, then it will navigate to Infrastructure -> Virtual Machines Page, it will see
# "Page Does not Exists" Error, because the browser will try to go to the
# VM details page of RHV VM which is already deleted
# at the End of test for RHV Provider Console and test would fail.
# Logging out would get rid of this issue.
appliance.server.logout()
| gpl-2.0 | -2,640,400,068,133,238,300 | 43.813187 | 100 | 0.643943 | false | 4.073926 | true | false | false |
adsabs/citation_helper_service | citation_helper_service/citation_helper.py | 1 | 1894 | '''
Created on Nov 1, 2014
@author: ehenneken
'''
from __future__ import absolute_import
# general module imports
import sys
import os
import operator
from itertools import groupby
from flask import current_app
from .utils import get_data
from .utils import get_meta_data
__all__ = ['get_suggestions']
def get_suggestions(**args):
# initializations
papers = []
bibcodes = []
if 'bibcodes' in args:
bibcodes = args['bibcodes']
if len(bibcodes) == 0:
return []
# Any overrides for default values?
Nsuggestions = current_app.config.get('CITATION_HELPER_NUMBER_SUGGESTIONS')
# get rid of potential trailing spaces
bibcodes = [a.strip() for a in bibcodes][
:current_app.config.get('CITATION_HELPER_MAX_INPUT')]
# start processing
# get the citations for all publications (keeping multiplicity is
# essential)
papers = get_data(bibcodes=bibcodes)
if "Error" in papers:
return papers
# removes papers from the original list to get candidates
papers = [a for a in papers if a not in bibcodes]
# establish frequencies of papers in results
paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
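    # e.g. (illustrative bibcodes) ['b1', 'b2', 'b1'] -> [('b1', 2), ('b2', 1)]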
# and sort them, most frequent first
paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
    # remove all papers with frequencies at or below the threshold
paperFreq = [a for a in paperFreq if a[1] > current_app.config.get(
'CITATION_HELPER_THRESHOLD_FREQUENCY')]
# get metadata for suggestions
meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
if "Error"in meta_dict:
return meta_dict
# return results in required format
return [{'bibcode': x, 'score': y, 'title': meta_dict[x]['title'],
'author':meta_dict[x]['author']} for (x, y) in
paperFreq[:Nsuggestions] if x in meta_dict.keys()]
| mit | -4,987,392,031,141,406,000 | 33.436364 | 79 | 0.67265 | false | 3.546816 | false | false | false |
fenimore/freeebot | freeebot.py | 1 | 5664 | #!/usr/bin/env python
"""Twitter Bot for posting craigslist postings of Free Stuff
Currently set up for New York.
Example usage:
python tweetstuffs.py
Attributes:
- NO_IMAGE -- link for when there is no image found
- FILE -- path to tmp file
- PATH -- current directory
- C_KEY, C_SECRET, A_TOKEN, A_TOKEN_SECRET -- twitter api tokens
@author: Fenimore Love
@license: MIT
@date: 2015-2016
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re, sys, os, time, urllib.error, urllib.request
from datetime import datetime
from time import gmtime, strftime, sleep
import tweepy
#from freestuffs import stuff_scraper
from freestuffs.stuff_scraper import StuffScraper
from secrets import *
# ====== Individual bot configuration ==========================
bot_username = 'freeebot'
logfile = bot_username
# ==============================================================
PATH = os.getcwd()
if not os.path.exists(PATH + '/tmp/'):
os.makedirs(PATH + '/tmp/')
if not os.path.exists(PATH + '/log/'):
os.makedirs(PATH + '/log/')
NO_IMAGE = 'http://upload.wikimedia.org/wikipedia/commons/a/ac/No_image_available.svg'
FILE = PATH + '/tmp/tmp-filename.jpg'
def create_tweet(stuff):
"""Create string for tweet with stuff.
TODO: replace New York with NY
TODO: add a hashtag
"""
post = {"title": stuff['title'],
"loc" : stuff['location'],
"url" : stuff['url']}
_text = post["loc"].strip(', New York') + "\n" + post["title"] +" " + post["url"] + ' #FreeStuffNY'
_text = check_length(_text, post)
return _text
def tweet(new_stuffs_set):
"""Tweet new free stuff."""
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
# Unpack set of sorted tuples back into dicts
stuffs = map(dict, new_stuffs_set)
    if len(list(new_stuffs_set)) != 0: # if there exists new items
for stuff in stuffs:
tweet = create_tweet(stuff)
if str(stuff['image']) == NO_IMAGE:
isImage = False
else:
isImage = True
try:
urllib.request.urlretrieve(stuff['image'], FILE)
except:
log('image: '+ stuff['image'] + 'can\'t be found')
isImage = False
try:
if isImage:
log("\n\n Posting with Media \n " + tweet + "\n ----\n")
api.update_with_media(FILE, status=tweet)
else:
log("\n\n Posting without media\n "
+ tweet + "\n ----\n")
api.update_status(tweet)
except tweepy.TweepError as e:
log('Failure ' + stuff['title'])
log(e.reason)
else:
print("\n ----\n")
def check_length(tweet, post):
"""Check if tweet is proper length."""
size = len(tweet) - len(post["url"])
if size < 145: # tweet is good
return tweet
else:
log("Tweet too long")
tweet = post["loc"] + "\n" + post["title"] + " " + post["url"]
        size = len(tweet) - len(post["url"])
if size > 144: # tweet is still not good
tweet = post["title"] + " " + post["url"]
return tweet
return tweet
def log(message):
"""Log message to logfile. And print it out."""
# TODO: fix
date = strftime("-%d-%b-%Y", gmtime())
path = os.path.realpath(os.path.join(os.getcwd(), 'log'))
with open(os.path.join(path, logfile + date + '.log'),'a+') as f:
t = strftime("%d %b %Y %H:%M:%S", gmtime())
print("\n" + t + " " + message) # print it tooo...
f.write("\n" + t + " " + message)
if __name__ == "__main__":
"""Tweet newly posted Free stuff objects.
    Uses sets of the sorted-tuple stuff dicts to compare old scrapes
    with new ones. There is no need to ask the scraper for precise
    coordinates, since Twitter doesn't need them. If the set has 15
    items, nothing is posted, to avoid flooding Twitter on start-up.
"""
#process_log = open(os.path.join('log', logfile_username),'a+')
_location = 'newyork' # TODO: Change to brooklyn?
stale_set = set() # the B set is what has already been
log("\n\nInitiating\n\n")
while True:
stuffs = [] # a list of dicts
for stuff in StuffScraper(_location, 15).stuffs: # convert stuff
stuff_dict = {'title':stuff.thing, # object into dict
'location':stuff.location,
'url':stuff.url, 'image':stuff.image}
stuffs.append(stuff_dict)
fresh_set = set() # A set, Fresh out the oven
for stuff in stuffs:
tup = tuple(sorted(stuff.items()))
fresh_set.add(tup)
"""Evaluate if there have been new posts"""
ready_set = fresh_set - stale_set # Get the difference
stale_set = fresh_set
        if len(list(ready_set)) != 15:
tweet(ready_set)
log("\n New Stuffs : " + str(len(list(ready_set)))+
"\n Todays Stuffs : "+ str(len(list(stale_set)))+
"\n\n Sleep Now (-_-)Zzz... \n")
sleep(1000) # 3600 Seconds = Hour
| mit | 6,432,309,924,595,103,000 | 35.307692 | 103 | 0.570798 | false | 3.644788 | false | false | false |
embray/numpy | numpy/lib/npyio.py | 1 | 66490 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from ._compiled_base import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
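    # allowZip64 enables ZIP64 extensions so archives can exceed the
    # classic ZIP size limits.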
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. Compressed files with the filename extension
``.gz`` are acceptable. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else:
# Try a pickle
try:
return pickle.load(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
    newline : str, optional
        Character separating lines.
        .. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
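# A minimal round-trip sketch of ``savetxt`` and ``loadtxt`` as described
# above (illustrative only; the temporary file name is an assumption, not
# part of this module):
if __name__ == "__main__":
    import os
    import tempfile
    import numpy as np

    demo = np.arange(6.0).reshape(3, 2)
    demo_path = os.path.join(tempfile.gettempdir(), "savetxt_demo.txt")
    # One format specifier per column; the header line is prefixed with the
    # `comments` string ('# ' by default), so loadtxt skips it when reading.
    np.savetxt(demo_path, demo, fmt=["%.3f", "%d"], delimiter=",",
               header="col_a,col_b")
    roundtrip = np.loadtxt(demo_path, delimiter=",")
    assert roundtrip.shape == (3, 2)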
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
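# A quick sketch of ``fromregex`` with a file-like object instead of a path
# (illustrative only): each regex group becomes a field of the structured
# result, exactly as in the docstring example above.
if __name__ == "__main__":
    from io import BytesIO
    import numpy as np

    log = BytesIO(b"1312 foo\n1534 bar\n444 qux")
    parsed = fromregex(log, br"(\d+)\s+(\w+)",
                       [("num", np.int64), ("key", "S3")])
    assert int(parsed["num"].sum()) == 3290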
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was deprecated in numpy 1.5, and will be removed in
        numpy 2.0. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was deprecated in numpy 1.5, and will be removed in
numpy 2.0. Please use `missing_values` instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(
"The use of `skiprows` is deprecated, it will be removed in "
"numpy 2.0.\nPlease use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(
"The use of `missing` is deprecated, it will be removed in "
"Numpy 2.0.\nPlease use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
#
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
else:
rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
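# A small sketch of the missing-data handling described above (illustrative
# only; byte input is used because file objects must yield byte strings on
# Python 3): the blank field between the two commas is treated as missing
# and replaced by the filling value.
if __name__ == "__main__":
    from io import BytesIO
    import numpy as np

    raw = BytesIO(b"1,,3\n4,5,6")
    filled = np.genfromtxt(raw, delimiter=",", filling_values=-999)
    assert filled[0, 1] == -999.0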
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
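# A short sketch of ``recfromcsv`` (illustrative only; the in-memory CSV is
# an assumption): field names are taken from the header row and, with the
# default ``case_sensitive="lower"``, lower-cased.
if __name__ == "__main__":
    from io import BytesIO

    csv_bytes = BytesIO(b"Name,Score\nalice,10\nbob,12")
    rec = recfromcsv(csv_bytes)
    assert rec.dtype.names == ("name", "score")
    assert int(rec["score"].sum()) == 22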
| bsd-3-clause | -2,169,429,861,298,942,200 | 33.957939 | 89 | 0.559107 | false | 4.12597 | false | false | false |
StudyBlue/sblibs | sblibs/display/general.py | 1 | 1801 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: [email protected]
#
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
"""
A collection of useful decorators for debugging
and time evaluation of function flow
"""
# stdlib
from functools import wraps
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2: # pragma: no cover
from itertools import izip
zip = izip
else: # pragma: no cover
zip = zip
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
# Copied from `six' library.
# Copyright (c) 2010-2015 Benjamin Peterson
# License: MIT
class metaclass(meta):
"""Dummy metaclass"""
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
def cache(function):
"""
Function: cache
Summary: Decorator used to cache the input->output
Examples: An fib memoized executes at O(1) time
instead O(e^n)
Attributes:
@param (function): function
Returns: wrapped function
TODO: Give support to functions with kwargs
"""
memory = {}
miss = object()
@wraps(function)
def _wrapper(*args):
result = memory.get(args, miss)
if result is miss:
_wrapper.call += 1
result = function(*args)
memory[args] = result
return result
_wrapper.call = 0
return _wrapper
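# A short usage sketch of the ``cache`` decorator above (illustrative only):
# repeated calls with the same argument hit the memo dictionary instead of
# recursing again, and ``.call`` counts the cache misses.
if __name__ == "__main__":
    @cache
    def fib(n):
        """Naive recursive Fibonacci, made cheap by memoization."""
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    assert fib(30) == 832040
    # One cache miss per distinct argument 0..30.
    assert fib.call == 31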
| bsd-2-clause | 1,671,279,908,194,633,700 | 23 | 78 | 0.622222 | false | 3.797468 | false | false | false |
microelly2/geodata | geodat/import_aster.py | 1 | 5208 | ''' geodat import AST (gdal)'''
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- geodat import AST (gdal)
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
#http://geoinformaticstutorial.blogspot.de/2012/09/reading-raster-data-with-python-and-gdal.html
#http://forum.freecadweb.org/viewtopic.php?f=8&t=17647&start=10#p139201
# the ast file is expected in ~/.FreeCAD/geodat/AST
# FreeCAD.ConfigGet("UserAppData") +'/geodat/AST/ASTGTM2_' + ff +'_dem.tif'
'''
ASTER GDEM Policy Agreements
I agree to redistribute the ASTER GDEM *only* to individuals within my organization or project of intended use or in response to disasters in support of the GEO Disaster Theme.
When presenting or publishing ASTER GDEM data, I agree to include "ASTER GDEM is a product of METI and NASA."
Because there are known inaccuracies and artifacts in the data set, please use the product with awareness of its limitations. The data are provided "as is" and neither NASA nor METI/ERSDAC will be responsible for any damages resulting from use of the data.
'''
from geodat.say import *
import geodat.transversmercator
from geodat.transversmercator import TransverseMercator
import geodat.import_xyz
import geodat.geodat_lib
# apt-get install python-gdal
import gdal
from gdalconst import *
import WebGui
import Points
def import_ast(b=50.26,l=11.39):
'''get the data from a downloaded file
the file is expected in FreeCAD.ConfigGet("UserAppData") + '/geodat/AST/'
with the common filename for lan/lot parameters
example .../.FreeCAD/geodat/AST/ASTGTM2_N51E010_dem.tif
'''
bs=np.floor(b)
ls=np.floor(l)
# the ast dataset
ff="N%02dE%03d" % (int(bs),int(ls))
fn=FreeCAD.ConfigGet("UserAppData") +'/geodat/AST/ASTGTM2_' + ff +'_dem.tif'
print(fn)
'''
fn='/home/microelly2/FCB/b217_heightmaps/tandemx_daten/Chile-Chuquicatmata.tif'
b=-22.3054705
l=-68.9259643
bs=np.floor(b)
ls=np.floor(l)
print(fn)
'''
dataset = gdal.Open(fn, GA_ReadOnly)
if dataset == None:
msg="\nProblem cannot open " + fn + "\n"
FreeCAD.Console.PrintError(msg)
errorDialog(msg)
return
cols=dataset.RasterXSize
rows=dataset.RasterYSize
geotransform = dataset.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
band = dataset.GetRasterBand(1)
data = band.ReadAsArray(0, 0, cols, rows)
#data.shape -> 3601 x 3601 secs
# erfurt 51,11
#data[0,0]
# zeitz 51,12
#data[3600,0]
# windischletten(zapfendorf) 50,11
#data[0,3600]
# troestau fichtelgebirge 50,12
#data[3600,3600]
px=int(round((bs+1-b)*3600))
py=int(round((l-ls)*3600))
pts=[]
d=70
d1=20
d2=50
d1=d
d2=d
tm=TransverseMercator()
tm.lat=b
tm.lon=l
center=tm.fromGeographic(tm.lat,tm.lon)
z0= data[px,py] # relative height to origin px,py
for x in range(px-d1,px+d1):
for y in range(py-d2,py+d2):
ll=tm.fromGeographic(bs+1-1.0/3600*x,ls+1.0/3600*y)
pt=FreeCAD.Vector(ll[0]-center[0],ll[1]-center[1], 1000.0* (data[x,y]-z0))
pts.append(pt)
# display the point cloud
p=Points.Points(pts)
Points.show(p)
return pts
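## minimal standalone sketch of the gdal read pattern used above
## (illustrative only; the path is an assumption): open a GeoTIFF read-only
## and pull the first band into a numpy array
def read_dem_band_example(fn='/tmp/example_dem.tif'):
	'''illustrative helper, not used by the importer itself'''
	dataset = gdal.Open(fn, GA_ReadOnly)
	if dataset is None:
		return None
	band = dataset.GetRasterBand(1)
	# the full raster as a 2D array of elevation values
	return band.ReadAsArray(0, 0, dataset.RasterXSize, dataset.RasterYSize)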
s6='''
MainWindow:
VerticalLayout:
id:'main'
# setFixedHeight: 600
setFixedWidth: 600
move: PySide.QtCore.QPoint(3000,100)
QtGui.QLabel:
setText:"C O N F I G U R A T I O N"
QtGui.QLabel:
QtGui.QLineEdit:
id: 'bl'
# zeyerner wand **
#(50.2570152,11.3818337)
# outdoor inn *
#(50.3737109,11.1891891)
# roethen **
#(50.3902794,11.157629)
# kreuzung huettengrund nach judenbach ***
#(50.368209,11.2016135)
setText:"50.368209,11.2016135"
# coburg zentrum
setText:"50.2639926,10.9686946"
QtGui.QPushButton:
setText: "Create height models"
clicked.connect: app.runbl
QtGui.QPushButton:
setText: "show Map"
clicked.connect: app.showMap
'''
## the gui backend
class MyApp(object):
## create the height model
def runbl(self):
bl=self.root.ids['bl'].text()
spli=bl.split(',')
b=float(spli[0])
l=float(spli[1])
s=15
import_heights(float(b),float(l),float(s))
	## display the location in openstreetmap
def showMap(self):
bl=self.root.ids['bl'].text()
spli=bl.split(',')
b=float(spli[0])
l=float(spli[1])
s=15
WebGui.openBrowser( "http://www.openstreetmap.org/#map=16/"+str(b)+'/'+str(l))
## the dialog to import a gdal file
def mydialog():
'''the dialog to import a gdal file'''
app=MyApp()
import geodat
import geodat.miki as gmiki
miki=gmiki.Miki()
miki.app=app
app.root=miki
miki.run(s6)
FreeCAD.mm=miki
return miki
## import heights using import_xyz
def import_heights(b,l,s):
ts=time.time()
pcl=import_ast(b,l)
pts=pcl
ff="N" + str(b) + " E" + str(l)
nurbs=geodat.import_xyz.suv2(ff,pts,u=0,v=0,d=140,la=140,lb=140)
te=time.time()
print ("time to create models:",te-ts)
fn=geodat.geodat_lib.genSizeImage(size=512)
# geodat.geodat_lib.addImageTexture(nurbs,fn,scale=(8,3))
nurbs.ViewObject.Selectable = False
## test start and hide the dialog
def runtest():
m=mydialog()
m.objects[0].hide()
if __name__ == '__main__':
runtest()
def importASTER():
mydialog()
| lgpl-3.0 | -4,922,200,730,300,529,000 | 20.520661 | 256 | 0.679724 | false | 2.493059 | false | false | false |
flavoso/gerencex | gerencex/core/tests/test_view_office_tickets.py | 1 | 1342 | import datetime
from django.contrib.auth.models import User
from django.shortcuts import resolve_url as r
from django.test import TestCase
from django.utils import timezone
from gerencex.core.models import Restday, Office
class OfficeTicketsViewTest(TestCase):
def setUp(self):
self.office = Office.objects.create(
name='Terceira Diacomp',
initials='DIACOMP3'
)
User.objects.create_user('testuser', '[email protected]', 'senha123')
self.user = User.objects.get(username='testuser')
self.user.first_name = 'Ze'
self.user.last_name = 'Mane'
self.user.userdetail.office = self.office
self.user.save()
self.client.login(username='testuser', password='senha123')
self.resp = self.client.get(r('office_tickets'))
def test_get(self):
"""GET must return status code 200"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""Must use restdays.html"""
self.assertTemplateUsed(self.resp, 'office_tickets.html')
def test_html(self):
# print(self.resp.content)
contents = [
'Terceira Diacomp',
'Ze Mane'
]
for expected in contents:
with self.subTest():
self.assertContains(self.resp, expected)
| gpl-3.0 | 6,811,396,555,601,649,000 | 30.952381 | 73 | 0.629657 | false | 3.738162 | true | false | false |
sloede/modm | modfileparser.py | 1 | 6189 | #!/usr/bin/env python
# Modm - Modules iMproved
# Copyright (C) 2013-2014 Michael Schlottke
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# System imports
import os
import shlex
# Project imports
from env import Env,EnvVariable
from basheval import BashEval
class ModfileParser:
"""
Class to parse module files and execute commands found in them.
"""
backup_prefix = 'MODM_BACKUP_'
def __init__(self, env=Env(), basheval=BashEval()):
"""Save arguments to class and initialize list of valid commands.
Arguments:
env -- object to handle environment variables
basheval -- object to convert commands to Bash evaluation strings
"""
# Save arguments
self.env = env
self.be = basheval
# Init commands
self.commands = dict()
self.init_commands()
# Init other members
self.do_unload = False
def init_commands(self):
"""Initialize all commands that are supported in module files."""
self.commands['prepend_path'] = lambda *x: self.cmd_prepend_variable(
*x,
kind='path')
self.commands['prepend_string'] = lambda *x: self.cmd_prepend_variable(
*x,
kind='string')
self.commands['print'] = self.cmd_print
self.commands['print_load'] = lambda *x: self.cmd_print(
*x,
unload=False)
self.commands['print_unload'] = lambda *x: self.cmd_print(
*x,
load=False)
self.commands['set'] = self.cmd_set
def cmd_prepend_variable(self, name, value, kind='string'):
"""Prepend variable `name` with `value`."""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name, kind=kind)
# Prepend value (or undo prepend)
self.env.variables[name].prepend(value, undo=self.do_unload)
def cmd_append_variable(self, name, value, kind='string'):
"""Append variable `name` with `value`."""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name, kind=kind)
# Append value (or undo append)
self.env.variables[name].append(value, undo=self.do_unload)
def cmd_print(self, message, load=True, unload=True):
"""Print `message`."""
if (load and not self.do_unload) or (unload and self.do_unload):
self.be.echo(message)
def cmd_set(self, name, value):
"""Set variable `name` to `value`.
Save backup of `name` if it exists already, and restore the
original value upon unloading.
"""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name)
# Determine name of potential backup variable and create backup variable
# if it does not exist
backupname = self.backup_prefix + name
if backupname not in self.env.variables:
self.env.variables[backupname] = EnvVariable(backupname)
# If variable is to be set, check if it is already set and save backup
if not self.do_unload:
if self.env.variables[name].is_set():
self.env.variables[backupname].set_value(
self.env.variables[name].get_value())
self.env.variables[name].set_value(value)
# If variable is to be unset, check if backup variable exists and
# restore it
else:
if self.env.variables[backupname].is_set():
self.env.variables[name].set_value(
self.env.variables[backupname].get_value())
self.env.variables[backupname].unset()
else:
self.env.variables[name].unset()
def load(self, modfile):
"""Load module file `modfile`."""
self.do_unload = False
return self.parse(modfile)
def unload(self, modfile):
"""Unload module file `modfile`."""
self.do_unload = True
return self.parse(modfile)
def parse(self, modfile):
"""Parse module file `modfile` and execute commands that are found.
Return true if parsing was successful, otherwise false."""
# Return without doing anything if file is not found
if not os.path.isfile(modfile):
return
# Read module file
with open(modfile, 'r') as f:
lines = f.readlines()
# Try to parse each line into shell tokens or die
try:
splitlines = [shlex.split(line) for line in lines]
except Exception as e:
self.be.error("Bad syntax in module file '{mf}': {e} ({n})".format(
mf=modfile, e=e, n=type(e).__name__))
return False
# Parse each line indicidually
for tokens in splitlines:
# Skip line if there were no tokens
if len(tokens) == 0:
continue
# First token is command, rest (if existing) are arguments
cmd = tokens[0]
args = tokens[1:]
# If command exists, execute it while providing the arguments from
# the file
if cmd in self.commands:
self.commands[cmd](*args)
# Return true to indicate that nothing was wrong
return True
| gpl-2.0 | 8,359,449,882,160,916,000 | 34.774566 | 80 | 0.604944 | false | 4.21594 | false | false | false |
cp16net/trove | trove/tests/unittests/guestagent/test_dbaas.py | 1 | 144181 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
import subprocess
import tempfile
import time
from uuid import uuid4
from mock import ANY
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
from oslo_utils import netutils
import sqlalchemy
import testtools
from testtools.matchers import Equals
from testtools.matchers import Is
from testtools.matchers import Not
from trove.common import cfg
from trove.common.exception import BadRequest
from trove.common.exception import GuestError
from trove.common.exception import PollTimeOut
from trove.common.exception import ProcessExecutionError
from trove.common import instance as rd_instance
from trove.common import utils
from trove.conductor import api as conductor_api
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.cassandra import (
service as cass_service)
from trove.guestagent.datastore.experimental.cassandra import (
system as cass_system)
from trove.guestagent.datastore.experimental.couchbase import (
service as couchservice)
from trove.guestagent.datastore.experimental.couchdb import (
service as couchdb_service)
from trove.guestagent.datastore.experimental.db2 import (
service as db2service)
from trove.guestagent.datastore.experimental.mongodb import (
service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.datastore.experimental.redis import service as rservice
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.datastore.experimental.redis import system as RedisSystem
from trove.guestagent.datastore.experimental.vertica import (
system as vertica_system)
from trove.guestagent.datastore.experimental.vertica.service import (
VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent.datastore.mysql.service import KeepAliveConnection
from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import MySqlRootAccess
from trove.guestagent.datastore.service import BaseDbStatus
from trove.guestagent.db import models
from trove.guestagent import dbaas as dbaas_sr
from trove.guestagent.dbaas import get_filesystem_volume_stats
from trove.guestagent.dbaas import to_gb
from trove.guestagent import pkg
from trove.guestagent.volume import VolumeDevice
from trove.instance.models import InstanceServiceStatus
from trove.tests.unittests.util import util
CONF = cfg.CONF
"""
Unit tests for the classes and functions in dbaas.py.
"""
FAKE_DB = {"_name": "testDB", "_character_set": "latin2",
"_collate": "latin2_general_ci"}
FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2",
"_collate": "latin2_general_ci"}
FAKE_USER = [{"_name": "random", "_password": "guesswhat",
"_host": "%", "_databases": [FAKE_DB]}]
conductor_api.API.get_client = Mock()
conductor_api.API.heartbeat = Mock()
class FakeAppStatus(BaseDbStatus):
def __init__(self, id, status):
self.id = id
self.next_fake_status = status
def _get_actual_db_status(self):
return self.next_fake_status
def set_next_status(self, next_status):
self.next_fake_status = next_status
def _is_query_router(self):
return False
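# A tiny sketch of how the fake status stub above is driven in these tests
# (illustrative only): queue the status the guest should report next, then
# read it back through the normal status hook.
def _fake_status_example():
    status = FakeAppStatus(str(uuid4()), rd_instance.ServiceStatuses.NEW)
    status.set_next_status(rd_instance.ServiceStatuses.RUNNING)
    assert (status._get_actual_db_status() ==
            rd_instance.ServiceStatuses.RUNNING)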
class DbaasTest(testtools.TestCase):
def setUp(self):
super(DbaasTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_utils_execute = dbaas.utils.execute
def tearDown(self):
super(DbaasTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.utils.execute = self.orig_utils_execute
@patch.object(operating_system, 'remove')
def test_clear_expired_password(self, mock_remove):
secret_content = ("# The random password set for the "
"root user at Wed May 14 14:06:38 2014 "
"(local time): somepassword")
with patch.object(dbaas.utils, 'execute',
return_value=(secret_content, None)):
dbaas.clear_expired_password()
self.assertEqual(2, dbaas.utils.execute.call_count)
self.assertEqual(1, mock_remove.call_count)
@patch.object(operating_system, 'remove')
def test_no_secret_content_clear_expired_password(self, mock_remove):
with patch.object(dbaas.utils, 'execute', return_value=('', None)):
dbaas.clear_expired_password()
self.assertEqual(1, dbaas.utils.execute.call_count)
mock_remove.assert_not_called()
@patch.object(operating_system, 'remove')
def test_fail_password_update_content_clear_expired_password(self,
mock_remove):
secret_content = ("# The random password set for the "
"root user at Wed May 14 14:06:38 2014 "
"(local time): somepassword")
with patch.object(dbaas.utils, 'execute',
side_effect=[(secret_content, None),
ProcessExecutionError]):
dbaas.clear_expired_password()
self.assertEqual(2, dbaas.utils.execute.call_count)
mock_remove.assert_not_called()
@patch.object(operating_system, 'remove')
@patch.object(dbaas.utils, 'execute', side_effect=ProcessExecutionError)
def test_fail_retrieve_secret_content_clear_expired_password(self,
mock_execute,
mock_remove):
dbaas.clear_expired_password()
self.assertEqual(1, mock_execute.call_count)
mock_remove.assert_not_called()
def test_get_auth_password(self):
dbaas.utils.execute_with_timeout = Mock(
return_value=("password ", None))
password = dbaas.get_auth_password()
self.assertEqual("password", password)
def test_get_auth_password_error(self):
dbaas.utils.execute_with_timeout = Mock(
return_value=("password", "Error"))
self.assertRaises(RuntimeError, dbaas.get_auth_password)
def test_service_discovery(self):
with patch.object(os.path, 'isfile', return_value=True):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.assertIsNotNone(mysql_service['cmd_start'])
self.assertIsNotNone(mysql_service['cmd_enable'])
def test_load_mysqld_options(self):
output = "mysqld would've been started with the these args:\n"\
"--user=mysql --port=3306 --basedir=/usr "\
"--tmpdir=/tmp --skip-external-locking"
with patch.object(os.path, 'isfile', return_value=True):
dbaas.utils.execute = Mock(return_value=(output, None))
options = dbaas.load_mysqld_options()
self.assertEqual(5, len(options))
self.assertEqual(["mysql"], options["user"])
self.assertEqual(["3306"], options["port"])
self.assertEqual(["/usr"], options["basedir"])
self.assertEqual(["/tmp"], options["tmpdir"])
self.assertTrue("skip-external-locking" in options)
def test_load_mysqld_options_contains_plugin_loads_options(self):
output = ("mysqld would've been started with the these args:\n"
"--plugin-load=blackhole=ha_blackhole.so "
"--plugin-load=federated=ha_federated.so")
with patch.object(os.path, 'isfile', return_value=True):
dbaas.utils.execute = Mock(return_value=(output, None))
options = dbaas.load_mysqld_options()
self.assertEqual(1, len(options))
self.assertEqual(["blackhole=ha_blackhole.so",
"federated=ha_federated.so"],
options["plugin-load"])
@patch.object(os.path, 'isfile', return_value=True)
def test_load_mysqld_options_error(self, mock_exists):
dbaas.utils.execute = Mock(side_effect=ProcessExecutionError())
self.assertFalse(dbaas.load_mysqld_options())
def test_get_datadir(self):
cnf_value = '[mysqld]\ndatadir=/var/lib/mysql/data'
with patch.object(dbaas, 'read_mycnf', Mock(return_value=cnf_value)):
self.assertEqual('/var/lib/mysql/data',
dbaas.get_datadir(reset_cache=True))
class ResultSetStub(object):
def __init__(self, rows):
self._rows = rows
def __iter__(self):
return self._rows.__iter__()
@property
def rowcount(self):
return len(self._rows)
def __repr__(self):
return self._rows.__repr__()
class MySqlAdminMockTest(testtools.TestCase):
def tearDown(self):
super(MySqlAdminMockTest, self).tearDown()
def test_list_databases(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute',
return_value=ResultSetStub(
[('db1', 'utf8', 'utf8_bin'),
('db2', 'utf8', 'utf8_bin'),
('db3', 'utf8', 'utf8_bin')])):
databases, next_marker = MySqlAdmin().list_databases(limit=10)
self.assertThat(next_marker, Is(None))
self.assertThat(len(databases), Is(3))
class MySqlAdminTest(testtools.TestCase):
def setUp(self):
super(MySqlAdminTest, self).setUp()
self.orig_get_engine = dbaas.get_engine
self.orig_LocalSqlClient = dbaas.LocalSqlClient
self.orig_LocalSqlClient_enter = dbaas.LocalSqlClient.__enter__
self.orig_LocalSqlClient_exit = dbaas.LocalSqlClient.__exit__
self.orig_LocalSqlClient_execute = dbaas.LocalSqlClient.execute
self.orig_MySQLUser_is_valid_user_name = (
models.MySQLUser._is_valid_user_name)
dbaas.get_engine = MagicMock(name='get_engine')
dbaas.LocalSqlClient = Mock
dbaas.LocalSqlClient.__enter__ = Mock()
dbaas.LocalSqlClient.__exit__ = Mock()
dbaas.LocalSqlClient.execute = Mock()
self.mySqlAdmin = MySqlAdmin()
def tearDown(self):
super(MySqlAdminTest, self).tearDown()
dbaas.get_engine = self.orig_get_engine
dbaas.LocalSqlClient = self.orig_LocalSqlClient
dbaas.LocalSqlClient.__enter__ = self.orig_LocalSqlClient_enter
dbaas.LocalSqlClient.__exit__ = self.orig_LocalSqlClient_exit
dbaas.LocalSqlClient.execute = self.orig_LocalSqlClient_execute
models.MySQLUser._is_valid_user_name = (
self.orig_MySQLUser_is_valid_user_name)
def test__associate_dbs(self):
db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
{"grantee": "'test_user'@'%'", "table_schema": "db2"},
{"grantee": "'test_user'@'%'", "table_schema": "db3"},
{"grantee": "'test_user1'@'%'", "table_schema": "db1"},
{"grantee": "'test_user1'@'%'", "table_schema": "db3"}]
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.databases = []
expected = ("SELECT grantee, table_schema FROM "
"information_schema.SCHEMA_PRIVILEGES WHERE privilege_type"
" != 'USAGE' GROUP BY grantee, table_schema;")
with patch.object(dbaas.LocalSqlClient, 'execute',
Mock(return_value=db_result)):
self.mySqlAdmin._associate_dbs(user)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
self.assertEqual(3, len(user.databases))
self.assertEqual(expected, args[0].text,
"Associate database queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_change_passwords(self):
user = [{"name": "test_user", "host": "%", "password": "password"}]
self.mySqlAdmin.change_passwords(user)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("UPDATE mysql.user SET Password="
"PASSWORD('password') WHERE User = 'test_user' "
"AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Change password queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_password(self):
db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
{"grantee": "'test_user'@'%'", "table_schema": "db2"}]
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"password": "password"}
with patch.object(dbaas.LocalSqlClient, 'execute',
Mock(return_value=db_result)):
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%',
user_attrs)
self.assertEqual(0,
self.mySqlAdmin.grant_access.call_count)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET Password="
"PASSWORD('password') WHERE User = 'test_user' "
"AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_name(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"name": "new_name"}
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%', user_attrs)
self.mySqlAdmin.grant_access.assert_called_with(
'new_name', '%', set([]))
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET User='new_name' "
"WHERE User = 'test_user' AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_host(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"host": "new_host"}
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%', user_attrs)
self.mySqlAdmin.grant_access.assert_called_with(
'test_user', 'new_host', set([]))
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET Host='new_host' "
"WHERE User = 'test_user' AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_create_database(self):
databases = []
databases.append(FAKE_DB)
self.mySqlAdmin.create_database(databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
self.assertEqual(1, dbaas.LocalSqlClient.execute.call_count,
"The client object was not called exactly once, " +
"it was called %d times"
% dbaas.LocalSqlClient.execute.call_count)
def test_create_database_more_than_1(self):
databases = []
databases.append(FAKE_DB)
databases.append(FAKE_DB_2)
self.mySqlAdmin.create_database(databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB2` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count,
"The client object was not called exactly twice, " +
"it was called %d times"
% dbaas.LocalSqlClient.execute.call_count)
def test_create_database_no_db(self):
databases = []
self.mySqlAdmin.create_database(databases)
self.assertFalse(dbaas.LocalSqlClient.execute.called,
"The client object was called when it wasn't " +
"supposed to")
def test_delete_database(self):
database = {"_name": "testDB"}
self.mySqlAdmin.delete_database(database)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = "DROP DATABASE `testDB`;"
self.assertEqual(expected, args[0].text,
"Delete database queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_delete_user(self):
user = {"_name": "testUser", "_host": None}
self.mySqlAdmin.delete_user(user)
        # call_args can be None here if the mocked execute was never
        # invoked, so only check the query text when it is available.
call_args = dbaas.LocalSqlClient.execute.call_args
if call_args is not None:
args, _ = call_args
expected = "DROP USER `testUser`@`%`;"
self.assertEqual(expected, args[0].text,
"Delete user queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_create_user(self):
self.mySqlAdmin.create_user(FAKE_USER)
access_grants_expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO "
"`random`@`%` IDENTIFIED BY 'guesswhat';")
create_user_expected = ("GRANT USAGE ON *.* TO `random`@`%` "
"IDENTIFIED BY 'guesswhat';")
create_user, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
self.assertEqual(create_user_expected, create_user[0].text,
"Create user queries are not the same")
access_grants, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
self.assertEqual(access_grants_expected, access_grants[0].text,
"Create user queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count)
def test_list_databases(self):
self.mySqlAdmin.list_databases()
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
def test_list_databases_with_limit(self):
limit = 2
self.mySqlAdmin.list_databases(limit)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertTrue("LIMIT " + str(limit + 1) in args[0].text)
def test_list_databases_with_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_databases(marker=marker)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND schema_name > '" + marker + "'" in args[0].text)
def test_list_databases_with_include_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_databases(marker=marker, include_marker=True)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue(("AND schema_name >= '%s'" % marker) in args[0].text)
def test_list_users(self):
self.mySqlAdmin.list_users()
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertFalse("AND Marker > '" in args[0].text)
def test_list_users_with_limit(self):
limit = 2
self.mySqlAdmin.list_users(limit)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
("LIMIT " + str(limit + 1)),
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
def test_list_users_with_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_users(marker=marker)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host, Marker",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND Marker > '" + marker + "'" in args[0].text)
def test_list_users_with_include_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_users(marker=marker, include_marker=True)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND Marker >= '" + marker + "'" in args[0].text)
@patch.object(dbaas.MySqlAdmin, '_associate_dbs')
def test_get_user(self, mock_associate_dbs):
"""
Unit tests for mySqlAdmin.get_user.
        This test case checks that the SQL query formed by the get_user
        method matches the expected query.
"""
username = "user1"
hostname = "%"
user = [{"User": "user1", "Host": "%", 'Password': 'some_thing'}]
dbaas.LocalSqlClient.execute.return_value.fetchall = Mock(
return_value=user)
self.mySqlAdmin.get_user(username, hostname)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost' AND User = 'user1'",
"ORDER BY User, Host",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertEqual(1, mock_associate_dbs.call_count)
def test_fail_get_user(self):
username = "os_admin"
hostname = "host"
self.assertRaisesRegexp(BadRequest, "Username os_admin is not valid",
self.mySqlAdmin.get_user, username, hostname)
def test_grant_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['db1']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.grant_access('test_user', '%', databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("GRANT ALL PRIVILEGES ON `db1`.* TO `test_user`@`%` "
"IDENTIFIED BY PASSWORD 'some_password';")
self.assertEqual(expected, args[0].text,
"Grant access queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_fail_grant_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['mysql']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.grant_access('test_user', '%', databases)
        # 'mysql' is a system database that access should not be granted
        # to, so verify that execute was never called by grant_access.
dbaas.LocalSqlClient.execute.assert_not_called()
def test_is_root_enabled(self):
self.mySqlAdmin.is_root_enabled()
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("SELECT User FROM mysql.user WHERE "
"User = 'root' AND Host != 'localhost';")
self.assertEqual(expected, args[0].text,
"Find root enabled queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_revoke_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['db1']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.revoke_access('test_usr', '%', databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("REVOKE ALL ON `['db1']`.* FROM `test_user`@`%`;")
self.assertEqual(expected, args[0].text,
"Revoke access queries are not the same")
def test_list_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.databases = ['db1', 'db2']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
databases = self.mySqlAdmin.list_access('test_usr', '%')
self.assertEqual(2, len(databases),
"List access queries are not the same")
class MySqlAppTest(testtools.TestCase):
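    """Tests for MySqlApp service control, configuration and replication."""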
def setUp(self):
super(MySqlAppTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_time_sleep = time.sleep
self.orig_unlink = os.unlink
self.orig_get_auth_password = dbaas.get_auth_password
self.orig_service_discovery = operating_system.service_discovery
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mySqlApp = MySqlApp(self.appStatus)
mysql_service = {'cmd_start': Mock(),
'cmd_stop': Mock(),
'cmd_enable': Mock(),
'cmd_disable': Mock(),
'bin': Mock()}
operating_system.service_discovery = Mock(
return_value=mysql_service)
time.sleep = Mock()
os.unlink = Mock()
dbaas.get_auth_password = Mock()
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
time.sleep = self.orig_time_sleep
os.unlink = self.orig_unlink
operating_system.service_discovery = self.orig_service_discovery
dbaas.get_auth_password = self.orig_get_auth_password
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
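    # The helpers below wire the mocked start_mysql/stop_db calls so the
    # fake app status transitions to the desired service state.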
def mysql_starts_successfully(self):
def start(update_db=False):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql.side_effect = start
def mysql_starts_unsuccessfully(self):
def start():
raise RuntimeError("MySQL failed to start!")
self.mySqlApp.start_mysql.side_effect = start
def mysql_stops_successfully(self):
def stop():
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db.side_effect = stop
def mysql_stops_unsuccessfully(self):
def stop():
raise RuntimeError("MySQL failed to stop!")
self.mySqlApp.stop_db.side_effect = stop
def test_stop_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
@patch.object(utils, 'execute_with_timeout')
def test_stop_mysql_do_not_start_on_reboot(self, mock_execute):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True, True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
self.assertEqual(2, mock_execute.call_count)
def test_stop_mysql_error(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mySqlApp.stop_db)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_stop_mysql_key_error(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp.stop_db)
self.assertEqual(0, mock_execute.call_count)
def test_restart_is_successful(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
self.mySqlApp.restart()
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_restart_mysql_wont_start_up(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_unsuccessfully()
self.mysql_starts_unsuccessfully()
self.assertRaises(RuntimeError, self.mySqlApp.restart)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_wipe_ib_logfiles_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError,
self.mySqlApp.wipe_ib_logfiles)
def test_start_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.start_mysql()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql(update_db=True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_start_mysql_runs_forever(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_mysql_error(self):
self.mySqlApp._enable_mysql_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
def test_start_db_with_conf_changes(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_starts_successfully()
self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.mySqlApp.start_db_with_conf_changes(Mock())
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertEqual(rd_instance.ServiceStatuses.RUNNING,
self.appStatus._get_actual_db_status())
def test_start_db_with_conf_changes_mysql_is_running(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mySqlApp.start_db_with_conf_changes,
Mock())
def test_remove_overrides(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError, self.mySqlApp.start_mysql)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'remove')
@patch.object(dbaas, 'get_auth_password', return_value='some_password')
@patch.object(dbaas.MySqlApp, '_write_config_overrides')
def test_reset_configuration(self, mock_write_overrides,
mock_get_auth_password, mock_remove,
mock_move):
configuration = {'config_contents': 'some junk'}
self.mySqlApp.reset_configuration(configuration=configuration)
self.assertEqual(1, mock_get_auth_password.call_count)
self.assertEqual(2, mock_move.call_count)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(0, mock_write_overrides.call_count)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'remove')
@patch.object(dbaas.MySqlApp, '_write_config_overrides')
def test__write_mycnf(self, mock_write_overrides, mock_remove, mock_move):
self.mySqlApp._write_mycnf('some_password', 'some junk', 'something')
self.assertEqual(2, mock_move.call_count)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(1, mock_write_overrides.call_count)
def test_mysql_error_in_write_config_verify_unlink(self):
configuration = {'config_contents': 'some junk'}
dbaas.utils.execute_with_timeout = (
Mock(side_effect=ProcessExecutionError('something')))
self.assertRaises(ProcessExecutionError,
self.mySqlApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, dbaas.utils.execute_with_timeout.call_count)
self.assertEqual(1, os.unlink.call_count)
self.assertEqual(1, dbaas.get_auth_password.call_count)
def test_mysql_error_in_write_config(self):
configuration = {'config_contents': 'some junk'}
dbaas.utils.execute_with_timeout = (
Mock(side_effect=ProcessExecutionError('something')))
self.assertRaises(ProcessExecutionError,
self.mySqlApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, dbaas.utils.execute_with_timeout.call_count)
self.assertEqual(1, dbaas.get_auth_password.call_count)
@patch.object(utils, 'execute_with_timeout')
def test__enable_mysql_on_boot(self, mock_execute):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.mySqlApp._enable_mysql_on_boot()
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(mysql_service['cmd_enable'],
shell=True)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._enable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(utils, 'execute_with_timeout')
def test__disable_mysql_on_boot(self, mock_execute):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.mySqlApp._disable_mysql_on_boot()
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(mysql_service['cmd_disable'],
shell=True)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._disable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'chmod')
@patch.object(utils, 'execute_with_timeout')
def test_update_overrides(self, mock_execute, mock_chmod, mock_move):
override_value = 'something'
self.mySqlApp.update_overrides(override_value)
with open(dbaas.MYCNF_OVERRIDES_TMP, 'r') as test_file:
test_data = test_file.read()
self.assertEqual(override_value, test_data)
mock_chmod.assert_called_with(dbaas.MYCNF_OVERRIDES,
dbaas.FileMode.SET_GRP_RW_OTH_R,
as_root=True)
mock_move.assert_called_with(dbaas.MYCNF_OVERRIDES_TMP,
dbaas.MYCNF_OVERRIDES, as_root=True)
# Remove the residual file
os.remove(dbaas.MYCNF_OVERRIDES_TMP)
@patch.object(os.path, 'exists', return_value=True)
@patch.object(operating_system, 'remove')
def test_remove_override(self, mock_remove, mock_exists):
self.mySqlApp.remove_overrides()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(1, mock_exists.call_count)
mock_remove.assert_called_once_with(ANY, as_root=True)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'chmod')
def test_write_replication_source_overrides(self, mock_chmod, mock_move):
self.mySqlApp.write_replication_source_overrides('something')
self.assertEqual(1, mock_move.call_count)
self.assertEqual(1, mock_chmod.call_count)
@patch.object(dbaas.MySqlApp, '_write_replication_overrides')
def test_write_replication_replica_overrides(self, mock_write_overrides):
self.mySqlApp.write_replication_replica_overrides('something')
self.assertEqual(1, mock_write_overrides.call_count)
@patch.object(os.path, 'exists', return_value=True)
@patch.object(operating_system, 'remove')
    def test_remove_replication_source_overrides(self, mock_remove,
                                                 mock_exists):
self.mySqlApp.remove_replication_source_overrides()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(1, mock_exists.call_count)
@patch.object(dbaas.MySqlApp, '_remove_replication_overrides')
def test_remove_replication_replica_overrides(self, mock_remove_overrides):
self.mySqlApp.remove_replication_replica_overrides()
self.assertEqual(1, mock_remove_overrides.call_count)
@patch.object(os.path, 'exists', return_value=True)
def test_exists_replication_source_overrides(self, mock_exists):
self.assertTrue(self.mySqlApp.exists_replication_source_overrides())
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_grant_replication_privilege(self, *args):
replication_user = {'name': 'testUSr', 'password': 'somePwd'}
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.grant_replication_privilege(replication_user)
args, _ = self.mock_execute.call_args_list[0]
expected = ("GRANT REPLICATION SLAVE ON *.* TO `testUSr`@`%` "
"IDENTIFIED BY 'somePwd';")
self.assertEqual(expected, args[0].text,
"Replication grant statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_port(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.get_port()
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@port")
self.assertEqual(expected, args[0],
"Port queries are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_binlog_position(self, *args):
result = {'File': 'mysql-bin.003', 'Position': '73'}
self.mock_execute.return_value.first = Mock(return_value=result)
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
found_result = self.mySqlApp.get_binlog_position()
self.assertEqual(result['File'], found_result['log_file'])
self.assertEqual(result['Position'], found_result['position'])
args, _ = self.mock_execute.call_args_list[0]
expected = ("SHOW MASTER STATUS")
self.assertEqual(expected, args[0],
"Master status queries are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_execute_on_client(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.execute_on_client('show tables')
args, _ = self.mock_execute.call_args_list[0]
expected = ("show tables")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_start_slave(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.start_slave()
args, _ = self.mock_execute.call_args_list[0]
expected = ("START SLAVE")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_stop_slave_with_failover(self, *args):
self.mock_execute.return_value.first = Mock(
return_value={'Master_User': 'root'})
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.stop_slave(True)
self.assertEqual('root', result['replication_user'])
expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL"]
self.assertEqual(len(expected), len(self.mock_execute.call_args_list))
for i in range(len(self.mock_execute.call_args_list)):
args, _ = self.mock_execute.call_args_list[i]
self.assertEqual(expected[i], args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_stop_slave_without_failover(self, *args):
self.mock_execute.return_value.first = Mock(
return_value={'Master_User': 'root'})
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.stop_slave(False)
self.assertEqual('root', result['replication_user'])
expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL",
"DROP USER root"]
self.assertEqual(len(expected), len(self.mock_execute.call_args_list))
for i in range(len(self.mock_execute.call_args_list)):
args, _ = self.mock_execute.call_args_list[i]
self.assertEqual(expected[i], args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_stop_master(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.stop_master()
args, _ = self.mock_execute.call_args_list[0]
expected = ("RESET MASTER")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__wait_for_slave_status(self, *args):
mock_client = Mock()
mock_client.execute = Mock()
result = ['Slave_running', 'on']
mock_client.execute.return_value.first = Mock(return_value=result)
self.mySqlApp._wait_for_slave_status('ON', mock_client, 5)
args, _ = mock_client.execute.call_args_list[0]
expected = ("SHOW GLOBAL STATUS like 'slave_running'")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(utils, 'poll_until', side_effect=PollTimeOut)
def test_fail__wait_for_slave_status(self, *args):
self.assertRaisesRegexp(RuntimeError,
"Replication is not on after 5 seconds.",
self.mySqlApp._wait_for_slave_status, 'ON',
Mock(), 5)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__get_slave_status(self, *args):
self.mock_execute.return_value.first = Mock(return_value='some_thing')
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp._get_slave_status()
self.assertEqual('some_thing', result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SHOW SLAVE STATUS")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_latest_txn_id(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value=['some_thing'])
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.get_latest_txn_id()
self.assertEqual('some_thing', result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@global.gtid_executed")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_wait_for_txn(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.wait_for_txn('abcd')
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('abcd')")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_txn_count(self, *args):
self.mock_execute.return_value.first = Mock(
return_value=['b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1'])
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.get_txn_count()
self.assertEqual(1, result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@global.gtid_executed")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
class MySqlAppInstallTest(MySqlAppTest):
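    """Tests for MySqlApp installation, securing and override application."""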
def setUp(self):
super(MySqlAppInstallTest, self).setUp()
self.orig_create_engine = sqlalchemy.create_engine
self.orig_pkg_version = dbaas.packager.pkg_version
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppInstallTest, self).tearDown()
sqlalchemy.create_engine = self.orig_create_engine
dbaas.packager.pkg_version = self.orig_pkg_version
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_install(self):
self.mySqlApp._install_mysql = Mock()
pkg.Package.pkg_is_installed = Mock(return_value=False)
utils.execute_with_timeout = Mock()
pkg.Package.pkg_install = Mock()
self.mySqlApp._clear_mysql_config = Mock()
self.mySqlApp._create_mysql_confd_dir = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.install_if_needed(["package"])
self.assertTrue(pkg.Package.pkg_install.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_secure(self):
dbaas.clear_expired_password = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
sqlalchemy.create_engine = Mock()
self.mySqlApp.secure('contents', None)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(utils, 'generate_random_password',
return_value='some_password')
def test_secure_root(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.secure_root()
update_root_password, _ = self.mock_execute.call_args_list[0]
update_expected = ("UPDATE mysql.user SET Password="
"PASSWORD('some_password') "
"WHERE User = 'root' AND Host = 'localhost';")
remove_root, _ = self.mock_execute.call_args_list[1]
remove_expected = ("DELETE FROM mysql.user WHERE "
"User = 'root' AND Host != 'localhost';")
self.assertEqual(update_expected, update_root_password[0].text,
"Update root password queries are not the same")
self.assertEqual(remove_expected, remove_root[0].text,
"Remove root queries are not the same")
@patch.object(operating_system, 'create_directory')
def test__create_mysql_confd_dir(self, mkdir_mock):
self.mySqlApp._create_mysql_confd_dir()
mkdir_mock.assert_called_once_with('/etc/mysql/conf.d', as_root=True)
@patch.object(operating_system, 'move')
def test__clear_mysql_config(self, mock_move):
self.mySqlApp._clear_mysql_config()
self.assertEqual(3, mock_move.call_count)
@patch.object(operating_system, 'move', side_effect=ProcessExecutionError)
def test_exception__clear_mysql_config(self, mock_move):
self.mySqlApp._clear_mysql_config()
        # The call count must match the normal case because the exception
        # is swallowed so that the flow moves on to the next file move.
self.assertEqual(3, mock_move.call_count)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_apply_overrides(self, *args):
overrides = {'sort_buffer_size': 1000000}
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.apply_overrides(overrides)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SET GLOBAL sort_buffer_size=1000000")
self.assertEqual(expected, args[0].text,
"Set global statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_make_read_only(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.make_read_only('ON')
args, _ = self.mock_execute.call_args_list[0]
expected = ("set global read_only = ON")
self.assertEqual(expected, args[0].text,
"Set read_only statements are not the same")
def test_install_install_error(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
pkg.Package.pkg_is_installed = Mock(return_value=False)
self.mySqlApp._clear_mysql_config = Mock()
self.mySqlApp._create_mysql_confd_dir = Mock()
pkg.Package.pkg_install = \
Mock(side_effect=pkg.PkgPackageStateError("Install error"))
self.assertRaises(pkg.PkgPackageStateError,
self.mySqlApp.install_if_needed, ["package"])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_secure_write_conf_error(self):
dbaas.clear_expired_password = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mySqlApp._write_mycnf = Mock(
side_effect=IOError("Could not write file"))
self.mysql_stops_successfully()
self.mysql_starts_successfully()
sqlalchemy.create_engine = Mock()
self.assertRaises(IOError, self.mySqlApp.secure, "foo", None)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class TextClauseMatcher(object):
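    """Matcher that compares a substring against a SQLAlchemy text clause."""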
def __init__(self, text):
self.text = text
def __repr__(self):
return "TextClause(%s)" % self.text
def __eq__(self, arg):
print("Matching %s" % arg.text)
return self.text in arg.text
def mock_sql_connection():
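    """Patch sqlalchemy and LocalSqlClient, returning the mock connection."""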
utils.execute_with_timeout = MagicMock(return_value=['fake_password',
None])
mock_engine = MagicMock()
sqlalchemy.create_engine = MagicMock(return_value=mock_engine)
mock_conn = MagicMock()
dbaas.LocalSqlClient.__enter__ = MagicMock(return_value=mock_conn)
dbaas.LocalSqlClient.__exit__ = MagicMock(return_value=None)
return mock_conn
class MySqlAppMockTest(testtools.TestCase):
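    """Tests for MySqlApp.secure() using a fully mocked SQL connection."""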
def setUp(self):
super(MySqlAppMockTest, self).setUp()
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
def tearDown(self):
super(MySqlAppMockTest, self).tearDown()
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_secure_keep_root(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
utils.execute_with_timeout = MagicMock(return_value=None)
# skip writing the file for now
with patch.object(os.path, 'isfile', return_value=False):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
dbaas.clear_expired_password = MagicMock(return_value=None)
app = MySqlApp(mock_status)
app._write_mycnf = MagicMock(return_value=True)
app.start_mysql = MagicMock(return_value=None)
app.stop_db = MagicMock(return_value=None)
app.secure('foo', None)
self.assertTrue(mock_conn.execute.called)
def test_secure_with_mycnf_error(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_stop': 'service mysql stop'}):
utils.execute_with_timeout = MagicMock(return_value=None)
# skip writing the file for now
with patch.object(os.path, 'isfile', return_value=False):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
dbaas.clear_expired_password = MagicMock(return_value=None)
app = MySqlApp(mock_status)
dbaas.clear_expired_password = MagicMock(return_value=None)
self.assertRaises(TypeError, app.secure, None, None)
self.assertTrue(mock_conn.execute.called)
# At least called twice
self.assertTrue(mock_conn.execute.call_count >= 2)
(mock_status.wait_for_real_status_to_change_to.
assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN,
app.state_change_wait_time, False))
class MySqlRootStatusTest(testtools.TestCase):
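    """Tests for MySqlRootAccess root enablement and status checks."""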
def setUp(self):
super(MySqlRootStatusTest, self).setUp()
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
def tearDown(self):
super(MySqlRootStatusTest, self).tearDown()
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_root_is_enabled(self):
mock_conn = mock_sql_connection()
mock_rs = MagicMock()
mock_rs.rowcount = 1
with patch.object(mock_conn, 'execute', return_value=mock_rs):
self.assertThat(MySqlRootAccess().is_root_enabled(), Is(True))
def test_root_is_not_enabled(self):
mock_conn = mock_sql_connection()
mock_rs = MagicMock()
mock_rs.rowcount = 0
with patch.object(mock_conn, 'execute', return_value=mock_rs):
self.assertThat(MySqlRootAccess.is_root_enabled(), Equals(False))
def test_enable_root(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
# invocation
user_ser = MySqlRootAccess.enable_root()
# verification
self.assertThat(user_ser, Not(Is(None)))
mock_conn.execute.assert_any_call(TextClauseMatcher('CREATE USER'),
user='root', host='%')
mock_conn.execute.assert_any_call(TextClauseMatcher(
'GRANT ALL PRIVILEGES ON *.*'))
mock_conn.execute.assert_any_call(TextClauseMatcher(
'UPDATE mysql.user'))
def test_enable_root_failed(self):
with patch.object(models.MySQLUser, '_is_valid_user_name',
return_value=False):
self.assertRaises(ValueError, MySqlAdmin().enable_root)
class MockStats:
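    """Fake os.statvfs result: 4096-byte blocks, 4 GB total, 2 GB free."""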
f_blocks = 1024 ** 2
f_bsize = 4096
f_bfree = 512 * 1024
class InterrogatorTest(testtools.TestCase):
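    """Tests for the to_gb and get_filesystem_volume_stats helpers."""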
def tearDown(self):
super(InterrogatorTest, self).tearDown()
def test_to_gb(self):
result = to_gb(123456789)
self.assertEqual(0.11, result)
def test_to_gb_zero(self):
result = to_gb(0)
self.assertEqual(0.0, result)
def test_get_filesystem_volume_stats(self):
with patch.object(os, 'statvfs', return_value=MockStats):
result = get_filesystem_volume_stats('/some/path/')
self.assertEqual(4096, result['block_size'])
self.assertEqual(1048576, result['total_blocks'])
self.assertEqual(524288, result['free_blocks'])
self.assertEqual(4.0, result['total'])
self.assertEqual(2147483648, result['free'])
self.assertEqual(2.0, result['used'])
def test_get_filesystem_volume_stats_error(self):
with patch.object(os, 'statvfs', side_effect=OSError):
self.assertRaises(
RuntimeError,
get_filesystem_volume_stats, '/nonexistent/path')
class ServiceRegistryTest(testtools.TestCase):
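    """Tests for datastore_registry manager lookups."""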
def setUp(self):
super(ServiceRegistryTest, self).setUp()
def tearDown(self):
super(ServiceRegistryTest, self).tearDown()
def test_datastore_registry_with_extra_manager(self):
datastore_registry_ext_test = {
'test': 'trove.guestagent.datastore.test.manager.Manager',
}
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual(datastore_registry_ext_test.get('test', None),
test_dict.get('test'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.'
'couchbase.manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
def test_datastore_registry_with_existing_manager(self):
datastore_registry_ext_test = {
'mysql': 'trove.guestagent.datastore.mysql.'
'manager.Manager123',
}
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager123',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.couchbase.'
'manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
'manager.Manager',
test_dict.get('vertica'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
def test_datastore_registry_with_blank_dict(self):
datastore_registry_ext_test = dict()
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.couchbase.'
'manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
'manager.Manager',
test_dict.get('vertica'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
class KeepAliveConnectionTest(testtools.TestCase):
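    """Tests for KeepAliveConnection.checkout error handling."""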
class OperationalError(Exception):
        def __init__(self, value):
            self.value = value
            self.args = [value]
        def __str__(self):
            return repr(self.value)
def setUp(self):
super(KeepAliveConnectionTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_LOG_err = dbaas.LOG
def tearDown(self):
super(KeepAliveConnectionTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.LOG = self.orig_LOG_err
def test_checkout_type_error(self):
dbapi_con = Mock()
dbapi_con.ping = Mock(side_effect=TypeError("Type Error"))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(TypeError, self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
def test_checkout_disconnection_error(self):
dbapi_con = Mock()
dbapi_con.OperationalError = self.OperationalError
dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(sqlalchemy.exc.DisconnectionError,
self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
def test_checkout_operation_error(self):
dbapi_con = Mock()
dbapi_con.OperationalError = self.OperationalError
dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(self.OperationalError, self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
class BaseDbStatusTest(testtools.TestCase):
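    """Tests for the BaseDbStatus install/restart state tracking."""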
def setUp(self):
super(BaseDbStatusTest, self).setUp()
util.init_db()
self.orig_dbaas_time_sleep = time.sleep
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(BaseDbStatusTest, self).tearDown()
time.sleep = self.orig_dbaas_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def test_begin_install(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.begin_install()
self.assertEqual(rd_instance.ServiceStatuses.BUILDING,
self.baseDbStatus.status)
def test_begin_restart(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.restart_mode = False
self.baseDbStatus.begin_restart()
self.assertTrue(self.baseDbStatus.restart_mode)
def test_end_install_or_restart(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.SHUTDOWN)
self.baseDbStatus.end_install_or_restart()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
self.baseDbStatus.status)
self.assertFalse(self.baseDbStatus.restart_mode)
def test_is_installed(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(self.baseDbStatus.is_installed)
def test_is_installed_none(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = None
self.assertTrue(self.baseDbStatus.is_installed)
def test_is_installed_building(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.BUILDING
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_installed_new(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.NEW
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_installed_failed(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.FAILED
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_restarting(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.restart_mode = True
self.assertTrue(self.baseDbStatus._is_restarting)
def test_is_running(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(self.baseDbStatus.is_running)
def test_is_running_not(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.assertFalse(self.baseDbStatus.is_running)
def test_wait_for_real_status_to_change_to(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.RUNNING)
time.sleep = Mock()
self.assertTrue(self.baseDbStatus.
wait_for_real_status_to_change_to
(rd_instance.ServiceStatuses.RUNNING, 10))
def test_wait_for_real_status_to_change_to_timeout(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.RUNNING)
time.sleep = Mock()
self.assertFalse(self.baseDbStatus.
wait_for_real_status_to_change_to
(rd_instance.ServiceStatuses.SHUTDOWN, 10))
class MySqlAppStatusTest(testtools.TestCase):
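    """Tests for MySqlAppStatus._get_actual_db_status detection."""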
def setUp(self):
super(MySqlAppStatusTest, self).setUp()
util.init_db()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_load_mysqld_options = dbaas.load_mysqld_options
self.orig_dbaas_os_path_exists = dbaas.os.path.exists
self.orig_dbaas_time_sleep = time.sleep
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(MySqlAppStatusTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.load_mysqld_options = self.orig_load_mysqld_options
dbaas.os.path.exists = self.orig_dbaas_os_path_exists
time.sleep = self.orig_dbaas_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def test_get_actual_db_status(self):
dbaas.utils.execute_with_timeout = Mock(return_value=(None, None))
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
@patch.object(utils, 'execute_with_timeout',
side_effect=ProcessExecutionError())
@patch.object(os.path, 'exists', return_value=True)
def test_get_actual_db_status_error_crashed(self, mock_exists,
mock_execute):
dbaas.load_mysqld_options = Mock(return_value={})
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)
def test_get_actual_db_status_error_shutdown(self):
mocked = Mock(side_effect=ProcessExecutionError())
dbaas.utils.execute_with_timeout = mocked
dbaas.load_mysqld_options = Mock(return_value={})
dbaas.os.path.exists = Mock(return_value=False)
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
def test_get_actual_db_status_error_blocked(self):
dbaas.utils.execute_with_timeout = MagicMock(
side_effect=[ProcessExecutionError(), ("some output", None)])
dbaas.load_mysqld_options = Mock()
dbaas.os.path.exists = Mock(return_value=True)
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.BLOCKED, status)
class TestRedisApp(testtools.TestCase):
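    """Tests for RedisApp installation, boot configuration and start/stop."""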
def setUp(self):
super(TestRedisApp, self).setUp()
self.FAKE_ID = 1000
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
with patch.multiple(RedisApp, _build_admin_client=DEFAULT,
_init_overrides_dir=DEFAULT):
self.app = RedisApp(state_change_wait_time=0)
self.orig_os_path_isfile = os.path.isfile
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
utils.execute_with_timeout = Mock()
rservice.utils.execute_with_timeout = Mock()
def tearDown(self):
super(TestRedisApp, self).tearDown()
self.app = None
os.path.isfile = self.orig_os_path_isfile
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
rservice.utils.execute_with_timeout = \
self.orig_utils_execute_with_timeout
def test_install_if_needed_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
with patch.object(RedisApp, '_install_redis', return_value=None):
self.app.install_if_needed('bar')
pkg.Package.pkg_is_installed.assert_any_call('bar')
self.assertEqual(0, RedisApp._install_redis.call_count)
def test_install_if_needed_not_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
with patch.object(RedisApp, '_install_redis', return_value=None):
self.app.install_if_needed('asdf')
pkg.Package.pkg_is_installed.assert_any_call('asdf')
RedisApp._install_redis.assert_any_call('asdf')
def test_install_redis(self):
with patch.object(utils, 'execute_with_timeout'):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
self.app._install_redis('redis')
pkg.Package.pkg_install.assert_any_call('redis', {}, 1200)
RedisApp.start_redis.assert_any_call()
self.assertTrue(utils.execute_with_timeout.called)
def test_enable_redis_on_boot_without_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_enable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._enable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_enable_redis_on_boot_with_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_enable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._enable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_disable_redis_on_boot_with_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_disable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._disable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_disable_redis_on_boot_without_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_disable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._disable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_stop_db_without_fail(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.status = mock_status
RedisApp._disable_redis_on_boot = MagicMock(
return_value=None)
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.stop_db(do_not_start_on_reboot=True)
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
def test_stop_db_with_failure(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.status = mock_status
RedisApp._disable_redis_on_boot = MagicMock(
return_value=None)
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
self.app.stop_db(do_not_start_on_reboot=True)
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(mock_status.end_install_or_restart.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
def test_restart(self):
mock_status = MagicMock()
self.app.status = mock_status
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(RedisApp, 'stop_db', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
self.app.restart()
mock_status.begin_restart.assert_any_call()
RedisApp.stop_db.assert_any_call()
RedisApp.start_redis.assert_any_call()
mock_status.end_install_or_restart.assert_any_call()
def test_start_redis(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self._assert_start_redis(mock_status)
@patch.object(utils, 'execute_with_timeout')
def test_start_redis_with_failure(self, exec_mock):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
mock_status.end_install_or_restart = MagicMock()
self._assert_start_redis(mock_status)
exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
run_as_root=True, root_helper='sudo')
mock_status.end_install_or_restart.assert_called_once_with()
@patch.multiple(operating_system, start_service=DEFAULT,
enable_service_on_boot=DEFAULT)
def _assert_start_redis(self, mock_status, start_service,
enable_service_on_boot):
self.app.status = mock_status
self.app.start_redis()
mock_status.wait_for_real_status_to_change_to.assert_called_once_with(
rd_instance.ServiceStatuses.RUNNING, ANY, False)
enable_service_on_boot.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
start_service.assert_called_once_with(RedisSystem.SERVICE_CANDIDATES)
class CassandraDBAppTest(testtools.TestCase):
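    """Tests for CassandraApp service control and installation."""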
def setUp(self):
super(CassandraDBAppTest, self).setUp()
self.utils_execute_with_timeout = (
cass_service.utils.execute_with_timeout)
self.sleep = time.sleep
self.pkg_version = cass_service.packager.pkg_version
self.pkg = cass_service.packager
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.cassandra = cass_service.CassandraApp(self.appStatus)
self.orig_unlink = os.unlink
def tearDown(self):
super(CassandraDBAppTest, self).tearDown()
cass_service.utils.execute_with_timeout = (self.
utils_execute_with_timeout)
time.sleep = self.sleep
cass_service.packager.pkg_version = self.pkg_version
cass_service.packager = self.pkg
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.cassandra.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_with_db_update(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.cassandra.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_stop_db_error(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.cassandra.stop_db)
def test_restart(self):
self.cassandra.stop_db = Mock()
self.cassandra.start_db = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.restart()
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra_runs_forever(self):
cass_service.utils.execute_with_timeout = Mock()
(self.cassandra.status.
wait_for_real_status_to_change_to) = Mock(return_value=False)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.cassandra.stop_db)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_db_with_db_update(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.cassandra.start_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra_error(self):
self.cassandra._enable_db_on_boot = Mock()
self.cassandra.state_change_wait_time = 1
cass_service.utils.execute_with_timeout = Mock(
side_effect=ProcessExecutionError('Error'))
self.assertRaises(RuntimeError, self.cassandra.start_db)
def test_install(self):
self.cassandra._install_db = Mock()
self.pkg.pkg_is_installed = Mock(return_value=False)
self.cassandra.install_if_needed(['cassandra'])
self.assertTrue(self.cassandra._install_db.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_install_install_error(self):
self.cassandra.start_db = Mock()
self.cassandra.stop_db = Mock()
self.pkg.pkg_is_installed = Mock(return_value=False)
self.cassandra._install_db = Mock(
side_effect=pkg.PkgPackageStateError("Install error"))
self.assertRaises(pkg.PkgPackageStateError,
self.cassandra.install_if_needed,
['cassandra=1.2.10'])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_cassandra_error_in_write_config_verify_unlink(self):
# this test verifies not only that the write_config
# method properly invoked execute, but also that it properly
# attempted to unlink the file (as a result of the exception)
mock_unlink = Mock(return_value=0)
# We call tempfile.mkstemp() here and Mock() the mkstemp()
# parameter to write_config for testability.
(temp_handle, temp_config_name) = tempfile.mkstemp()
mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
configuration = 'this is my configuration'
with patch('trove.guestagent.common.operating_system.move',
side_effect=ProcessExecutionError('some exception')):
self.assertRaises(ProcessExecutionError,
self.cassandra.write_config,
config_contents=configuration,
execute_function=Mock(),
mkstemp_function=mock_mkstemp,
unlink_function=mock_unlink)
self.assertEqual(1, mock_unlink.call_count)
# really delete the temporary_config_file
os.unlink(temp_config_name)
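    # Context for the injection pattern above (an editor's sketch inferred from
    # the calls in this test, not the actual guestagent source): the failure
    # path is testable because write_config() accepts its side-effecting
    # helpers as parameters, roughly along the lines of
    #
    #     def write_config(self, config_contents,
    #                      execute_function=...,   # the defaults are assumptions
    #                      mkstemp_function=...,
    #                      unlink_function=...):
    #         ...
    #
    # so mkstemp/unlink can be swapped for mocks without touching the real
    # CASSANDRA_CONF file.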
@patch.multiple('trove.guestagent.common.operating_system',
chown=DEFAULT, chmod=DEFAULT, move=DEFAULT)
def test_cassandra_write_config(self, chown, chmod, move):
# ensure that write_config creates a temporary file, and then
# moves the file to the final place. Also validate the
# contents of the file written.
# We call tempfile.mkstemp() here and Mock() the mkstemp()
# parameter to write_config for testability.
(temp_handle, temp_config_name) = tempfile.mkstemp()
mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
configuration = 'some arbitrary configuration text'
mock_execute = MagicMock(return_value=('', ''))
self.cassandra.write_config(configuration,
execute_function=mock_execute,
mkstemp_function=mock_mkstemp)
move.assert_called_with(temp_config_name, cass_system.CASSANDRA_CONF,
as_root=True)
chown.assert_called_with(cass_system.CASSANDRA_CONF,
"cassandra", "cassandra", recursive=False,
as_root=True)
chmod.assert_called_with(
cass_system.CASSANDRA_CONF, FileMode.ADD_READ_ALL, as_root=True)
self.assertEqual(1, mock_mkstemp.call_count)
with open(temp_config_name, 'r') as config_file:
configuration_data = config_file.read()
self.assertEqual(configuration, configuration_data)
# really delete the temporary_config_file
os.unlink(temp_config_name)
class CouchbaseAppTest(testtools.TestCase):
def fake_couchbase_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(CouchbaseAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
couchservice.utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
time.sleep = Mock()
self.orig_service_discovery = operating_system.service_discovery
self.orig_get_ip = netutils.get_my_ipv4
operating_system.service_discovery = (
self.fake_couchbase_service_discovery)
netutils.get_my_ipv4 = Mock()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.couchbaseApp = couchservice.CouchbaseApp(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(CouchbaseAppTest, self).tearDown()
couchservice.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
netutils.get_my_ipv4 = self.orig_get_ip
operating_system.service_discovery = self.orig_service_discovery
time.sleep = self.orig_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.couchbaseApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_error(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.couchbaseApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp.stop_db = Mock()
self.couchbaseApp.start_db = Mock()
self.couchbaseApp.restart()
self.assertTrue(self.couchbaseApp.stop_db.called)
self.assertTrue(self.couchbaseApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp._enable_db_on_boot = Mock()
self.couchbaseApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
couchservice.utils.execute_with_timeout = mocked
self.couchbaseApp._enable_db_on_boot = Mock()
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
def test_start_db_runs_forever(self):
couchservice.utils.execute_with_timeout = Mock()
self.couchbaseApp._enable_db_on_boot = Mock()
self.couchbaseApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_install_when_couchbase_installed(self):
couchservice.packager.pkg_is_installed = Mock(return_value=True)
couchservice.utils.execute_with_timeout = Mock()
self.couchbaseApp.install_if_needed(["package"])
self.assertTrue(couchservice.packager.pkg_is_installed.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class CouchDBAppTest(testtools.TestCase):
def fake_couchdb_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(CouchDBAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
couchdb_service.utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
time.sleep = Mock()
self.orig_service_discovery = operating_system.service_discovery
self.orig_get_ip = netutils.get_my_ipv4
operating_system.service_discovery = (
self.fake_couchdb_service_discovery)
netutils.get_my_ipv4 = Mock()
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.couchdbApp = couchdb_service.CouchDBApp(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(CouchDBAppTest, self).tearDown()
couchdb_service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
netutils.get_my_ipv4 = self.orig_get_ip
operating_system.service_discovery = self.orig_service_discovery
time.sleep = self.orig_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.couchdbApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_error(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.couchdbApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp.stop_db = Mock()
self.couchdbApp.start_db = Mock()
self.couchdbApp.restart()
self.assertTrue(self.couchdbApp.stop_db.called)
self.assertTrue(self.couchdbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp._enable_db_on_boot = Mock()
self.couchdbApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_error(self):
couchdb_service.utils.execute_with_timeout = Mock(
side_effect=ProcessExecutionError('Error'))
self.couchdbApp._enable_db_on_boot = Mock()
self.assertRaises(RuntimeError, self.couchdbApp.start_db)
def test_install_when_couchdb_installed(self):
couchdb_service.packager.pkg_is_installed = Mock(return_value=True)
couchdb_service.utils.execute_with_timeout = Mock()
self.couchdbApp.install_if_needed(["package"])
self.assertTrue(couchdb_service.packager.pkg_is_installed.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class MongoDBAppTest(testtools.TestCase):
def fake_mongodb_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(MongoDBAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (mongo_service.
utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
self.orig_packager = mongo_system.PACKAGER
self.orig_service_discovery = operating_system.service_discovery
self.orig_os_unlink = os.unlink
operating_system.service_discovery = (
self.fake_mongodb_service_discovery)
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mongoDbApp = mongo_service.MongoDBApp(self.appStatus)
time.sleep = Mock()
os.unlink = Mock()
def tearDown(self):
super(MongoDBAppTest, self).tearDown()
mongo_service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
time.sleep = self.orig_time_sleep
mongo_system.PACKAGER = self.orig_packager
operating_system.service_discovery = self.orig_service_discovery
os.unlink = self.orig_os_unlink
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stopdb(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mongoDbApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_with_db_update(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mongoDbApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
def test_stop_db_error(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mongoDbApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.stop_db = Mock()
self.mongoDbApp.start_db = Mock()
self.mongoDbApp.restart()
self.assertTrue(self.mongoDbApp.stop_db.called)
self.assertTrue(self.mongoDbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'running'}))
def test_start_db(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_with_update(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.start_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'running'}))
def test_start_db_runs_forever(self):
mongo_service.utils.execute_with_timeout = Mock(
return_value=["ubuntu 17036 0.0 0.1 618960 "
"29232 pts/8 Sl+ Jan29 0:07 mongod", ""])
self.mongoDbApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
def test_start_db_error(self):
self.mongoDbApp._enable_db_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
mongo_service.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
def test_mongodb_error_in_write_config_verify_unlink(self):
configuration = {'config_contents': 'some junk'}
with patch.object(os.path, 'isfile', return_value=True):
with patch.object(operating_system, 'move',
side_effect=ProcessExecutionError):
self.assertRaises(ProcessExecutionError,
self.mongoDbApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, operating_system.move.call_count)
self.assertEqual(1, os.unlink.call_count)
def test_start_db_with_conf_changes_db_is_running(self):
self.mongoDbApp.start_db = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mongoDbApp.start_db_with_conf_changes,
Mock())
def test_install_when_db_installed(self):
packager_mock = MagicMock()
packager_mock.pkg_is_installed = MagicMock(return_value=True)
mongo_system.PACKAGER = packager_mock
self.mongoDbApp.install_if_needed(['package'])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_install_when_db_not_installed(self):
packager_mock = MagicMock()
packager_mock.pkg_is_installed = MagicMock(return_value=False)
mongo_system.PACKAGER = packager_mock
self.mongoDbApp.install_if_needed(['package'])
packager_mock.pkg_install.assert_any_call(ANY, {}, ANY)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class VerticaAppStatusTest(testtools.TestCase):
def setUp(self):
super(VerticaAppStatusTest, self).setUp()
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
def tearDown(self):
super(VerticaAppStatusTest, self).tearDown()
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def test_get_actual_db_status(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(return_value=['db_srvr', None])):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
def test_get_actual_db_status_shutdown(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', None],
['db_srvr', None]])):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
def test_get_actual_db_status_error_crashed(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=ProcessExecutionError('problem'
))):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)
class VerticaAppTest(testtools.TestCase):
def setUp(self):
super(VerticaAppTest, self).setUp()
self.FAKE_ID = 1000
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.app = VerticaApp(self.appStatus)
self.setread = VolumeDevice.set_readahead_size
self.Popen = subprocess.Popen
vertica_system.shell_execute = MagicMock(return_value=('', ''))
VolumeDevice.set_readahead_size = Mock()
subprocess.Popen = Mock()
self.test_config = ConfigParser.ConfigParser()
self.test_config.add_section('credentials')
self.test_config.set('credentials',
'dbadmin_password', 'some_password')
def tearDown(self):
super(VerticaAppTest, self).tearDown()
self.app = None
VolumeDevice.set_readahead_size = self.setread
subprocess.Popen = self.Popen
def test_install_if_needed_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
self.app.install_if_needed('vertica')
pkg.Package.pkg_is_installed.assert_any_call('vertica')
self.assertEqual(0, pkg.Package.pkg_install.call_count)
def test_install_if_needed_not_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
self.app.install_if_needed('vertica')
pkg.Package.pkg_is_installed.assert_any_call('vertica')
self.assertEqual(1, pkg.Package.pkg_install.call_count)
def test_prepare_for_install_vertica(self):
self.app.prepare_for_install_vertica()
arguments = vertica_system.shell_execute.call_args_list[0]
self.assertEqual(1, VolumeDevice.set_readahead_size.call_count)
expected_command = (
"VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin "
"VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python"
" -m vertica.local_coerce")
arguments.assert_called_with(expected_command)
def test_failure_prepare_for_install_vertica(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaises(ProcessExecutionError,
self.app.prepare_for_install_vertica)
def test_install_vertica(self):
with patch.object(self.app, 'write_config',
return_value=None):
self.app.install_vertica(members='10.0.0.2')
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (
vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica'))
arguments.assert_called_with(expected_command)
def test_failure_install_vertica(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('some exception')):
self.assertRaisesRegexp(RuntimeError, 'install_vertica failed.',
self.app.install_vertica,
members='10.0.0.2')
def test_create_db(self):
with patch.object(self.app, 'read_config',
return_value=self.test_config):
self.app.create_db(members='10.0.0.2')
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr',
'/var/lib/vertica',
'/var/lib/vertica',
'some_password'))
arguments.assert_called_with(expected_command, 'dbadmin')
def test_failure_create_db(self):
with patch.object(self.app, 'read_config',
side_effect=RuntimeError('Error')):
self.assertRaisesRegexp(RuntimeError,
'Vertica database create failed.',
self.app.create_db)
# Because of an exception in read_config there was no shell execution.
self.assertEqual(0, vertica_system.shell_execute.call_count)
def test_vertica_write_config(self):
temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
mock_mkstemp = MagicMock(return_value=(temp_file_handle))
mock_unlink = Mock(return_value=0)
self.app.write_config(config=self.test_config,
temp_function=mock_mkstemp,
unlink_function=mock_unlink)
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (
("install -o root -g root -m 644 %(source)s %(target)s"
) % {'source': temp_file_handle.name,
'target': vertica_system.VERTICA_CONF})
arguments.assert_called_with(expected_command)
self.assertEqual(1, mock_mkstemp.call_count)
configuration_data = ConfigParser.ConfigParser()
configuration_data.read(temp_file_handle.name)
self.assertEqual(
self.test_config.get('credentials', 'dbadmin_password'),
configuration_data.get('credentials', 'dbadmin_password'))
self.assertEqual(1, mock_unlink.call_count)
# delete the temporary_config_file
os.unlink(temp_file_handle.name)
def test_vertica_error_in_write_config_verify_unlink(self):
mock_unlink = Mock(return_value=0)
temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
mock_mkstemp = MagicMock(return_value=temp_file_handle)
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('some exception')):
self.assertRaises(ProcessExecutionError,
self.app.write_config,
config=self.test_config,
temp_function=mock_mkstemp,
unlink_function=mock_unlink)
self.assertEqual(1, mock_unlink.call_count)
# delete the temporary_config_file
os.unlink(temp_file_handle.name)
def test_restart(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(VerticaApp, 'stop_db', return_value=None):
with patch.object(VerticaApp, 'start_db', return_value=None):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.restart()
mock_status.begin_restart.assert_any_call()
VerticaApp.stop_db.assert_any_call()
VerticaApp.start_db.assert_any_call()
def test_start_db(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_enable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.start_db()
agent_start, db_start = subprocess.Popen.call_args_list
agent_expected_command = [
'sudo', 'su', '-', 'root', '-c',
(vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')]
db_expected_cmd = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.START_DB % ('db_srvr', 'some_password'))]
self.assertTrue(mock_status.end_install_or_restart.called)
agent_start.assert_called_with(agent_expected_command)
db_start.assert_called_with(db_expected_cmd)
def test_start_db_failure(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
with patch.object(app, '_enable_db_on_boot',
side_effect=RuntimeError()):
with patch.object(app, 'read_config',
return_value=self.test_config):
self.assertRaises(RuntimeError, app.start_db)
def test_stop_db(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.stop_db()
self.assertEqual(
3, vertica_system.shell_execute.call_count)
# There are 3 shell-executions:
# a) stop vertica-agent service
                    # b) check database status
                    # c) stop_db
                    # We are matching that the 3rd command called was stop_db
arguments = vertica_system.shell_execute.call_args_list[2]
expected_cmd = (vertica_system.STOP_DB % ('db_srvr',
'some_password'))
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
arguments.assert_called_with(expected_cmd, 'dbadmin')
def test_stop_db_do_not_start_on_reboot(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=True)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
app.stop_db(do_not_start_on_reboot=True)
self.assertEqual(
3, vertica_system.shell_execute.call_count)
app._disable_db_on_boot.assert_any_call()
def test_stop_db_database_not_running(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
app.stop_db()
                # Since the database stop command does not get executed,
                # only 2 shell calls are made.
self.assertEqual(
2, vertica_system.shell_execute.call_count)
def test_stop_db_failure(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=None)
mock_status.end_install_or_restart = MagicMock(
return_value=None)
self.assertRaises(RuntimeError, app.stop_db)
def test_export_conf_to_members(self):
self.app._export_conf_to_members(members=['member1', 'member2'])
self.assertEqual(2, vertica_system.shell_execute.call_count)
def test_fail__export_conf_to_members(self):
app = VerticaApp(MagicMock())
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaises(ProcessExecutionError,
app._export_conf_to_members,
['member1', 'member2'])
def test_authorize_public_keys(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
self.app.authorize_public_keys(user=user, public_keys=keys)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
def test_authorize_public_keys_authorized_file_not_exists(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
['', '']])):
self.app.authorize_public_keys(user=user, public_keys=keys)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
def test_fail_authorize_public_keys(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
ProcessExecutionError('Some Error')
])):
self.assertRaises(ProcessExecutionError,
self.app.authorize_public_keys, user, keys)
def test_get_public_keys(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
self.app.get_public_keys(user=user)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
(vertica_system.SSH_KEY_GEN % ('/home/' + user)), user)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/id_rsa.pub')
def test_get_public_keys_if_key_exists(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
['some_key', None]])):
key = self.app.get_public_keys(user=user)
self.assertEqual(2, vertica_system.shell_execute.call_count)
self.assertEqual('some_key', key)
def test_fail_get_public_keys(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
ProcessExecutionError('Some Error')
])):
self.assertRaises(ProcessExecutionError,
self.app.get_public_keys, user)
def test_install_cluster(self):
with patch.object(self.app, 'read_config',
return_value=self.test_config):
self.app.install_cluster(members=['member1', 'member2'])
            # Verifying number of shell calls,
            # as the command has already been tested in preceding tests
self.assertEqual(5, vertica_system.shell_execute.call_count)
def test__enable_db_on_boot(self):
app = VerticaApp(MagicMock())
app._enable_db_on_boot()
restart_policy, agent_enable = subprocess.Popen.call_args_list
expected_restart_policy = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.SET_RESTART_POLICY % ('db_srvr', 'always'))]
expected_agent_enable = [
'sudo', 'su', '-', 'root', '-c',
(vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'enable')]
self.assertEqual(2, subprocess.Popen.call_count)
restart_policy.assert_called_with(expected_restart_policy)
agent_enable.assert_called_with(expected_agent_enable)
def test_failure__enable_db_on_boot(self):
with patch.object(subprocess, 'Popen', side_effect=OSError):
self.assertRaisesRegexp(RuntimeError,
'Could not enable db on boot.',
self.app._enable_db_on_boot)
def test__disable_db_on_boot(self):
app = VerticaApp(MagicMock())
app._disable_db_on_boot()
restart_policy, agent_disable = (
vertica_system.shell_execute.call_args_list)
expected_restart_policy = (
vertica_system.SET_RESTART_POLICY % ('db_srvr', 'never'))
expected_agent_disable = (
vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'disable')
self.assertEqual(2, vertica_system.shell_execute.call_count)
restart_policy.assert_called_with(expected_restart_policy, 'dbadmin')
agent_disable.assert_called_with(expected_agent_disable, 'root')
def test_failure__disable_db_on_boot(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaisesRegexp(RuntimeError,
'Could not disable db on boot.',
self.app._disable_db_on_boot)
def test_read_config(self):
app = VerticaApp(MagicMock())
with patch.object(ConfigParser, 'ConfigParser',
return_value=self.test_config):
test_config = app.read_config()
self.assertEqual('some_password',
test_config.get('credentials', 'dbadmin_password')
)
def test_fail_read_config(self):
with patch.object(ConfigParser.ConfigParser, 'read',
side_effect=ConfigParser.Error()):
self.assertRaises(RuntimeError, self.app.read_config)
def test_complete_install_or_restart(self):
app = VerticaApp(MagicMock())
app.complete_install_or_restart()
app.status.end_install_or_restart.assert_any_call()
def test_start_db_with_conf_changes(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, 'read_config',
return_value=self.test_config):
app.start_db_with_conf_changes('test_config_contents')
app.status.end_install_or_restart.assert_any_call()
class DB2AppTest(testtools.TestCase):
def setUp(self):
super(DB2AppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
db2service.utils.execute_with_timeout)
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.db2App = db2service.DB2App(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(DB2AppTest, self).tearDown()
db2service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
self.db2App = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
db2service.utils.execute_with_timeout = MagicMock(return_value=None)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.db2App.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_restart_server(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
mock_status = MagicMock(return_value=None)
app = db2service.DB2App(mock_status)
mock_status.begin_restart = MagicMock(return_value=None)
app.stop_db = MagicMock(return_value=None)
app.start_db = MagicMock(return_value=None)
app.restart()
self.assertTrue(mock_status.begin_restart.called)
self.assertTrue(app.stop_db.called)
self.assertTrue(app.start_db.called)
def test_start_db(self):
db2service.utils.execute_with_timeout = MagicMock(return_value=None)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
with patch.object(self.db2App, '_enable_db_on_boot',
return_value=None):
self.db2App.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class DB2AdminTest(testtools.TestCase):
def setUp(self):
super(DB2AdminTest, self).setUp()
self.db2Admin = db2service.DB2Admin()
self.orig_utils_execute_with_timeout = (
db2service.utils.execute_with_timeout)
def tearDown(self):
super(DB2AdminTest, self).tearDown()
db2service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
def test_delete_database(self):
with patch.object(
db2service, 'run_command',
MagicMock(
return_value=None,
side_effect=ProcessExecutionError('Error'))):
self.assertRaises(GuestError,
self.db2Admin.delete_database,
FAKE_DB)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 drop database testDB"
self.assertEqual(expected, args[0],
"Delete database queries are not the same")
def test_list_databases(self):
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
self.db2Admin.list_databases()
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 list database directory " \
"| grep -B6 -i indirect | grep 'Database name' | " \
"sed 's/.*= //'"
self.assertEqual(expected, args[0],
"Delete database queries are not the same")
def test_create_users(self):
with patch.object(db2service, 'run_command', MagicMock(
return_value=None)):
db2service.utils.execute_with_timeout = MagicMock(
return_value=None)
self.db2Admin.create_user(FAKE_USER)
self.assertTrue(db2service.utils.execute_with_timeout.called)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
"ON DATABASE TO USER random; db2 connect reset"
self.assertEqual(
expected, args[0],
"Granting database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_delete_users_with_db(self):
with patch.object(db2service, 'run_command',
MagicMock(return_value=None)):
with patch.object(db2service.DB2Admin, 'list_access',
MagicMock(return_value=None)):
utils.execute_with_timeout = MagicMock(return_value=None)
self.db2Admin.delete_user(FAKE_USER[0])
self.assertTrue(db2service.run_command.called)
self.assertTrue(db2service.utils.execute_with_timeout.called)
self.assertFalse(db2service.DB2Admin.list_access.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
"ON DATABASE FROM USER random; db2 connect reset"
self.assertEqual(
expected, args[0],
"Revoke database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_delete_users_without_db(self):
FAKE_USER.append(
{"_name": "random2", "_password": "guesswhat", "_databases": []})
with patch.object(db2service, 'run_command',
MagicMock(return_value=None)):
with patch.object(db2service.DB2Admin, 'list_access',
MagicMock(return_value=[FAKE_DB])):
utils.execute_with_timeout = MagicMock(return_value=None)
self.db2Admin.delete_user(FAKE_USER[1])
self.assertTrue(db2service.run_command.called)
self.assertTrue(db2service.DB2Admin.list_access.called)
self.assertTrue(
db2service.utils.execute_with_timeout.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
"DATAACCESS ON DATABASE FROM USER random2; " \
"db2 connect reset"
self.assertEqual(
expected, args[0],
"Revoke database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_list_users(self):
databases = []
databases.append(FAKE_DB)
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
with patch.object(self.db2Admin, "list_databases",
MagicMock(return_value=(databases, None))):
self.db2Admin.list_users()
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 +o connect to testDB; " \
"db2 -x select grantee, dataaccessauth " \
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(expected, args[0],
"List database queries are not the same")
def test_get_user(self):
databases = []
databases.append(FAKE_DB)
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
with patch.object(self.db2Admin, "list_databases",
MagicMock(return_value=(databases, None))):
self.db2Admin._get_user('random', None)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 +o connect to testDB; " \
"db2 -x select grantee, dataaccessauth " \
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(args[0], expected,
"Delete database queries are not the same")
| apache-2.0 | 275,580,507,818,071,580 | 42.310604 | 79 | 0.602201 | false | 3.921372 | true | false | false |
omegachysis/arche-engine | arche/image.py | 1 | 11087 |
_panda = False
try:
import pygame
from pygame import locals
except:
_panda = True
import logging
log = logging.getLogger("R.Surface")
def scaleImage(surface, width, height):
""" Return surface scaled to fit width and height. """
#log.debug("scaled image %s" % repr(surface))
return pygame.transform.smoothscale(surface, (width, height))
def profilerRecordImageSurfaces():
log.info("PERFORMANCE PROFILER ENGAGED: RecordImageSurfaces")
ImageSurface.debugRecordSurfaces = True
def profilerRevealPixelAlpha():
log.info("PERFORMANCE PROFILER ENGAGED: RevealPixelAlpha")
ImageSurface.debugRevealPixelAlpha = True
for surf in ImageSurface.imageSurfaces:
surf.refresh()
if not ImageSurface.debugRecordSurfaces:
log.warning("PERFORMANCE PROFILER FAILED: Not recording surfaces; "+\
"inconsistancies may occur.")
def createDefaultSurface():
surface = pygame.Surface((1,1))
surface.fill((255,255,255,255))
return surface
newDefaultSurface = createDefaultSurface
def newRectangle(width, height, color = (255,255,255)):
surface = pygame.Surface((width, height))
surface.fill(color)
return surface
class _ImageRect(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
class ImageSurfacePanda(object):
def __init__(self, surface, pixelAlpha=True):
if isinstance(surface, str):
self.surface = loader.loadTexture(surface)
def getSurface(self):
return self._surface
def setSurface(self, value):
self._surface = value
self._rect = _ImageRect(0, 0, self.width, self.height)
surface = property(getSurface, setSurface)
def getWidth(self):
return self._surface.getSimpleXSize()
def getHeight(self):
return self._surface.getSimpleYSize()
width = property(getWidth)
height = property(getHeight)
def rect(self):
try:
return self._rect
except:
return None
def refresh(self):
pass
class ImageCanvas(object):
def __init__(self, pygameSurface):
self.composite = pygameSurface.convert()
self.clip = None
def convert(self):
return self.composite.convert()
def convertAlpha(self):
return self.composite.convert_alpha()
def refresh(self):
pass
def rect(self):
return self.composite.get_rect()
def get(self):
return self.composite
class ImageSurface(object):
imageSurfaces = []
debugRecordSurfaces = False
debugRevealPixelAlpha = False
if debugRevealPixelAlpha:
log.debug("PERFORMANCE PROFILER ENGAGED: RevealPixelAlpha")
def __init__(self, surface, pixelAlpha=True):
if ImageSurface.debugRecordSurfaces:
ImageSurface.imageSurfaces.append(self)
if isinstance(surface, str):
surface = pygame.image.load(surface)
elif isinstance(surface, ImageSurface):
surface = surface.source
if surface:
if not pixelAlpha:
self._surface = surface.convert()
else:
self._surface = surface.convert_alpha()
else:
self._surface = None
self.composite = None
self._modScale = None
self._modColor = None
self._pixelAlpha = pixelAlpha
if self._surface:
self._width = self._surface.get_width()
self._height = self._surface.get_height()
else:
self._width = 0
self._height = 0
self._red = 255
self._green = 255
self._blue = 255
self._alpha = 255
if self._surface:
self.refresh()
_clip = None
_clipX = 0
_clipY = 0
def convert(self):
return self.composite.convert()
def convertAlpha(self):
return self.composite.convert_alpha()
def getPixel(self, x, y):
return self.get().get_at((x,y))
def copy(self):
return ImageSurface(self, self._pixelAlpha)
def resetClip(self):
self.setClip((0,0,self.getWidth(),self.getHeight()))
def removeClip(self):
self.setClip(None)
def getClip(self):
return self._clip
def setClip(self, value):
        # Store the new clip before applying it so applyClip() sees the
        # updated value instead of the previous one.
        self._clip = value
        if value:
            self._clipX = value[0]
            self._clipY = value[1]
            self.applyClip()
clip = property(getClip, setClip)
def getClipX(self):
return self._clipX
def setClipX(self, value):
if not self._clip:
self.resetClip()
self._clipX = value
clip = self.getClip()
self.setClip((value, clip[1], clip[2], clip[3]))
clipX = property(getClipX, setClipX)
def getClipY(self):
return self._clipY
def setClipY(self, value):
if not self._clip:
self.resetClip()
self._clipY = value
clip = self.getClip()
self.setClip((clip[0], value, clip[2], clip[3]))
clipY = property(getClipY, setClipY)
def setAllowPixelAlpha(self, allowPixelAlpha):
if allowPixelAlpha != self._pixelAlpha:
if allowPixelAlpha:
self._surface = self._surface.convert_alpha()
else:
self._surface = self._surface.convert()
self._pixelAlpha = allowPixelAlpha
def getAllowPixelAlpha(self):
return self._pixelAlpha
allowPixelAlpha = property(getAllowPixelAlpha, setAllowPixelAlpha)
def _revealPixelAlpha(self):
if self._pixelAlpha:
surface = pygame.Surface((self._width, self._height)).convert_alpha()
surface.fill((255,0,0,255))
return surface
else:
surface = pygame.Surface((self._width, self._height)).convert()
surface.fill((0,255,0,255))
return surface
def refresh(self):
""" Apply all modified image parameters. """
if self.source:
self.applyScale()
def replace(self, surface, normalize=True):
""" Replace source surface with another. """
if ImageSurface.debugRevealPixelAlpha:
surface = self._revealPixelAlpha()
if not self._pixelAlpha:
self._surface = surface.convert()
else:
self._surface = surface.convert_alpha()
self.refresh()
if normalize:
self.normalize()
def permeate(self):
""" Set the source image surface to the current composite surface. """
self.source = self.composite
def normalize(self):
""" Reset scaling parameters to fit source surface. """
self.size = self._surface.get_size()
def get(self):
""" Get the finished composite surface. """
return self.composite
def rect(self):
""" Get rectangle of compsite surface. """
if self.composite:
return self.composite.get_rect()
else:
return pygame.Rect((0,0,1,1))
def convert(self):
""" Return a converted version of the source surface. """
if not self._pixelAlpha:
return self._surface.convert()
else:
return self._surface.convert_alpha()
def applyScale(self):
# This is a slow pass. Do this as little as possible.
self._modScale = scaleImage(self._surface, int(self._width), int(self._height))
if ImageSurface.debugRevealPixelAlpha:
if self._pixelAlpha:
self._modScale.fill((255,0,0,255))
else:
self._modScale.fill((0,255,0,255))
self.applyColor()
self.applyAlpha()
self.applyClip()
def applyColor(self):
# This is a semi fast pass. Use the scaling slow passed image.
if not ImageSurface.debugRevealPixelAlpha:
if not self._pixelAlpha:
self._modColor = self._modScale.convert()
self._modColor.fill((self._red, self._green, self._blue),
None, locals.BLEND_RGB_MULT)
self.applyAlpha()
else:
self._modColor = self._modScale.convert_alpha()
self._modColor.fill((self._red, self._green, self._blue, self._alpha),
None, locals.BLEND_RGBA_MULT)
self.composite = self._modColor
else:
self.composite = self._modScale
def applyAlpha(self):
# This is a fast pass. Use the double passed image from scale and color.
if not ImageSurface.debugRevealPixelAlpha:
if not self._pixelAlpha:
self._modColor.set_alpha(self._alpha)
self.composite = self._modColor
else:
self.applyColor()
else:
self.composite = self._modScale
def applyClip(self):
# This is a very fast pass. Use the triple passed image from scale, color, and alpha
image = self._modColor
image.set_clip(self._clip)
self.composite = image
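    # Illustrative cost summary (editor addition, not original code). The
    # passes above are ordered from most to least expensive, so which property
    # is touched determines how much work is redone; "hero.png" and "screen"
    # below are hypothetical.
    #
    #     img = ImageSurface("hero.png")
    #     img.size = (64, 64)         # applyScale: slow pass (smoothscale)
    #     img.color = (255, 128, 0)   # applyColor: semi-fast pass (fill mult)
    #     img.alpha = 128             # applyAlpha: fast pass for plain surfaces;
    #                                 # falls back to the color pass when
    #                                 # per-pixel alpha is enabled
    #     img.clip = (0, 0, 32, 32)   # applyClip: very fast pass (set_clip)
    #     screen.blit(img.get(), (0, 0))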
def getSource(self):
return self._surface
def setSource(self, source):
self.replace(source, True)
source = property(getSource, setSource)
image = property(getSource, setSource)
def getWidth(self):
return self._width
def setWidth(self, width):
self._width = width
self.applyScale()
width = property(getWidth, setWidth)
def getHeight(self):
return self._height
def setHeight(self, height):
self._height = height
self.applyScale()
height = property(getHeight, setHeight)
def getSize(self):
return (self._width, self._height)
def setSize(self, size):
self._width = size[0]
self._height = size[1]
self.applyScale()
size = property(getSize, setSize)
def setScale(self, scalar):
self.setSize((self.getWidth() * scalar, self.getHeight() * scalar))
def getRed(self):
return self._red
def setRed(self, red):
self._red = red
self.applyColor()
red = property(getRed, setRed)
def getGreen(self):
return self._green
def setGreen(self, green):
self._green = green
self.applyColor()
green = property(getGreen, setGreen)
def getBlue(self):
return self._blue
def setBlue(self, blue):
self._blue = blue
self.applyColor()
blue = property(getBlue, setBlue)
def getAlpha(self):
return self._alpha
def setAlpha(self, alpha):
self._alpha = alpha
self.applyAlpha()
alpha = property(getAlpha, setAlpha)
def getColor(self):
return (self._red, self._green, self._blue)
def setColor(self, color):
self._red = color[0]
self._green = color[1]
self._blue = color[2]
self.applyColor()
color = property(getColor, setColor)
if _panda:
    ImageSurface = ImageSurfacePanda
 | apache-2.0 | 5,942,455,946,340,129,000 | 29.378082 | 93 | 0.590872 | false | 4.08361 | false | false | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/battle/DistributedBattleFinal.py | 1 | 7784 |
from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from BattleBase import *
from direct.actor import Actor
from toontown.distributed import DelayDelete
from direct.directnotify import DirectNotifyGlobal
import DistributedBattleBase
import MovieUtil
from toontown.suit import Suit
import SuitBattleGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.toonbase import ToontownGlobals
from direct.fsm import State
import random
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
class DistributedBattleFinal(DistributedBattleBase.DistributedBattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleFinal')
def __init__(self, cr):
townBattle = cr.playGame.hood.loader.townBattle
DistributedBattleBase.DistributedBattleBase.__init__(self, cr, townBattle)
self.setupCollisions(self.uniqueBattleName('battle-collide'))
self.bossCog = None
self.bossCogRequest = None
self.streetBattle = 0
self.joiningSuitsName = self.uniqueBattleName('joiningSuits')
self.fsm.addState(State.State('ReservesJoining', self.enterReservesJoining, self.exitReservesJoining, ['WaitForJoin']))
offState = self.fsm.getStateNamed('Off')
offState.addTransition('ReservesJoining')
waitForJoinState = self.fsm.getStateNamed('WaitForJoin')
waitForJoinState.addTransition('ReservesJoining')
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('ReservesJoining')
return
def generate(self):
DistributedBattleBase.DistributedBattleBase.generate(self)
def disable(self):
DistributedBattleBase.DistributedBattleBase.disable(self)
base.cr.relatedObjectMgr.abortRequest(self.bossCogRequest)
self.bossCogRequest = None
self.bossCog = None
return
def delete(self):
DistributedBattleBase.DistributedBattleBase.delete(self)
self.removeCollisionData()
def setBossCogId(self, bossCogId):
self.bossCogId = bossCogId
if base.cr.doId2do.has_key(bossCogId):
tempBossCog = base.cr.doId2do[bossCogId]
self.__gotBossCog([tempBossCog])
else:
self.notify.debug('doing relatedObjectMgr.request for bossCog')
self.bossCogRequest = base.cr.relatedObjectMgr.requestObjects([bossCogId], allCallback=self.__gotBossCog)
def __gotBossCog(self, bossCogList):
self.bossCogRequest = None
self.bossCog = bossCogList[0]
currStateName = self.localToonFsm.getCurrentState().getName()
if currStateName == 'NoLocalToon' and self.bossCog.hasLocalToon():
self.enableCollision()
return
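    # (Added commentary) setBossCogId() may arrive before the boss cog object
    # exists on this client, so the id is resolved either immediately from
    # cr.doId2do or later through relatedObjectMgr.requestObjects(); disable()
    # aborts the pending request so __gotBossCog() is never called for a
    # battle that has already been cleaned up.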
def setBattleNumber(self, battleNumber):
self.battleNumber = battleNumber
def setBattleSide(self, battleSide):
self.battleSide = battleSide
def setMembers(self, suits, suitsJoining, suitsPending, suitsActive, suitsLured, suitTraps, toons, toonsJoining, toonsPending, toonsActive, toonsRunning, timestamp):
if self.battleCleanedUp():
return
oldtoons = DistributedBattleBase.DistributedBattleBase.setMembers(self, suits, suitsJoining, suitsPending, suitsActive, suitsLured, suitTraps, toons, toonsJoining, toonsPending, toonsActive, toonsRunning, timestamp)
if len(self.toons) == 4 and len(oldtoons) < 4:
self.notify.debug('setMembers() - battle is now full of toons')
self.closeBattleCollision()
elif len(self.toons) < 4 and len(oldtoons) == 4:
self.openBattleCollision()
def makeSuitJoin(self, suit, ts):
self.notify.debug('makeSuitJoin(%d)' % suit.doId)
self.joiningSuits.append(suit)
if self.hasLocalToon():
self.d_joinDone(base.localAvatar.doId, suit.doId)
def showSuitsJoining(self, suits, ts, name, callback):
if self.bossCog == None:
return
if self.battleSide:
openDoor = Func(self.bossCog.doorB.request, 'open')
closeDoor = Func(self.bossCog.doorB.request, 'close')
else:
openDoor = Func(self.bossCog.doorA.request, 'open')
closeDoor = Func(self.bossCog.doorA.request, 'close')
suitTrack = Parallel()
delay = 0
for suit in suits:
suit.setState('Battle')
if suit.dna.dept == 'l':
suit.reparentTo(self.bossCog)
suit.setPos(0, 0, 0)
suit.setPos(self.bossCog, 0, 0, 0)
suit.headsUp(self)
suit.setScale(3.8 / suit.height)
if suit in self.joiningSuits:
i = len(self.pendingSuits) + self.joiningSuits.index(suit)
destPos, h = self.suitPendingPoints[i]
destHpr = VBase3(h, 0, 0)
else:
destPos, destHpr = self.getActorPosHpr(suit, self.suits)
suitTrack.append(Track((delay, self.createAdjustInterval(suit, destPos, destHpr)), (delay + 1.5, suit.scaleInterval(1.5, 1))))
delay += 1
if self.hasLocalToon():
camera.reparentTo(self)
if random.choice([0, 1]):
camera.setPosHpr(20, -4, 7, 60, 0, 0)
else:
camera.setPosHpr(-20, -4, 7, -60, 0, 0)
done = Func(callback)
track = Sequence(openDoor, suitTrack, closeDoor, done, name=name)
track.start(ts)
self.storeInterval(track, name)
return
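    # (Added commentary) The joining movie built above is one named Sequence:
    # open the boss door, run the per-suit adjust/scale intervals staggered one
    # second apart, close the door, then invoke the callback. storeInterval()
    # keeps it retrievable by name so exitReservesJoining() can cancel it early
    # via clearInterval(self.joiningSuitsName).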
def __playReward(self, ts, callback):
toonTracks = Parallel()
for toon in self.toons:
toonTracks.append(Sequence(Func(toon.loop, 'victory'), Wait(FLOOR_REWARD_TIMEOUT), Func(toon.loop, 'neutral')))
name = self.uniqueName('floorReward')
track = Sequence(toonTracks, name=name)
if self.hasLocalToon():
camera.setPos(0, 0, 1)
camera.setHpr(180, 10, 0)
track += [self.bossCog.makeEndOfBattleMovie(self.hasLocalToon()), Func(callback)]
self.storeInterval(track, name)
track.start(ts)
def enterReward(self, ts):
self.notify.debug('enterReward()')
self.disableCollision()
self.delayDeleteMembers()
self.__playReward(ts, self.__handleFloorRewardDone)
return None
def __handleFloorRewardDone(self):
return None
def exitReward(self):
self.notify.debug('exitReward()')
self.clearInterval(self.uniqueName('floorReward'), finish=1)
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
for toon in self.toons:
toon.startSmooth()
return None
def enterResume(self, ts = 0):
if self.hasLocalToon():
self.removeLocalToon()
self.fsm.requestFinalState()
def exitResume(self):
return None
def enterReservesJoining(self, ts = 0):
self.delayDeleteMembers()
self.showSuitsJoining(self.joiningSuits, ts, self.joiningSuitsName, self.__reservesJoiningDone)
def __reservesJoiningDone(self):
self._removeMembersKeep()
self.doneBarrier()
def exitReservesJoining(self):
self.clearInterval(self.joiningSuitsName)
def enterNoLocalToon(self):
self.notify.debug('enterNoLocalToon()')
if self.bossCog != None and self.bossCog.hasLocalToon():
self.enableCollision()
else:
self.disableCollision()
return
def exitNoLocalToon(self):
self.disableCollision()
return None
def enterWaitForServer(self):
self.notify.debug('enterWaitForServer()')
return None
def exitWaitForServer(self):
return None
| apache-2.0 | -5,185,984,711,244,480,000 | 37.534653 | 223 | 0.656475 | false | 3.53176 | false | false | false |
atmtools/typhon | doc/example_google.py | 1 | 8646 |
# -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
Style Guide`_. Docstrings may extend over multiple lines. Sections are created
with a section header and a colon followed by a block of indented text.
Example:
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_google.py
Section breaks are created by resuming unindented text. Section breaks
are also implicitly created anytime a new section starts.
Attributes:
module_level_variable1 (int): Module level variables may be documented in
either the ``Attributes`` section of the module docstring, or in an
inline docstring immediately following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
module_level_variable1 = 12345
module_level_variable2 = 98765
"""int: Module level variable documented inline.
The docstring may span multiple lines. The type may optionally be specified
on the first line, separated by a colon.
"""
def module_level_function(param1, param2=None, *args, **kwargs):
"""This is an example of a module level function.
Function parameters should be documented in the ``Args`` section. The name
of each parameter is required. The type and description of each parameter
is optional, but should be included if not obvious.
Parameter types -- if given -- should be specified according to
`PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.
If \*args or \*\*kwargs are accepted,
they should be listed as ``*args`` and ``**kwargs``.
The format for a parameter is::
name (type): description
The description may span multiple lines. Following
lines should be indented. The "(type)" is optional.
Multiple paragraphs are supported in parameter
descriptions.
Args:
param1 (int): The first parameter.
param2 (Optional[str]): The second parameter. Defaults to None.
Second line of description should be indented.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
bool: True if successful, False otherwise.
The return type is optional and may be specified at the beginning of
the ``Returns`` section followed by a colon.
The ``Returns`` section may span multiple lines and paragraphs.
Following lines should be indented to match the first line.
The ``Returns`` section supports any reStructuredText formatting,
including literal blocks::
{
'param1': param1,
'param2': param2
}
Raises:
AttributeError: The ``Raises`` section is a list of all exceptions
that are relevant to the interface.
ValueError: If `param2` is equal to `param1`.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
if param1 == param2:
raise ValueError('param1 may not be equal to param2')
return True
def example_generator(n):
"""Generators have a ``Yields`` section instead of a ``Returns`` section.
Args:
n (int): The upper limit of the range to generate, from 0 to `n` - 1.
Yields:
int: The next number in the range of 0 to `n` - 1.
Examples:
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> print([i for i in example_generator(4)])
[0, 1, 2, 3]
"""
for i in range(n):
yield i
class ExampleError(Exception):
"""Exceptions are documented in the same way as classes.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
msg (str): Human readable string describing the exception.
code (Optional[int]): Error code.
Attributes:
msg (str): Human readable string describing the exception.
code (int): Exception error code.
"""
def __init__(self, msg, code):
self.msg = msg
self.code = code
class ExampleClass(object):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attribute and property types -- if given -- should be specified according
to `PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.
Attributes:
attr1 (str): Description of `attr1`.
attr2 (Optional[int]): Description of `attr2`.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
def __init__(self, param1, param2, param3):
"""Example of docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1 (str): Description of `param1`.
param2 (Optional[int]): Description of `param2`. Multiple
lines are supported.
param3 (List[str]): Description of `param3`.
"""
self.attr1 = param1
self.attr2 = param2
self.attr3 = param3 #: Doc comment *inline* with attribute
#: List[str]: Doc comment *before* attribute, with type specified
self.attr4 = ['attr4']
self.attr5 = None
"""Optional[str]: Docstring *after* attribute, with type specified."""
@property
def readonly_property(self):
"""str: Properties should be documented in their getter method."""
return 'readonly_property'
@property
def readwrite_property(self):
"""List[str]: Properties with both a getter and setter should only
be documented in their getter method.
If the setter method contains notable behavior, it should be
mentioned here.
"""
return ['readwrite_property']
@readwrite_property.setter
def readwrite_property(self, value):
value
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output.
This behavior can be disabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = False
"""
pass
def __special_without_docstring__(self):
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
| mit | -1,395,894,020,954,367,700 | 30.67033 | 79 | 0.64978 | false | 4.616124 | false | false | false |
rjenc29/numerical | course/matplotlib/examples/fill_example.py | 1 | 2229 | """
Illustrate different ways of using the various fill functions.
"""
import numpy as np
import matplotlib.pyplot as plt
import example_utils
def main():
fig, axes = example_utils.setup_axes()
fill_example(axes[0])
fill_between_example(axes[1])
stackplot_example(axes[2])
example_utils.title(fig, 'fill/fill_between/stackplot: Filled polygons',
y=0.95)
fig.savefig('fill_example.png', facecolor='none')
plt.show()
def fill_example(ax):
# Use fill when you want a simple filled polygon between vertices
x, y = fill_data()
ax.fill(x, y, color='lightblue')
ax.margins(0.1)
example_utils.label(ax, 'fill')
def fill_between_example(ax):
# Fill between fills between two curves or a curve and a constant value
# It can be used in several ways. We'll illustrate a few below.
x, y1, y2 = sin_data()
# The most basic (and common) use of fill_between
err = np.random.rand(x.size)**2 + 0.1
y = 0.7 * x + 2
ax.fill_between(x, y + err, y - err, color='orange')
# Filling between two curves with different colors when they cross in
# different directions
ax.fill_between(x, y1, y2, where=y1>y2, color='lightblue')
ax.fill_between(x, y1, y2, where=y1<y2, color='forestgreen')
# Note that this is fillbetween*x*!
ax.fill_betweenx(x, -y1, where=y1>0, color='red', alpha=0.5)
ax.fill_betweenx(x, -y1, where=y1<0, color='blue', alpha=0.5)
ax.margins(0.15)
example_utils.label(ax, 'fill_between/x')
def stackplot_example(ax):
# Stackplot is equivalent to a series of ax.fill_between calls
x, y = stackplot_data()
ax.stackplot(x, y.cumsum(axis=0), alpha=0.5)
example_utils.label(ax, 'stackplot')
#-- Data generation ----------------------
def stackplot_data():
x = np.linspace(0, 10, 100)
y = np.random.normal(0, 1, (5, 100))
y = y.cumsum(axis=1)
y -= y.min(axis=0, keepdims=True)
return x, y
def sin_data():
x = np.linspace(0, 10, 100)
y = np.sin(x)
y2 = np.cos(x)
return x, y, y2
def fill_data():
t = np.linspace(0, 2*np.pi, 100)
r = np.random.normal(0, 1, 100).cumsum()
r -= r.min()
return r * np.cos(t), r * np.sin(t)
main()
| mit | -5,428,542,155,974,001,000 | 27.948052 | 76 | 0.623598 | false | 2.909922 | false | false | false |
seninp/saxpy | saxpy/hotsax.py | 1 | 4860 | """Implements HOT-SAX."""
import numpy as np
from saxpy.znorm import znorm
from saxpy.sax import sax_via_window
from saxpy.distance import euclidean
def find_discords_hotsax(series, win_size=100, num_discords=2, alphabet_size=3,
paa_size=3, znorm_threshold=0.01, sax_type='unidim'):
"""HOT-SAX-driven discords discovery."""
discords = list()
global_registry = set()
# Z-normalized versions for every subsequence.
znorms = np.array([znorm(series[pos: pos + win_size], znorm_threshold) for pos in range(len(series) - win_size + 1)])
# SAX words for every subsequence.
sax_data = sax_via_window(series, win_size=win_size, paa_size=paa_size, alphabet_size=alphabet_size,
nr_strategy=None, znorm_threshold=0.01, sax_type=sax_type)
"""[2.0] build the 'magic' array"""
magic_array = list()
for k, v in sax_data.items():
magic_array.append((k, len(v)))
"""[2.1] sort it ascending by the number of occurrences"""
magic_array = sorted(magic_array, key=lambda tup: tup[1])
while len(discords) < num_discords:
best_discord = find_best_discord_hotsax(series, win_size, global_registry, sax_data, magic_array, znorms)
if -1 == best_discord[0]:
break
discords.append(best_discord)
mark_start = max(0, best_discord[0] - win_size + 1)
mark_end = best_discord[0] + win_size
for i in range(mark_start, mark_end):
global_registry.add(i)
return discords
def find_best_discord_hotsax(series, win_size, global_registry, sax_data, magic_array, znorms):
"""Find the best discord with hotsax."""
"""[3.0] define the key vars"""
best_so_far_position = -1
best_so_far_distance = 0.
distance_calls = 0
visit_array = np.zeros(len(series), dtype=np.int)
"""[4.0] and we are off iterating over the magic array entries"""
for entry in magic_array:
"""[5.0] current SAX words and the number of other sequences mapping to the same SAX word."""
curr_word = entry[0]
occurrences = sax_data[curr_word]
"""[6.0] jumping around by the same word occurrences makes it easier to
nail down the possibly small distance value -- so we can be efficient
and all that..."""
for curr_pos in occurrences:
if curr_pos in global_registry:
continue
"""[7.0] we don't want an overlapping subsequence"""
mark_start = curr_pos - win_size + 1
mark_end = curr_pos + win_size
visit_set = set(range(mark_start, mark_end))
"""[8.0] here is our subsequence in question"""
cur_seq = znorms[curr_pos]
"""[9.0] let's see what is NN distance"""
nn_dist = np.inf
do_random_search = True
"""[10.0] ordered by occurrences search first"""
for next_pos in occurrences:
"""[11.0] skip bad pos"""
if next_pos in visit_set:
continue
else:
visit_set.add(next_pos)
"""[12.0] distance we compute"""
dist = euclidean(cur_seq, znorms[next_pos])
distance_calls += 1
"""[13.0] keep the books up-to-date"""
if dist < nn_dist:
nn_dist = dist
if dist < best_so_far_distance:
do_random_search = False
break
"""[13.0] if not broken above,
we shall proceed with random search"""
if do_random_search:
"""[14.0] build that random visit order array"""
curr_idx = 0
for i in range(0, (len(series) - win_size + 1)):
if not(i in visit_set):
visit_array[curr_idx] = i
curr_idx += 1
it_order = np.random.permutation(visit_array[0:curr_idx])
curr_idx -= 1
"""[15.0] and go random"""
while curr_idx >= 0:
rand_pos = it_order[curr_idx]
curr_idx -= 1
dist = euclidean(cur_seq, znorms[rand_pos])
distance_calls += 1
"""[16.0] keep the books up-to-date again"""
if dist < nn_dist:
nn_dist = dist
if dist < best_so_far_distance:
nn_dist = dist
break
"""[17.0] and BIGGER books"""
if (nn_dist > best_so_far_distance) and (nn_dist < np.inf):
best_so_far_distance = nn_dist
best_so_far_position = curr_pos
return best_so_far_position, best_so_far_distance
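# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It relies only on
# the find_discords_hotsax() helper defined above; the window size, number of
# discords and the random test series are illustrative values.
#
#   import numpy as np
#   from saxpy.hotsax import find_discords_hotsax
#
#   series = np.random.randn(1000)
#   discords = find_discords_hotsax(series, win_size=100, num_discords=2)
#   # each entry is a (position, nearest-neighbour distance) pair
#   for position, distance in discords:
#       print(position, distance)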
| gpl-2.0 | 5,871,183,956,478,835,000 | 33.721429 | 121 | 0.531893 | false | 3.755796 | false | false | false |
sighill/shade_app | apis/raw/017_raw/017_cleaner.py | 1 | 1278 | # -*- coding: utf-8 -*-
# 017_cleaner.py
# CODED TO BE EXECUTED SERVER SIDE :
# cd /home/common/shade
# python3 manage.py shell
import sys
from apis.voca import *
##################################
# Init of paths and file names
AddLog('title' , 'Début du nettoyage du fichier')
work_dir = '/home/common/shade/apis/raw/017_raw/'
# Name of the source file
raw_file = 'src'
##################################
# Creation of the raw list
with open(work_dir + raw_file , 'r') as file:
raw_list = [i for i in file.read().splitlines()]
'''
##################################
# Text formatting
# Init of the list that will hold StringFormatter's output
AddLog('subtitle' , 'Début de la fonction StringFormatter')
formatted_list = [StringFormatter(line) for line in raw_list]
##################################
# going through oddities finder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( formatted_list )
'''
ref_list = raw_list
##################################
# Saving the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('/home/common/shade/apis/out/','017_src',ref_list,'AssetPlace;Pays clémentin , Ravénie , Lombrie') | mit | -8,009,218,853,207,783,000 | 30.641026 | 112 | 0.605822 | false | 3.011848 | false | false | false |
reprah/shy | shy.py | 1 | 2992 | #!/usr/bin/env python
import sys, os, re, subprocess
# begin loop:
# - reading from stdin
# - forking a child
# - executing a new process in the child
def main():
while True:
sys.stdout.write(os.environ['PROMPT'])
line = sys.stdin.readline()
commands = split_on_pipes(line)
placeholder_in = sys.stdin
placeholder_out = sys.stdout
pipe = []
pids = []
for line in commands:
args = [expand(string) for string in line.split()]
command = args[0]
if command in BUILTINS:
# run built-in instead of doing fork + exec
run_builtin(command, args)
else:
# if command is not the last command
if (commands.index(line) + 1) < len(commands):
pipe = os.pipe() # file descriptors
placeholder_out = pipe[1]
else:
placeholder_out = sys.stdout
pid = fork_and_exec(command, args, placeholder_out, placeholder_in)
pids.append(pid)
if type(placeholder_out) is int:
os.close(placeholder_out)
if type(placeholder_in) is int:
os.close(placeholder_in)
if commands.index(line) > 0:
placeholder_in = pipe[0]
for id in pids:
wait_for_child(id)
def wait_for_child(pid):
try:
os.waitpid(pid, 0)
    except OSError:
        pass  # the child may already have been reaped
# returns PID of child process
def fork_and_exec(command, args, placeholder_out, placeholder_in):
pid = os.fork()
if pid == 0: # inside child process
if type(placeholder_out) is int:
sys.stdout = os.fdopen(placeholder_out, 'w')
os.close(placeholder_out)
if type(placeholder_in) is int:
sys.stdin = os.fdopen(placeholder_in, 'r')
os.close(placeholder_in)
try:
os.execvp(command, args) # actual exec
except:
print "%s: command not found" % command
sys.exit(1) # exit child
return pid
def run_builtin(command, args):
try:
BUILTINS[command](args[1])
except:
BUILTINS[command]()
# returns an array of command strings
def split_on_pipes(line):
matches = re.findall("([^\"'|]+)|[\"']([^\"']+)[\"']", line)
commands = []
for match in matches:
for string in match:
if string != '':
commands.append(string.strip())
return commands
# support different types of expansion
def expand(string):
# variable expansion
if re.match("\$\w+", string):
return os.environ[string[1:]]
# arithmetic expansion
elif re.match("\$\(\(([\w\W\s]*)\)\)", string):
expr = re.match("\$\(\(([\w\W\s]*)\)\)", string).group(1)
return str(eval(expr))
# command expansion
elif re.match("\$\(([\w\W\s]*)\)", string):
expr = re.match("\$\(([\w\W\s]*)\)", string).group(1)
p = subprocess.Popen([expr], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# get the output of the command
out, _ = p.communicate()
return out[0:-1]
else:
return string
def set(args):
key, value = args.split('=')
os.environ[key] = value
BUILTINS = {
'cd': lambda path: os.chdir(''.join(path)),
'exit': lambda exit_code=0: sys.exit(int(exit_code)),
'set': lambda args: set(args) # can't do variable assignment in Python lambda
}
os.environ['PROMPT'] = "=> "
main()
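# Example interaction (illustrative only; actual output depends on your
# system and on the PROMPT value set above):
#
#   => echo $((2+3))
#   5
#   => set GREETING=hello
#   => echo $GREETING
#   hello
#   => ls | grep shy
#   shy.py
#   => exit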
| mit | 7,146,024,717,470,157,000 | 22.559055 | 78 | 0.642714 | false | 2.941986 | false | false | false |
csiro-rds/casda-samples | cutouts_by_proj.py | 1 | 7634 | #############################################################################################
#
# Python script to demonstrate interacting with CASDA's SODA implementation to
# retrieve cutout images around a list of sources.
#
# This script creates a job to produce and download cutouts from the specified image at
# the positions provided in an input file (each line has an RA and DEC).
#
# Author: James Dempsey on 16 Apr 2016
#
# Written for python 2.7
# Note: astropy is available on galaxy via 'module load astropy'
# On other machines, try Anaconda https://www.continuum.io/downloads
#
# Modified: MH on 18th Dec 2020
# Take in proj name in TAP query of images. Proj argument should be text snippet of the project name in obscore. e.g. EMU for EMU, Rapid for RACS.
# Also now does RA and Dec search in the TAP query of images (not just in the SODA cutout command).
#
# Modified: MH on 12th Apr
# Use s_region for image region. This may contain NaN pixel borders, but better representation of whether a point is within an image. (Previously used fixed radius from image centre).
#
# Example usage:
# python cutouts_by_proj.py OPAL-username Rapid mysources.txt racs_output 0.1
# For RACS cutouts, with the list of positions in a file mysources.txt, and a cutout radius of 0.1 degrees.
#
#############################################################################################
from __future__ import print_function, division, unicode_literals
import argparse
import os
from astropy.io import votable
from astropy.coordinates import SkyCoord
from astropy import units
import casda
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(description="Download cutouts of specific locations from the specified image")
parser.add_argument("opal_username",
help="Your user name on the ATNF's online proposal system (normally an email address)")
parser.add_argument("-p", "--opal_password", help="Your password on the ATNF's online proposal system")
parser.add_argument("--password_file", help="The file holding your password for the ATNF's online proposal system")
parser.add_argument("proj", help="The text in project name, e.g. EMU, or Rapid ")
parser.add_argument("source_list_file",
help="The file holding the list of positions, with one RA and Dec pair per line.")
parser.add_argument("destination_directory", help="The directory where the resulting files will be stored")
parser.add_argument("radius", help="Radius, in degrees, of the cutouts")
args = parser.parse_args()
return args
def parse_sources_file(filename):
"""
Read in a file of sources, with one source each line. Each source is specified as a
right ascension and declination pair separated by space.
e.g.
1:34:56 -45:12:30
320.20 -43.5
    :param filename: The name of the file containing the list of sources
:return: A list of SkyCoord objects representing the parsed sources.
"""
sourcelist = []
with open(filename, 'r') as f:
for line in f:
if line and line[0] != '#':
parts = line.split()
if len(parts) > 1:
if parts[0].find(':') > -1 or parts[0].find('h') > -1:
sky_loc = SkyCoord(parts[0], parts[1], frame='icrs',
unit=(units.hourangle, units.deg))
else:
sky_loc = SkyCoord(parts[0], parts[1], frame='icrs',
unit=(units.deg, units.deg))
sourcelist.append(sky_loc)
return sourcelist
def produce_cutouts(source_list, proj, username, password, destination_dir, cutout_radius_degrees):
# Use CASDA VO (secure) to query for the images associated with the given scheduling_block_id
    print ("\n\n** Retrieving image details for %s ... \n\n" % proj)
filename = destination_dir + str(proj) + ".xml"
#Do initial filter of images, allow for 3 deg cone around position (get ASKAP image which is ~30 sq deg).
src_num = 0
for sky_loc in source_list:
src_num = src_num + 1
ra = sky_loc.ra.degree
dec = sky_loc.dec.degree
data_product_id_query = "select * from ivoa.obscore where obs_collection LIKE '%" + proj + \
"%' and dataproduct_subtype = 'cont.restored.t0' and pol_states = '/I/' and 1 = CONTAINS(POINT('ICRS',"+ str(ra) + ","+ str(dec) + "),s_region)"
casda.sync_tap_query(data_product_id_query, filename, username=username, password=password)
image_cube_votable = votable.parse(filename, pedantic=False)
results_array = image_cube_votable.get_table_by_id('results').array
# For each of the image cubes, query datalink to get the secure datalink details
print ("\n\n** Retrieving datalink for each image containing source number " + str(src_num) + " ...\n\n")
authenticated_id_tokens = []
for image_cube_result in results_array:
image_cube_id = image_cube_result['obs_publisher_did'].decode('utf-8')
async_url, authenticated_id_token = casda.get_service_link_and_id(image_cube_id, username,
password,
service='cutout_service',
destination_dir=destination_dir)
if authenticated_id_token is not None:
authenticated_id_tokens.append(authenticated_id_token)
if len(authenticated_id_tokens) == 0:
print ("No image cubes found")
return 1
# Create the async job
job_location = casda.create_async_soda_job(authenticated_id_tokens)
# For each entry in the results of the catalogue query, add the position filter as a parameter to the async job
cutout_filters = []
circle = "CIRCLE " + str(ra) + " " + str(dec) + " " + str(cutout_radius_degrees)
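        # e.g. "CIRCLE 187.5 -45.0 0.1" (illustrative values) -- the SODA positional filter string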
cutout_filters.append(circle)
casda.add_params_to_async_job(job_location, 'pos', cutout_filters)
# Run the job
status = casda.run_async_job(job_location)
# Download all of the files, or alert if it didn't complete
if status == 'COMPLETED':
print ("\n\n** Downloading results...\n\n")
casda.download_all(job_location, destination_dir)
returnflag = 0
else:
print ("Job did not complete: Status was %s." % status)
returnflag = 1
if returnflag == 0:
return 0
else:
return 1
def main():
args = parseargs()
password = casda.get_opal_password(args.opal_password, args.password_file)
# Change this to choose which environment to use, prod is the default
# casda.use_dev()
destination_dir = args.destination_directory + "/" + str(args.proj) + "/" # directory where files will be saved
# 1) Read in the list of sources
print ("\n\n** Parsing the source list ...\n")
source_list = parse_sources_file(args.source_list_file)
print ("\n** Read %d sources...\n\n" % (len(source_list)))
# 2) Create the destination directory
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
# Do the work
return produce_cutouts(source_list, args.proj, args.opal_username, password, destination_dir, args.radius)
if __name__ == '__main__':
exit(main())
| apache-2.0 | 4,055,497,068,719,523,000 | 44.712575 | 184 | 0.61344 | false | 3.91688 | false | false | false |
tjhunter/phd-thesis-tjhunter | python/kdd/plot_network.py | 1 | 1065 |
__author__ = 'tjhunter'
import build
import json
import pylab as pl
from matplotlib.collections import LineCollection
# Draws the network as a pdf and SVG file.
def draw_network(ax, fd, link_style):
def decode_line(l):
#print l
dct = json.loads(l)
lats = dct['lats']
lons = dct['lons']
return zip(lons, lats)
lines = [decode_line(l) for l in fd]
#print lines
xmin = min([x for l in lines for x,y in l])
xmax = max([x for l in lines for x,y in l])
ymin = min([y for l in lines for x,y in l])
ymax = max([y for l in lines for x,y in l])
lc = LineCollection(lines, **link_style)
ax.add_collection(lc, autolim=True)
return ((xmin,xmax),(ymin,ymax))
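# Each input line is expected to be a JSON object with 'lats' and 'lons'
# arrays, e.g. (illustrative): {"lats": [37.77, 37.78], "lons": [-122.42, -122.41]}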
fname = build.data_name('kdd/net_export_6.json')
fig = pl.figure("fig1",figsize=(10,10))
ax = fig.gca()
ax.set_axis_off()
style = {'colors':'k','linewidths':0.5}
with open(fname) as f:
(xlims, ylims) = draw_network(ax, f, style)
ax.set_xlim(*xlims)
ax.set_ylim(*ylims)
# Saving in pdf is a bit slow
build.save_figure(fig, 'figures-kdd/network_export_6',save_svg=True)
| apache-2.0 | -1,207,750,359,825,550,300 | 26.307692 | 68 | 0.656338 | false | 2.744845 | false | false | false |
interalia/cmsplugin_availablejobs | availablejob/views.py | 1 | 3330 | from django.views.generic.simple import direct_to_template
from django.views.generic.list_detail import object_detail
from django.contrib.sites.models import Site
from django.shortcuts import get_object_or_404
from django.contrib import messages
from models import EnableOpening, Opening, Candidate
from forms import ApplyForm
import datetime
import hashlib
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
def __create_candidate(form,op,file):
name = form.cleaned_data["name"]
email = form.cleaned_data["email"]
phone = form.cleaned_data["phone"]
cv = form.cleaned_data["cv"]
uploaded_file(file)
candidate, created = Candidate.objects.get_or_create(name = name, email = email)
if op:
candidate.opening = op
candidate.phone = phone
candidate.cv = cv
candidate.save()
return candidate
def index(request):
eopen = EnableOpening.objects.all()
for i in eopen:
print i
form = ApplyForm()
post=False
ios=mobile(request)
d = {"opens": eopen,'form': form,'post':post,'mobile':ios}
if request.method == "POST":
form= ApplyForm(request.POST,request.FILES)
if form.is_valid():
post=True
d.update({"form":form,"post":post})
name = form.cleaned_data.get("opening")
opening = EnableOpening.objects.filter(opening__title = name)
'''
for i in opening:
__create_candidate(form,i,request.FILES['cv'])
'''
else:
d.update({"form":form,"post":post})
return direct_to_template(request, template="vacancy/index.html",extra_context=d)
def detail(request, id):
qs = EnableOpening.objects.all()
d = {"queryset": qs, 'object_id': int(id), 'template_name': "vacancy/opening_detail.html" }
return object_detail(request,**d)
def _process_cv(request,opening):
applyform = ApplyForm()
if request.method == "POST":
form= ApplyForm(request.POST)
if form.is_valid():
vacante=form.save(commit=False)
vacante.save()
return direct_to_template(request, template = "vacancy/job_submit_success.html")
    # Fall through for GET requests and for invalid POST data so the view
    # always returns a response.
    return direct_to_template(request, template = "vacancy/job_form.html")
def show_form(request, id):
opening = get_object_or_404(EnableOpening, id = id)
return _process_cv(request, opening)
def send_cv(request):
opening = None
return _process_cv(request,opening)
def facebook(request):
ops = EnableOpening.objects.all()
today = hashlib.md5(str(datetime.datetime.now()))
SITE = Site.objects.get_current()
d = {"enable_openings": ops, "today": today.hexdigest(), 'SITE': SITE}
return direct_to_template(request, "vacancy/facebook.html", extra_context = d)
def mobile(request):
device = {}
mobile=False
ua=request.META.get('HTTP_USER_AGENT','').lower()
if ua.find("iphone") > 0:
mobile= True
if ua.find("ipad") > 0:
mobile= True
if mobile:
return True
else:
return False
def uploaded_file(filename):
fd=open(settings.MEDIA_CV+str(filename),'wb+')
for chunk in filename.chunks():
fd.write(chunk)
fd.close()
| bsd-3-clause | 7,716,668,419,080,434,000 | 30.415094 | 95 | 0.63033 | false | 3.679558 | false | false | false |
calio/cflow2dot | cflow2dot.py | 1 | 9602 | #!/usr/bin/env python
import os.path
import sys
import subprocess
import re
import argparse
import json
from sys import exit
from os import system
cflow_path = "/usr/local/bin/cflow"
dot_path = "/usr/local/bin/dot"
color = ["#eecc80", "#ccee80", "#80ccee", "#eecc80", "#80eecc"];
shape =["box", "ellipse", "octagon", "hexagon", "diamond"];
shape_len = len(shape)
pref = "cflow"
exts = ["svg", "png"]
index = {}
count = {}
stdlib = [
"assert", "isalnum", "isalpha", "iscntrl", "isdigit", "isgraph", "islower",
"isprint", "ispunct", "isspace", "isupper", "isxdigit", "toupper", "tolower",
"errno", "setlocale", "acos", "asin", "atan", "atan2", "ceil", "cos", "cosh",
"exp", "fabs", "floor", "fmod", "frexp", "ldexp", "log", "log10", "modf",
"pow", "sin", "sinh", "sqrt", "tan", "tanh", "stdlib.h", "setjmp", "longjmp",
"signal", "raise", "clearerr", "fclose", "feof", "fflush", "fgetc", "fgetpos",
"fgets", "fopen", "fprintf", "fputc", "fputs", "fread", "freopen", "fscanf",
"fseek", "fsetpos", "ftell", "fwrite", "getc", "getchar", "gets", "perror",
"printf", "putchar", "puts", "remove", "rewind", "scanf", "setbuf", "setvbuf",
"sprintf", "sscanf", "tmpfile", "tmpnam", "ungetc", "vfprintf", "vprintf",
"vsprintf", "abort", "abs", "atexit", "atof", "atoi", "atol", "bsearch",
"calloc", "div", "exit", "getenv", "free", "labs", "ldiv", "malloc", "mblen",
"mbstowcs", "mbtowc", "qsort", "rand", "realloc", "strtod", "strtol",
"strtoul", "srand", "system", "wctomb", "wcstombs", "memchr", "memcmp",
"memcpy", "memmove", "memset", "strcat", "strchr", "strcmp", "strcoll",
"strcpy", "strcspn", "strerror", "strlen", "strncat", "strncmp", "strncpy",
"strpbrk", "strrchr", "strspn", "strstr", "strtok", "strxfrm", "asctime",
"clock", "ctime", "difftime", "gmtime", "localtime", "mktime", "strftime",
"time","vsnprintf"]
pthreadlib = [
"pthread_atfork", "pthread_attr_destroy", "pthread_attr_getdetachstate",
"pthread_attr_getguardsize", "pthread_attr_getinheritsched",
"pthread_attr_getschedparam", "pthread_attr_getschedpolicy",
"pthread_attr_getscope", "pthread_attr_getstack", "pthread_attr_getstackaddr",
"pthread_attr_getstacksize", "pthread_attr_init",
"pthread_attr_setdetachstate", "pthread_attr_setguardsize",
"pthread_attr_setinheritsched", "pthread_attr_setschedparam",
"pthread_attr_setschedpolicy", "pthread_attr_setscope",
"pthread_attr_setstack", "pthread_attr_setstackaddr",
"pthread_attr_setstacksize", "pthread_barrier_destroy", "pthread_barrier_init",
"pthread_barrier_wait", "pthread_barrierattr_destroy",
"pthread_barrierattr_getpshared", "pthread_barrierattr_init",
"pthread_barrierattr_setpshared", "pthread_cancel", "pthread_cleanup_pop",
"pthread_cleanup_push", "pthread_cond_broadcast", "pthread_cond_destroy",
"pthread_cond_init", "pthread_cond_signal", "pthread_cond_timedwait",
"pthread_cond_wait", "pthread_condattr_destroy", "pthread_condattr_getclock",
"pthread_condattr_getpshared", "pthread_condattr_init",
"pthread_condattr_setclock", "pthread_condattr_setpshared", "pthread_create",
"pthread_detach", "pthread_equal", "pthread_exit", "pthread_getconcurrency",
"pthread_getcpuclockid", "pthread_getschedparam", "pthread_getspecific",
"pthread_join", "pthread_key_create", "pthread_key_delete", "pthread_kill",
"pthread_mutex_destroy", "pthread_mutex_getprioceiling", "pthread_mutex_init",
"pthread_mutex_lock", "pthread_mutex_setprioceiling",
"pthread_mutex_timedlock", "pthread_mutex_trylock", "pthread_mutex_unlock",
"pthread_mutexattr_destroy", "pthread_mutexattr_getprioceiling",
"pthread_mutexattr_getprotocol", "pthread_mutexattr_getpshared",
"pthread_mutexattr_gettype", "pthread_mutexattr_init",
"pthread_mutexattr_setprioceiling", "pthread_mutexattr_setprotocol",
"pthread_mutexattr_setpshared", "pthread_mutexattr_settype", "pthread_once",
"pthread_rwlock_destroy", "pthread_rwlock_init", "pthread_rwlock_rdlock",
"pthread_rwlock_timedrdlock", "pthread_rwlock_timedwrlock",
"pthread_rwlock_tryrdlock", "pthread_rwlock_trywrlock",
"pthread_rwlock_unlock", "pthread_rwlock_wrlock", "pthread_rwlockattr_destroy",
"pthread_rwlockattr_getpshared", "pthread_rwlockattr_init",
"pthread_rwlockattr_setpshared", "pthread_self", "pthread_setcancelstate",
"pthread_setcanceltype", "pthread_setconcurrency", "pthread_setschedparam",
"pthread_setschedprio", "pthread_setspecific", "pthread_sigmask",
"pthread_spin_destroy", "pthread_spin_init", "pthread_spin_lock",
"pthread_spin_trylock", "pthread_spin_unlock", "pthread_testcancel",
"pthread_setaffinity_np"
]
def get_parser():
ap = argparse.ArgumentParser(description="cflow2dot: generate call graph from C source code")
ap.add_argument("-e", "--exclude", metavar="symbols",
help="exclude these symbols (comma separated values) from output")
ap.add_argument("-m", "--main", metavar="NAME",
help="Assume main function to be called NAME")
ap.add_argument("-r", "--rank", default="LR", choices=["LR", "same"],
help="if rank is \"LR\", graph is left to right. If rank is \"same\", graph is top to bottom. Default value is \"LR\".")
ap.add_argument("-v", "--verbose", action="store_true",
help="increase verbosity level")
ap.add_argument("--no", metavar="NAME", action="append",
help="exclude NAME symbol set (configured in ~/.cflowdotrc) from output")
ap.add_argument("--no-pthreadlib", action="store_true",
help="exclude pthread lib symbols from output")
ap.add_argument("--no-stdlib", action="store_true",
help="exclude C stdlib symbols from output")
ap.add_argument("cflow_args", nargs=argparse.REMAINDER,
help="arguments that are passed to cflow")
return ap
def call_cflow(opts):
args = opts.cflow_args
args.insert(0, cflow_path)
args.insert(1, "-l")
if opts.main:
args.insert(1, "-m")
args.insert(2, opts.main)
if opts.verbose:
print "calling cflow with args: ", args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if stderr and not stdout:
exit(stderr)
return stdout
def build_excludes(opts):
res = {}
if opts.exclude:
exclude_symbols = opts.exclude.split(",")
for v in exclude_symbols:
res[v] = True
if opts.no_stdlib:
for v in stdlib:
res[v] = True
if opts.no_pthreadlib:
for v in pthreadlib:
res[v] = True
if opts.no:
rcfile = os.path.expanduser("~") + "/.cflow2dotrc"
print(rcfile)
if not os.path.isfile(rcfile):
print("no ~/.cflow2dotrc file found, --no argument is skipped")
return res
else:
fp = open(rcfile)
rcdata = json.load(fp)
for exclude_set in opts.no:
if rcdata.get(exclude_set):
for func_name in rcdata[exclude_set]:
res[func_name] = True
else:
print("no key \"" + exclude_set + "\" specified in " + rcfile)
fp.close()
return res
def get_output(opts, res):
output = []
skip = False
exclude_index = -1
lines = res.split('\n')
verbose = opts.verbose
exclude = build_excludes(opts)
for line in lines:
if line == '':
continue
line = re.sub("\(.*$", "", line)
line = re.sub("^\{\s*", "", line)
line = re.sub("\}\s*", "\t", line)
parts = line.split("\t")
# indent level
n = parts[0]
# function name of callee
f = parts[1]
index[n] = f
# test if callee is in exclude list
if skip:
# exclude all sub function calls from the excluded function. If we
# get another callee at the same indent level, then stop skipping
if int(n) > int(exclude_index):
if verbose:
print("exclude sub function: " + f)
continue
else:
skip = False
exclude_index = -1
if f in exclude:
skip = True
exclude_index = n
if verbose:
print("exclude function: " + f)
continue
if n != '0':
s = "%s->%s" % (index[str(int(n) - 1)], f)
if s not in count:
output.append("node [color=\"{0}\" shape={1}];edge [color=\"{2}\"];\n{3}\n".format(color[int(n) % shape_len], shape[int(n) % shape_len], color[int(n) % shape_len], s))
count[s] = True
else:
output.append("%s [shape=box];\n" % f)
output.insert(0, "digraph G {\nnode [peripheries=2 style=\"filled,rounded\" fontname=\"Vera Sans YuanTi Mono\" color=\"%s\"];\nrankdir=%s;\nlabel=\"%s\"\n" % (color[0], opts.rank, opts.cflow_args[2]))
output.append("}\n")
return output
def write_output(output):
f = open(pref + ".dot", "w")
f.write(''.join(output))
f.close()
print("dot output to %s.dot" % pref)
if os.path.isfile(dot_path):
for ext in exts:
system("dot -T%s %s.dot -o %s.%s" % (ext, pref, pref, ext))
print("%s output to %s.%s" % (ext, pref, ext))
else:
print("'dot(GraphViz)' not installed.")
if __name__ == '__main__':
ap = get_parser()
opts = ap.parse_args()
if not os.path.isfile(cflow_path):
exit('cflow not found on: %s' % cflow_path)
res = call_cflow(opts)
output = get_output(opts, res)
write_output(output)
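# Example invocation (illustrative; the "cflow" output prefix comes from the
# `pref` variable defined above):
#
#   python cflow2dot.py --no-stdlib -r LR main.c util.c
#
# writes cflow.dot and, when Graphviz's dot is installed at dot_path, also
# cflow.svg and cflow.png.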
| mit | -4,698,164,425,426,742,000 | 39.686441 | 204 | 0.613622 | false | 3.215673 | false | false | false |
kokosowy/vuadek | vuadek.py | 1 | 1180 | #!/usr/bin/python3.4
import sys
import os
import subprocess
zm_home = os.path.expanduser("~")
zm_pth_workdir = zm_home+"/.vuadek/"
if not os.path.exists(zm_pth_workdir):
os.makedirs(zm_pth_workdir)
zm_fl_remains = zm_pth_workdir+"remains"
pathname = os.path.dirname(sys.argv[1])
if not os.path.isfile(zm_fl_remains):
print('nie istnieje')
scan_result = subprocess.Popen(["uade123", "--scan", os.path.abspath(pathname)],stdout=subprocess.PIPE)
with open(zm_fl_remains, 'w') as f:
for line in scan_result.stdout:
f.write(line.decode('utf-8'))
f.closed
print('istnieje')
with open(zm_fl_remains, 'r') as f:
zm_input = [line.rstrip('\n') for line in f]
for item in zm_input:
head, tail = os.path.split(item)
subprocess.call(["uade123", "--detect-format-by-content", "-f",zm_pth_workdir+tail+'.wav', "--filter=A1200", "--normalise", "--speed-hack", "-v", "--headphones", item])
subprocess.call(["lame", "--verbose", "--preset", "standard", zm_pth_workdir+tail+'.wav', head+'/'+tail+'.mp3'])
subprocess.call(["rm", zm_pth_workdir+tail+'.wav'])
f.closed
#call(["lame", "--verbose", "--preset", "standard", zm_output, zm_mp3])
#call(["rm", zm_output])
| gpl-2.0 | -4,554,903,689,397,167,600 | 29.25641 | 170 | 0.65678 | false | 2.593407 | false | false | false |
teamosceola/bitbake | lib/bb/ui/knotty.py | 1 | 12691 | #
# BitBake (No)TTY UI Implementation
#
# Handling output to TTYs or files (no TTY)
#
# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division
import os
import sys
import xmlrpclib
import logging
import progressbar
import bb.msg
from bb.ui import uihelper
logger = logging.getLogger("BitBake")
interactive = sys.stdout.isatty()
class BBProgress(progressbar.ProgressBar):
def __init__(self, msg, maxval):
self.msg = msg
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
progressbar.ETA()]
progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets)
class NonInteractiveProgress(object):
fobj = sys.stdout
def __init__(self, msg, maxval):
self.msg = msg
self.maxval = maxval
def start(self):
self.fobj.write("%s..." % self.msg)
self.fobj.flush()
return self
def update(self, value):
pass
def finish(self):
self.fobj.write("done.\n")
self.fobj.flush()
def new_progress(msg, maxval):
if interactive:
return BBProgress(msg, maxval)
else:
return NonInteractiveProgress(msg, maxval)
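# Illustrative use of new_progress(): on a TTY this shows an interactive
# progress bar, otherwise it just prints "Parsing recipes..." and "done.".
#
#   pbar = new_progress("Parsing recipes", 100).start()
#   pbar.update(42)
#   pbar.finish()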
def pluralise(singular, plural, qty):
if(qty == 1):
return singular % qty
else:
return plural % qty
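# Illustrative behaviour of pluralise():
#   pluralise("%s task failed", "%s tasks failed", 1) -> "1 task failed"
#   pluralise("%s task failed", "%s tasks failed", 3) -> "3 tasks failed"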
def main(server, eventHandler):
# Get values of variables which control our output
includelogs = server.runCommand(["getVariable", "BBINCLUDELOGS"])
loglines = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
consolelogfile = server.runCommand(["getVariable", "BB_CONSOLELOG"])
helper = uihelper.BBUIHelper()
console = logging.StreamHandler(sys.stdout)
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
bb.msg.addDefaultlogFilter(console)
console.setFormatter(format)
logger.addHandler(console)
if consolelogfile:
consolelog = logging.FileHandler(consolelogfile)
bb.msg.addDefaultlogFilter(consolelog)
consolelog.setFormatter(format)
logger.addHandler(consolelog)
try:
cmdline = server.runCommand(["getCmdLineAction"])
if not cmdline:
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
return 1
elif not cmdline['action']:
print(cmdline['msg'])
return 1
ret = server.runCommand(cmdline['action'])
if ret != True:
print("Couldn't get default commandline! %s" % ret)
return 1
except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x)
return 1
parseprogress = None
cacheprogress = None
shutdown = 0
interrupted = False
return_value = 0
errors = 0
warnings = 0
taskfailures = []
while True:
try:
event = eventHandler.waitEvent(0.25)
if event is None:
if shutdown > 1:
break
continue
helper.eventHandler(event)
if isinstance(event, bb.runqueue.runQueueExitWait):
if not shutdown:
shutdown = 1
if shutdown and helper.needUpdate:
activetasks, failedtasks = helper.getTasks()
if activetasks:
print("Waiting for %s active tasks to finish:" % len(activetasks))
for tasknum, task in enumerate(activetasks):
print("%s: %s (pid %s)" % (tasknum, activetasks[task]["title"], task))
if isinstance(event, logging.LogRecord):
if event.levelno >= format.ERROR:
errors = errors + 1
return_value = 1
elif event.levelno == format.WARNING:
warnings = warnings + 1
# For "normal" logging conditions, don't show note logs from tasks
# but do show them if the user has changed the default log level to
# include verbose/debug messages
if event.taskpid != 0 and event.levelno <= format.NOTE:
continue
logger.handle(event)
continue
if isinstance(event, bb.build.TaskFailed):
return_value = 1
logfile = event.logfile
if logfile and os.path.exists(logfile):
print("ERROR: Logfile of failure stored in: %s" % logfile)
if includelogs and not event.errprinted:
print("Log data follows:")
f = open(logfile, "r")
lines = []
while True:
l = f.readline()
if l == '':
break
l = l.rstrip()
if loglines:
lines.append(' | %s' % l)
if len(lines) > int(loglines):
lines.pop(0)
else:
print('| %s' % l)
f.close()
if lines:
for line in lines:
print(line)
if isinstance(event, bb.build.TaskBase):
logger.info(event._message)
continue
if isinstance(event, bb.event.ParseStarted):
if event.total == 0:
continue
parseprogress = new_progress("Parsing recipes", event.total).start()
continue
if isinstance(event, bb.event.ParseProgress):
parseprogress.update(event.current)
continue
if isinstance(event, bb.event.ParseCompleted):
if not parseprogress:
continue
parseprogress.finish()
print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
% ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
continue
if isinstance(event, bb.event.CacheLoadStarted):
cacheprogress = new_progress("Loading cache", event.total).start()
continue
if isinstance(event, bb.event.CacheLoadProgress):
cacheprogress.update(event.current)
continue
if isinstance(event, bb.event.CacheLoadCompleted):
cacheprogress.finish()
print("Loaded %d entries from dependency cache." % event.num_entries)
continue
if isinstance(event, bb.command.CommandFailed):
return_value = event.exitcode
errors = errors + 1
logger.error("Command execution failed: %s", event.error)
shutdown = 2
continue
if isinstance(event, bb.command.CommandExit):
if not return_value:
return_value = event.exitcode
continue
if isinstance(event, (bb.command.CommandCompleted, bb.cooker.CookerExit)):
shutdown = 2
continue
if isinstance(event, bb.event.MultipleProviders):
logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
event._item,
", ".join(event._candidates))
logger.info("consider defining a PREFERRED_PROVIDER entry to match %s", event._item)
continue
if isinstance(event, bb.event.NoProvider):
return_value = 1
errors = errors + 1
if event._runtime:
r = "R"
else:
r = ""
if event._dependees:
logger.error("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)", r, event._item, ", ".join(event._dependees), r)
else:
logger.error("Nothing %sPROVIDES '%s'", r, event._item)
if event._reasons:
for reason in event._reasons:
logger.error("%s", reason)
continue
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
logger.info("Running setscene task %d of %d (%s)" % (event.stats.completed + event.stats.active + event.stats.failed + 1, event.stats.total, event.taskstring))
continue
if isinstance(event, bb.runqueue.runQueueTaskStarted):
if event.noexec:
tasktype = 'noexec task'
else:
tasktype = 'task'
logger.info("Running %s %s of %s (ID: %s, %s)",
tasktype,
event.stats.completed + event.stats.active +
event.stats.failed + 1,
event.stats.total, event.taskid, event.taskstring)
continue
if isinstance(event, bb.runqueue.runQueueTaskFailed):
taskfailures.append(event.taskstring)
logger.error("Task %s (%s) failed with exit code '%s'",
event.taskid, event.taskstring, event.exitcode)
continue
if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
logger.warn("Setscene task %s (%s) failed with exit code '%s' - real task will be run instead",
event.taskid, event.taskstring, event.exitcode)
continue
# ignore
if isinstance(event, (bb.event.BuildBase,
bb.event.StampUpdate,
bb.event.ConfigParsed,
bb.event.RecipeParsed,
bb.event.RecipePreFinalise,
bb.runqueue.runQueueEvent,
bb.runqueue.runQueueExitWait,
bb.event.OperationStarted,
bb.event.OperationCompleted,
bb.event.OperationProgress)):
continue
logger.error("Unknown event: %s", event)
except EnvironmentError as ioerror:
# ignore interrupted io
if ioerror.args[0] == 4:
pass
except KeyboardInterrupt:
if shutdown == 1:
print("\nSecond Keyboard Interrupt, stopping...\n")
server.runCommand(["stateStop"])
if shutdown == 0:
interrupted = True
print("\nKeyboard Interrupt, closing down...\n")
server.runCommand(["stateShutdown"])
shutdown = shutdown + 1
pass
summary = ""
if taskfailures:
summary += pluralise("\nSummary: %s task failed:",
"\nSummary: %s tasks failed:", len(taskfailures))
for failure in taskfailures:
summary += "\n %s" % failure
if warnings:
summary += pluralise("\nSummary: There was %s WARNING message shown.",
"\nSummary: There were %s WARNING messages shown.", warnings)
if return_value:
summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
"\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
if summary:
print(summary)
if interrupted:
print("Execution was interrupted, returning a non-zero exit code.")
if return_value == 0:
return_value = 1
return return_value
| gpl-2.0 | -854,224,645,206,136,700 | 38.908805 | 175 | 0.532897 | false | 4.772847 | false | false | false |
MaxLinCode/tardy-HackIllinois-2017 | alexa/lambda_function.py | 1 | 6803 | """
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Built-in Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
from twilio.rest import TwilioRestClient
from loadData import rawToTime, getNumber
from config import *
accountSid = 'ACcf54ef49063aaa784c99aec82d7f16c2'
authToken = '31f817a48ee7cd461c07c57490eac6ce'
fromNumber = '19163183442'
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
session_attributes = {}
card_title = "Welcome"
speech_output = "Hello, welcome to the Tardy skill."
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "You can ask me to send a message to your friends."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def sarah_intent_handler(intent):
card_title = "Sarah"
speech_output = "Sarah is the best"
return build_response(None, build_speechlet_response(
card_title, speech_output, None, False))
def formatMessage(userName, targetName, time, place):
return "Hello %s, %s would like to meet at %s at %s." % (targetName.title(), userName.title(), place.title(), time)
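# Illustrative result (hypothetical names and values):
#   formatMessage("alice", "bob", "3 PM", "the library")
#   -> "Hello Bob, Alice would like to meet at The Library at 3 PM."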
def getInfo(userId, target, time, place):
d = {}
time = rawToTime(time)
userName = ""
for x in target:
        # getNumber() is assumed to return (requesting user's name,
        # target's name, target's phone number); fixed to look up each
        # individual target and to use consistent variable names.
        arr = getNumber(userId, x)
        if userName == "":
            userName = arr[0]
        d[arr[1]] = [arr[2], formatMessage(userName, arr[1], time, place)]
for key in d:
sendText(d[key][0], d[key][1])
def twilio_intent_handler(intent):
card_title = "Twilio"
#print(intent['slots'])
target = intent["slots"]["nameSlot"]["value"]
time = intent["slots"]["timeSlot"]["value"]
place = intent["slots"]["placeSlot"]["value"]
#userID = kijZjJJ5ozPZxfeHYfjh3zd3TUh1
getInfo('kijZjJJ5ozPZxfeHYfjh3zd3TUh1', target, time, place)
#cellNumber = ""
#messageText = ""
#slots = intent['slots']
#cellNumber = slots['numberSlot']['value']
#messageText = slots['msgSlot']['value']
# call the method to send text
speech_output = "Message sent."
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(None, build_speechlet_response(
card_title, speech_output, None, False))
#number,message
def sendText(to_num, msg_text):
try:
client = TwilioRestClient(accountSid, authToken)
client.messages.create(
to=to_num,
            from_=fromNumber,  # module-level Twilio sender number defined above
body=msg_text
)
return True
except Exception as e:
print("Failed to send message: ")
print(e.code)
return False
def help_intent_handler(intent):
card_title = "Help"
speech_output = "Ask me to send someone a text."
return build_response(None, build_speechlet_response(
card_title, speech_output, None, False))
def misunderstood_handler(intent):
card_title = "Misunderstood"
speech_output = "Sorry, please try again."
return build_response(None, build_speechlet_response(
card_title, speech_output, None, True))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying our Tardy skill. " \
"Have a great time at Hack Illinois! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response(None, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Events ------------------
def on_launch(launch_request):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "SarahIntent":
return sarah_intent_handler(intent)
elif intent_name == "TwilioIntent":
return twilio_intent_handler(intent)
elif intent_name == "HelpIntent":
return help_intent_handler(intent)
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
return misunderstood_handler(intent)
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
session_attributes = {}
#applicationId = event['session']['application']['applicationId']
#if applicationId != TWILIO_APPLICATION_ID:
# should_end_session = True
# bad_request_output = "Bad Request"
# print("Bad ApplicationId Received: "+applicationId)
# return build_response(session_attributes, build_speechlet_response("Twilio", bad_request_output, None, should_end_session))
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request']) | mit | 3,407,677,043,786,212,400 | 32.850746 | 132 | 0.641482 | false | 3.68327 | false | false | false |
quru/qis | src/imageserver/errors.py | 1 | 2593 | #
# Quru Image Server
#
# Document: errors.py
# Date started: 31 Mar 2011
# By: Matt Fozard
# Purpose: Internal errors and exceptions
# Requires:
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
#
class ImageError(ValueError):
"""
An error resulting from an invalid or unsupported imaging operation.
"""
pass
class AlreadyExistsError(ValueError):
"""
An error resulting from a duplicate value or an attempt to create an
object that already exists.
"""
pass
class DoesNotExistError(ValueError):
"""
An error resulting from an attempt to use an object that does not exist.
"""
pass
class SecurityError(Exception):
"""
An error resulting from some unauthorised action.
"""
pass
class StartupError(Exception):
"""
An error that should prevent server startup.
"""
pass
class AuthenticationError(Exception):
"""
An error resulting from a failure to authenticate.
"""
pass
class DBError(Exception):
"""
An error resulting from a database operation.
Adds an optional extra 'sql' attribute.
"""
def __init__(self, message, sql=None):
Exception.__init__(self, message)
self.sql = sql if sql is not None else ''
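# Illustrative use (hypothetical message and SQL): the optional 'sql' argument
# is kept on the exception so callers can log the offending statement.
#
#   raise DBError("update failed", sql="UPDATE images SET status=1 WHERE id=42")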
class DBDataError(DBError):
"""
An error resulting from incorrect database data.
"""
pass
class ParameterError(ValueError):
"""
An error resulting from an invalid parameter value.
"""
pass
class TimeoutError(RuntimeError):
"""
An error resulting from an operation timeout.
"""
pass
class ServerTooBusyError(RuntimeError):
"""
Raised when the server is too busy to service a request.
"""
pass
| agpl-3.0 | 6,222,362,547,311,885,000 | 22.36036 | 79 | 0.649055 | false | 4.209416 | false | false | false |
lukas-bednar/python-rrmngmnt | rrmngmnt/ssh.py | 1 | 10209 | import os
import time
import socket
import paramiko
import contextlib
import subprocess
from rrmngmnt.executor import Executor
AUTHORIZED_KEYS = os.path.join("%s", ".ssh/authorized_keys")
KNOWN_HOSTS = os.path.join("%s", ".ssh/known_hosts")
ID_RSA_PUB = os.path.join("%s", ".ssh/id_rsa.pub")
ID_RSA_PRV = os.path.join("%s", ".ssh/id_rsa")
CONNECTIVITY_TIMEOUT = 600
CONNECTIVITY_SAMPLE_TIME = 20
class RemoteExecutor(Executor):
"""
Any resource which provides SSH service.
    This class is meant to replace our current utilities.machine.LinuxMachine
    class. It gives you lower-level access to the ssh communication:
    live interaction, no more True/False-only results, and no mixing of
    stdout with stderr.
    You can still use the 'run_cmd' method if you don't care.
    But I would recommend you to work like this:
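    A minimal illustrative sketch (not taken from the original docstring; it
    assumes the Executor base class exposes a session() factory and that the
    constructor takes a user object and an address):

        executor = RemoteExecutor(user, address)
        with executor.session() as ss:
            cmd = ss.command(['ls', '-l'])
            with cmd.execute() as (in_, out, err):
                print(out.read())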
"""
TCP_TIMEOUT = 10.0
class LoggerAdapter(Executor.LoggerAdapter):
"""
Makes sure that all logs which are done via this class, has
appropriate prefix. [user@IP/password]
"""
def process(self, msg, kwargs):
return (
"[%s@%s/%s] %s" % (
self.extra['self'].user.name,
self.extra['self'].address,
self.extra['self'].user.password,
msg,
),
kwargs,
)
class Session(Executor.Session):
"""
Represents active ssh connection
"""
def __init__(self, executor, timeout=None, use_pkey=False):
super(RemoteExecutor.Session, self).__init__(executor)
if timeout is None:
timeout = RemoteExecutor.TCP_TIMEOUT
self._timeout = timeout
self._ssh = paramiko.SSHClient()
self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if use_pkey:
self.pkey = paramiko.RSAKey.from_private_key_file(
ID_RSA_PRV % os.path.expanduser('~')
)
self._executor.user.password = None
else:
self.pkey = None
def __exit__(self, type_, value, tb):
if type_ is socket.timeout:
self._update_timeout_exception(value)
try:
self.close()
except Exception as ex:
if type_ is None:
raise
else:
self._executor.logger.debug(
"Can not close ssh session %s", ex,
)
def open(self):
self._ssh.get_host_keys().clear()
try:
self._ssh.connect(
self._executor.address,
username=self._executor.user.name,
password=self._executor.user.password,
timeout=self._timeout,
pkey=self.pkey
)
except (socket.gaierror, socket.herror) as ex:
args = list(ex.args)
message = "%s: %s" % (self._executor.address, args[1])
args[1] = message
ex.strerror = message
ex.args = tuple(args)
raise
except socket.timeout as ex:
self._update_timeout_exception(ex)
raise
def close(self):
self._ssh.close()
def _update_timeout_exception(self, ex, timeout=None):
if getattr(ex, '_updated', False):
return
if timeout is None:
timeout = self._timeout
message = "%s: timeout(%s)" % (
self._executor.address, timeout
)
ex.args = (message,)
ex._updated = True
def command(self, cmd):
return RemoteExecutor.Command(cmd, self)
def run_cmd(self, cmd, input_=None, timeout=None):
cmd = self.command(cmd)
return cmd.run(input_, timeout)
@contextlib.contextmanager
def open_file(self, path, mode='r', bufsize=-1):
with contextlib.closing(self._ssh.open_sftp()) as sftp:
with contextlib.closing(
sftp.file(
path,
mode,
bufsize,
)
) as fh:
yield fh
class Command(Executor.Command):
"""
This class holds all data related to command execution.
- the command itself
- stdout/stderr streams
- out/err string which were produced by command
- returncode the exit status of command
"""
def __init__(self, cmd, session):
super(RemoteExecutor.Command, self).__init__(
subprocess.list2cmdline(cmd),
session,
)
self._in = None
self._out = None
self._err = None
def get_rc(self, wait=False):
if self._rc is None:
if self._out is not None:
if self._out.channel.exit_status_ready() or wait:
self._rc = self._out.channel.recv_exit_status()
return self._rc
@contextlib.contextmanager
def execute(self, bufsize=-1, timeout=None, get_pty=False):
"""
This method allows you to work directly with streams.
with cmd.execute() as in_, out, err:
# where in_, out and err are file-like objects
# where you can read data from these
"""
try:
self.logger.debug("Executing: %s", self.cmd)
self._in, self._out, self._err = self._ss._ssh.exec_command(
self.cmd,
bufsize=bufsize,
timeout=timeout,
get_pty=get_pty,
)
yield self._in, self._out, self._err
self.get_rc(True)
except socket.timeout as ex:
self._ss._update_timeout_exception(ex, timeout)
raise
finally:
if self._in is not None:
self._in.close()
if self._out is not None:
self._out.close()
if self._err is not None:
self._err.close()
self.logger.debug("Results of command: %s", self.cmd)
self.logger.debug(" OUT: %s", self.out)
self.logger.debug(" ERR: %s", self.err)
self.logger.debug(" RC: %s", self.rc)
def run(self, input_, timeout=None, get_pty=False):
with self.execute(
timeout=timeout, get_pty=get_pty
) as (in_, out, err):
if input_:
in_.write(input_)
in_.close()
self.out = out.read()
self.err = err.read()
return self.rc, self.out, self.err
def __init__(self, user, address, use_pkey=False):
"""
:param user: user
:type user: instance of User
:param address: ip / hostname
:type address: str
:param use_pkey: use ssh private key in the connection
:type use_pkey: bool
"""
super(RemoteExecutor, self).__init__(user)
self.address = address
self.use_pkey = use_pkey
def session(self, timeout=None):
"""
:param timeout: tcp timeout
:type timeout: float
:return: the session
:rtype: instance of RemoteExecutor.Session
"""
return RemoteExecutor.Session(self, timeout, self.use_pkey)
def run_cmd(self, cmd, input_=None, tcp_timeout=None, io_timeout=None):
"""
:param cmd: command
:type cmd: list
:param input_: input data
:type input_: str
:param tcp_timeout: tcp timeout
:type tcp_timeout: float
:param io_timeout: timeout for data operation (read/write)
:type io_timeout: float
:return: rc, out, err
:rtype: tuple (int, str, str)
"""
with self.session(tcp_timeout) as session:
return session.run_cmd(cmd, input_, io_timeout)
def is_connective(self, tcp_timeout=20.0):
"""
Check if address is connective via ssh
:param tcp_timeout: time to wait for response
:type tcp_timeout: float
:return: True if address is connective, False otherwise
:rtype: bool
"""
try:
self.logger.info(
"Check if address is connective via ssh in given timeout %s",
tcp_timeout
)
self.run_cmd(['true'], tcp_timeout=tcp_timeout)
return True
except (socket.timeout, socket.error) as e:
self.logger.debug("Socket error: %s", e)
except Exception as e:
self.logger.debug("SSH exception: %s", e)
return False
def wait_for_connectivity_state(
self, positive,
timeout=CONNECTIVITY_TIMEOUT,
sample_time=CONNECTIVITY_SAMPLE_TIME
):
"""
Wait until address will be connective or not via ssh
:param positive: wait for the positive or negative connective state
:type positive: bool
:param timeout: wait timeout
:type timeout: int
:param sample_time: sample the ssh each sample_time seconds
:type sample_time: int
:return: True, if positive and ssh is connective or
negative and ssh does not connective, otherwise False
:rtype: bool
"""
reachable = "unreachable" if positive else "reachable"
timeout_counter = 0
while self.is_connective() != positive:
if timeout_counter > timeout:
self.logger.error(
"Address %s is still %s via ssh, after %s seconds",
self.address, reachable, timeout
)
return False
time.sleep(sample_time)
timeout_counter += sample_time
return True
| gpl-2.0 | -7,273,027,268,052,574,000 | 33.962329 | 77 | 0.515526 | false | 4.467834 | false | false | false |
lipro-yocto/git-repo | subcmds/cherry_pick.py | 1 | 3421 | # Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from command import Command
from git_command import GitCommand
CHANGE_ID_RE = re.compile(r'^\s*Change-Id: I([0-9a-f]{40})\s*$')
class CherryPick(Command):
common = True
helpSummary = "Cherry-pick a change."
helpUsage = """
%prog <sha1>
"""
helpDescription = """
'%prog' cherry-picks a change from one branch to another.
The change id will be updated, and a reference to the old
change id will be added.
"""
def _Options(self, p):
pass
def ValidateOptions(self, opt, args):
if len(args) != 1:
self.Usage()
def Execute(self, opt, args):
reference = args[0]
p = GitCommand(None,
['rev-parse', '--verify', reference],
capture_stdout=True,
capture_stderr=True)
if p.Wait() != 0:
print(p.stderr, file=sys.stderr)
sys.exit(1)
sha1 = p.stdout.strip()
p = GitCommand(None, ['cat-file', 'commit', sha1], capture_stdout=True)
if p.Wait() != 0:
print("error: Failed to retrieve old commit message", file=sys.stderr)
sys.exit(1)
old_msg = self._StripHeader(p.stdout)
p = GitCommand(None,
['cherry-pick', sha1],
capture_stdout=True,
capture_stderr=True)
status = p.Wait()
print(p.stdout, file=sys.stdout)
print(p.stderr, file=sys.stderr)
if status == 0:
# The cherry-pick was applied correctly. We just need to edit the
# commit message.
new_msg = self._Reformat(old_msg, sha1)
p = GitCommand(None, ['commit', '--amend', '-F', '-'],
provide_stdin=True,
capture_stdout=True,
capture_stderr=True)
p.stdin.write(new_msg)
p.stdin.close()
if p.Wait() != 0:
print("error: Failed to update commit message", file=sys.stderr)
sys.exit(1)
else:
      print('NOTE: When committing (please see above) and editing the commit '
            'message, please remove the old Change-Id-line and add:',
            file=sys.stderr)
print(self._GetReference(sha1), file=sys.stderr)
print(file=sys.stderr)
def _IsChangeId(self, line):
return CHANGE_ID_RE.match(line)
def _GetReference(self, sha1):
return "(cherry picked from commit %s)" % sha1
def _StripHeader(self, commit_msg):
lines = commit_msg.splitlines()
return "\n".join(lines[lines.index("") + 1:])
def _Reformat(self, old_msg, sha1):
new_msg = []
for line in old_msg.splitlines():
if not self._IsChangeId(line):
new_msg.append(line)
# Add a blank line between the message and the change id/reference
try:
if new_msg[-1].strip() != "":
new_msg.append("")
except IndexError:
pass
new_msg.append(self._GetReference(sha1))
return "\n".join(new_msg)
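  # Illustrative example (hypothetical message, not from the original code):
  # given an old commit message
  #
  #   Fix crash on startup
  #
  #   Change-Id: I0123456789abcdef0123456789abcdef01234567
  #
  # _Reformat(old_msg, sha1) drops the Change-Id line and appends the
  # cherry-pick reference, yielding
  #
  #   Fix crash on startup
  #
  #   (cherry picked from commit <sha1>)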
| apache-2.0 | 5,867,796,251,983,264,000 | 28.747826 | 78 | 0.621163 | false | 3.597266 | false | false | false |
CloudBoltSoftware/cloudbolt-forge | ui_extensions/multilevelapprovals/views.py | 1 | 14185 | from urllib.parse import urlparse
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.urls import reverse
from django.utils.translation import ugettext as _, ungettext
from accounts.models import UserProfile
from accounts.templatetags import account_tags
from cbhooks.exceptions import HookFailureException
from common.views import clear_cached_submenu
from costs.utils import (
is_rates_feature_enabled,
)
from cscv.models import CITConf, can_order_be_tested, CITTest
from orders.forms import DenyOrderForm
from orders.models import Order
from orders.templatetags.order_tags import order_pictograph, order_status_icon
from quota.exceptions import QuotaError
from servicecatalog.models import ServiceBlueprint
from utilities.decorators import json_view, dialog_view
from utilities.exceptions import (
InvalidCartException, InvalidConfigurationException,
CloudBoltException
)
from utilities.cb_http import django_sort_cols_from_datatable_request
from utilities.logger import ThreadLogger
from utilities.templatetags.helper_tags import link_or_label, how_long_ago
from utilities.views import access_denied
from .models import CustomOrder
from extensions.views import admin_extension
#@admin_extension(title='Multilevel Approvals Extension')
logger = ThreadLogger(__name__)
# Intentionally not protected at view level
@admin_extension(title='Multilevel Approvals Extension')
def order_list(request, message=""):
profile = request.get_user_profile()
# NOTE: order info will be sent via AJAX
return render(request, 'multilevelapprovals/templates/list.html', {
'pagetitle': _("Order List"),
'message': message,
'profile': profile,
'enable_rates_feature': is_rates_feature_enabled(),
})
# Intentionally not protected at view level
@json_view
def order_list_json(request, extra_context={}):
profile = request.get_user_profile()
# List of orders the user has permissions to view:
orders = Order.objects_for_profile(profile)
num_total_records = orders.count()
search = request.GET.get('sSearch')
if search:
orders = orders.search(search)
num_filtered_records = orders.count()
# Sorting: client passes column # which must be translated to model field
sort_cols = django_sort_cols_from_datatable_request(request, [
'id',
None,
'status',
'group',
# order by first & last which is how it's presented
['owner__user__first_name', 'owner__user__last_name'],
'create_date',
None, # Actions column is not sortable
])
orders = orders.order_by(*sort_cols)
# Pagination:
start = int(request.GET.get('iDisplayStart', None))
if start is not None:
end = int(start) + int(request.GET.get('iDisplayLength', 0))
orders = orders[start:end]
# Cache links to objects (since generating each requires a database hit):
_group_link_or_label_cache = {}
_owner_link_or_label_cache = {}
profiles_visible_to_this_profile = UserProfile.objects_for_profile(profile)
def cached_group_link_or_label(group):
try:
return _group_link_or_label_cache[group]
except KeyError:
rendered = link_or_label(group, profile)
_group_link_or_label_cache[group] = rendered
return rendered
def cached_owner_link_or_label(owner):
"""
        Ensure that the owner avatar and link-or-label are only constructed
        once per page view.
"""
if not owner or not owner.user:
return ""
try:
rendered = _owner_link_or_label_cache[owner]
except KeyError:
rendered = account_tags.rich_gravatar(
owner,
size=20,
link=(owner in profiles_visible_to_this_profile),
full_name=True
)
_owner_link_or_label_cache[owner] = rendered
return rendered
actions_template = loader.get_template('multilevelapprovals/templates/actions.html')
rows = []
for order in orders:
# Render the actions column value as HTML:
actions_html = actions_template.render(context={
'order': order,
'profile': profile,
'is_owner': order.owner == profile,
'can_approve': profile.has_permission('order.approve', order),
'can_cancel': order.can_cancel(profile),
'can_save_to_catalog': order.can_save_to_catalog(profile),
}, request=request)
#approval_str = "" #SRM
#for dict in is_multilevel_approval(order):
# for key in dict.keys():
# strng = UserProfile.objects.get(id=dict[key]).user.username
# if not approval_str:
# approval_str = key + ":", strng
# else:
# approval_str += "<BR>" + key + ":", strng
row = [
# We know that the user has access to view this order already,
# so show URL instead of link_or_label:
'<a href="%s">%s</a>' % (order.get_absolute_url(),
order.nickname()),
order_pictograph(order),
order_status_icon(order),
cached_group_link_or_label(order.group),
cached_owner_link_or_label(order.owner),
how_long_ago(order.create_date),
actions_html,
]
rows.append(row)
return {
# unaltered from client-side value, but cast to int to avoid XSS
# http://datatables.net/usage/server-side
"sEcho": int(request.GET.get('sEcho', 1)),
"iTotalRecords": num_total_records,
"iTotalDisplayRecords": num_filtered_records,
"aaData": rows, # Orders for the current page only
}
def modify(request, order_id):
"""
POST requests from the order list and detail views go here.
"""
order = get_object_or_404(Order, pk=order_id)
profile = request.get_user_profile()
# action matches the button values in order_actions templatetag.
action = request.POST.get('action', [''])
logger.info(f'SRM: in modify: action == {action}')
if action in ['approve', 'deny']:
if not profile.has_permission('order.approve', order):
return access_denied(
request, _("You do not have permission to approve this item."))
msg = ""
redirect_url = request.META['HTTP_REFERER']
if action == 'submit':
if not profile.has_permission('order.submit', order):
return access_denied(
request, _("You do not have permission to submit this order."))
try:
order.submit()
msg += order.start_approval_process(request)
messages.info(request, msg)
except QuotaError as e: # could happen if order is auto-approved
messages.error(request, e)
except InvalidConfigurationException as e:
messages.error(request, e)
except HookFailureException as e:
messages.error(request, e)
redirect_url = reverse('order_detail', args=[order.id])
elif action == 'approve':
logger.info('SRM: in modify: action == approve (should work) -- b4 approve_my_grms')
logger.info(f'SRM: order = {order}')
logger.info(f'SRM: profile = {profile}')
if CustomOrder.is_multilevel_approval(order):
logger.info(f'SRM: is multilevel -- approving GRMs')
CustomOrder.approve_my_grms(order, profile)
if all(CustomOrder.is_multilevel_approval(order).values()):
logger.info(f'SRM: all values return true - can approve')
else:
logger.info(f'SRM: not all values return true - cant approve')
messages.info(request, "partial approval processed")
return HttpResponseRedirect(reverse('order_detail', args=[order.id]))
try:
jobs, extramsg = order.approve(profile)
if jobs:
# template tweaks the message based on where we are going next
redirect_parsed = urlparse(redirect_url)
msg = loader.render_to_string('orders/approved_msg.html', {
'order': order,
'autoapproved': False,
'num_jobs': len(jobs),
'extramsg': extramsg,
'request': request,
'redirect_url': redirect_parsed.path,
})
else:
msg = extramsg
messages.info(request, msg)
except QuotaError as e:
messages.error(request, e)
except CloudBoltException as e:
messages.warning(request, e)
except:
raise
elif action == 'cancel':
if not order.can_cancel(profile):
return access_denied(
request, _("You do not have permission to cancel this order."))
order.cancel()
if order.owner:
clear_cached_submenu(order.owner.user_id, 'orders')
msg = _("Order #{order_id} has been canceled.").format(order_id=order.id)
messages.info(request, msg)
elif action == 'clear':
order.group = None
order.blueprint = None
order.save()
for order_item in order.orderitem_set.all():
order_item.delete()
if order.owner:
clear_cached_submenu(order.owner.user_id, 'orders')
messages.success(request, _("Your current order has been cleared."))
elif action == 'remind':
logger.info(_("User requested order approval reminder for order {order_id}").format(order_id=order_id))
try:
msg = order.send_reminder(request)
logger.debug(msg)
messages.info(request, msg)
except InvalidConfigurationException as e:
messages.error(request, e)
elif action == 'duplicate':
# Global Viewers are a special case where objects_for_profile will
# return True since they can view all orders, but we don't want them to
# be able to do anything like duplicate it (unless they have additional
# permissions)
duplicable, reason = order.can_duplicate(profile)
if not duplicable:
if reason == 'permission':
return access_denied(
request, _("You do not have permission to duplicate this order."))
elif reason == 'group':
messages.error(request, _("Orders with no group cannot be duplicated."))
return HttpResponseRedirect(reverse('order_detail', args=[order.id]))
try:
profile = request.get_user_profile()
cart = profile.get_current_order()
cart = order.duplicate(cart)
items_duplicated = cart.items_duplicated
hostnames_updated = cart.hostnames_updated
msg = ungettext("Duplicated {num_items} order item under "
"<a href='{url}'>your current order</a>.",
"Duplicated {num_items} order items under "
"<a href='{url}'>your current order</a>.",
items_duplicated).format(num_items=items_duplicated,
url=cart.get_absolute_url())
if hostnames_updated:
uniq_msg = ungettext("{updated_count} order item was updated to "
"avoid creating identical hostnames.",
"{updated_count} order items were updated to "
"avoid creating identical hostnames.",
hostnames_updated).format(updated_count=hostnames_updated)
msg += uniq_msg
clear_cached_submenu(profile.user_id, 'orders')
messages.success(request, msg)
return HttpResponseRedirect(reverse('current_order'))
except InvalidCartException as e:
messages.error(request, e)
elif action == 'save_as_blueprint':
profile = request.get_user_profile()
if order.group and not profile.has_permission('blueprint.manage', order.group):
return access_denied(
request, _("You need to have blueprint management permission for "
"group '{group}' to create a blueprint from this order.").format(group=order.group))
bp = ServiceBlueprint.from_order(order)
clear_cached_submenu(profile.user_id, 'catalog')
messages.success(
request,
_("Successfully saved the <a href='{order_url}'>order</a> "
"as blueprint <a href='{blueprint_url}'>{blueprint_name}</a>").format(
order_url=order.get_absolute_url(),
blueprint_url=bp.get_absolute_url(),
blueprint_name=bp.name))
redirect_url = bp.get_absolute_url()
elif action == 'add_to_cit':
if can_order_be_tested(order):
cit_test = CITTest.objects.create(
name=order.name,
order=order,
cit_conf=CITConf.objects.first(),
expected_status=order.status,
)
messages.success(
request,
                _('Created CIT test "{}". It will be automatically tested during '
                  'the next test run.'.format(link_or_label(cit_test, profile)))
)
else:
messages.error(request, "This order could not be added to CIT.")
return HttpResponseRedirect(redirect_url)
| apache-2.0 | -6,349,238,309,283,162,000 | 38.298295 | 111 | 0.582023 | false | 4.323377 | true | false | false |
LeandroRoberto/sapl | sapl/comissoes/views.py | 1 | 3353 |
from django.core.urlresolvers import reverse
from django.db.models import F
from django.views.generic import ListView
from sapl.crud.base import RP_DETAIL, RP_LIST, Crud, CrudAux, MasterDetailCrud
from sapl.materia.models import MateriaLegislativa, Tramitacao
from .models import (CargoComissao, Comissao, Composicao, Participacao,
Periodo, TipoComissao)
def pegar_url_composicao(pk):
participacao = Participacao.objects.get(id=pk)
comp_pk = participacao.composicao.pk
url = reverse('sapl.comissoes:composicao_detail', kwargs={'pk': comp_pk})
return url
CargoCrud = CrudAux.build(CargoComissao, 'cargo_comissao')
PeriodoComposicaoCrud = CrudAux.build(Periodo, 'periodo_composicao_comissao')
TipoComissaoCrud = CrudAux.build(
TipoComissao, 'tipo_comissao', list_field_names=[
'sigla', 'nome', 'natureza', 'dispositivo_regimental'])
class ParticipacaoCrud(MasterDetailCrud):
model = Participacao
parent_field = 'composicao__comissao'
public = [RP_DETAIL, ]
ListView = None
is_m2m = True
link_return_to_parent_field = True
class BaseMixin(MasterDetailCrud.BaseMixin):
list_field_names = ['composicao', 'parlamentar', 'cargo']
class ComposicaoCrud(MasterDetailCrud):
model = Composicao
parent_field = 'comissao'
model_set = 'participacao_set'
public = [RP_LIST, RP_DETAIL, ]
class ListView(MasterDetailCrud.ListView):
template_name = "comissoes/composicao_list.html"
paginate_by = None
def take_composicao_pk(self):
try:
return int(self.request.GET['pk'])
except:
return 0
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['composicao_pk'] = context['composicao_list'].last(
).pk if self.take_composicao_pk(
) == 0 else self.take_composicao_pk()
context['participacao_set'] = Participacao.objects.filter(
composicao__pk=context['composicao_pk']
).order_by('parlamentar')
return context
class ComissaoCrud(Crud):
model = Comissao
help_path = 'modulo_comissoes'
public = [RP_LIST, RP_DETAIL, ]
class BaseMixin(Crud.BaseMixin):
list_field_names = ['nome', 'sigla', 'tipo', 'data_criacao', 'ativa']
ordering = '-ativa', 'sigla'
class MateriasTramitacaoListView(ListView):
template_name = "comissoes/materias_em_tramitacao.html"
paginate_by = 10
def get_queryset(self):
# FIXME: Otimizar consulta
ts = Tramitacao.objects.order_by(
'materia', '-data_tramitacao', '-id').annotate(
comissao=F('unidade_tramitacao_destino__comissao')).distinct(
'materia').values_list('materia', 'comissao')
ts = list(filter(lambda x: x[1] == int(self.kwargs['pk']), ts))
ts = list(zip(*ts))
ts = ts[0] if ts else []
materias = MateriaLegislativa.objects.filter(
pk__in=ts).order_by('tipo', '-ano', '-numero')
return materias
def get_context_data(self, **kwargs):
context = super(
MateriasTramitacaoListView, self).get_context_data(**kwargs)
context['object'] = Comissao.objects.get(id=self.kwargs['pk'])
return context
| gpl-3.0 | 4,696,481,446,960,925,000 | 32.53 | 78 | 0.638831 | false | 3.012579 | false | false | false |
mdovgialo/steam-vr-wheel | steam_vr_wheel/pyvjoy/_wrapper.py | 1 | 2789 | import os
import sys
from ctypes import *
# The vJoy-specific exceptions raised below (vJoyNotEnabledException,
# vJoyDriverMismatch, vJoyFailedToAcquireException, ...) are assumed to be
# provided by the package's exceptions module.
from .exceptions import *
dll_filename = "vJoyInterface.dll"
dll_path = os.path.dirname(__file__) + os.sep + dll_filename
try:
_vj = cdll.LoadLibrary(dll_path)
except OSError:
sys.exit("Unable to load vJoy SDK DLL. Ensure that %s is present" % dll_filename)
def vJoyEnabled():
"""Returns True if vJoy is installed and enabled"""
result = _vj.vJoyEnabled()
if result == 0:
raise vJoyNotEnabledException()
else:
return True
def DriverMatch():
"""Check if the version of vJoyInterface.dll and the vJoy Driver match"""
result = _vj.DriverMatch()
if result == 0:
raise vJoyDriverMismatch()
else:
return True
def GetVJDStatus(rID):
"""Get the status of a given vJoy Device"""
return _vj.GetVJDStatus(rID)
def AcquireVJD(rID):
"""Attempt to acquire a vJoy Device"""
result = _vj.AcquireVJD(rID)
if result == 0:
#Check status
status = GetVJDStatus(rID)
if status != VJD_STAT_FREE:
raise vJoyFailedToAcquireException("Cannot acquire vJoy Device because it is not in VJD_STAT_FREE")
else:
raise vJoyFailedToAcquireException()
else:
return True
def RelinquishVJD(rID):
"""Relinquish control of a vJoy Device"""
result = _vj.RelinquishVJD(rID)
if result == 0:
raise vJoyFailedToRelinquishException()
else:
return True
def SetBtn(state,rID,buttonID):
"""Sets the state of vJoy Button to on or off. SetBtn(state,rID,buttonID)"""
result = _vj.SetBtn(state,rID,buttonID)
if result == 0:
raise vJoyButtonError()
else:
return True
def SetDiscPov(PovValue, rID, PovID):
"""Write Value to a given discrete POV defined in the specified VDJ"""
if PovValue < -1 or PovValue > 3:
raise vJoyInvalidPovValueException()
if PovID < 1 or PovID > 4:
raise vJoyInvalidPovIDException
return _vj.SetDiscPov(PovValue,rID,PovID)
def SetContPov(PovValue, rID, PovID):
"""Write Value to a given continuous POV defined in the specified VDJ"""
if PovValue < -1 or PovValue > 35999:
raise vJoyInvalidPovValueException()
if PovID < 1 or PovID > 4:
raise vJoyInvalidPovIDException
return _vj.SetContPov(PovValue,rID,PovID)
def ResetVJD(rID):
"""Reset all axes and buttons to default for specified vJoy Device"""
return _vj.ResetVJD(rID)
def ResetButtons(rID):
"""Reset all buttons to default for specified vJoy Device"""
return _vj.ResetButtons(rID)
def ResetPovs(rID):
"""Reset all POV hats to default for specified vJoy Device"""
    return _vj.ResetPovs(rID)
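# Minimal usage sketch (illustrative; device id 1 is an assumption and the
# corresponding vJoy device must be configured and free):
#
#   if vJoyEnabled() and DriverMatch():
#       AcquireVJD(1)
#       SetBtn(1, 1, 1)   # press button 1 on device 1
#       SetBtn(0, 1, 1)   # release it
#       RelinquishVJD(1)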
| mit | 8,956,066,866,461,069,000 | 21.241667 | 102 | 0.688777 | false | 2.783433 | false | false | false |
millken/simple-rtmp-server | trunk/research/community/server.py | 1 | 4633 | #!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2013-2014 winlin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
"""
the community is a default demo server for srs
"""
import sys
# reload the sys module to re-enable the setdefaultencoding method.
reload(sys)
# set the default encoding to utf-8
# using exec to set the encoding, to avoid error in IDE.
exec("sys.setdefaultencoding('utf-8')")
assert sys.getdefaultencoding().lower() == "utf-8"
import os, json, time, datetime, cherrypy, threading
# simple log functions.
def trace(msg):
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print "[%s][trace] %s"%(date, msg)
# enable crossdomain access for js-client
# define the following method:
# def OPTIONS(self, *args, **kwargs)
# enable_crossdomain()
# invoke this method to enable js to request crossdomain.
def enable_crossdomain():
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
cherrypy.response.headers["Access-Control-Allow-Methods"] = "GET, POST, HEAD, PUT, DELETE"
# generate allow headers for crossdomain.
allow_headers = ["Cache-Control", "X-Proxy-Authorization", "X-Requested-With", "Content-Type"]
cherrypy.response.headers["Access-Control-Allow-Headers"] = ",".join(allow_headers)
# error codes definition
class Error:
# ok, success, completed.
success = 0
# HTTP RESTful path.
class Root(object):
exposed = True
def __init__(self):
self.api = Api()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success, "urls":{"api":"the api root"}})
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
# HTTP RESTful path.
class Api(object):
exposed = True
def __init__(self):
self.v1 = V1()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success,
"urls": {
"v1": "the api version 1.0"
}
});
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
# HTTP RESTful path. to access as:
# http://127.0.0.1:8085/api/v1/clients
class V1(object):
exposed = True
def __init__(self):
pass;
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
'''
main code start.
'''
# donot support use this module as library.
if __name__ != "__main__":
raise Exception("embed not support")
# check the user options
if len(sys.argv) <= 1:
print "SRS community server, Copyright (c) 2013-2014 winlin"
print "Usage: python %s <port>"%(sys.argv[0])
print " port: the port to listen at."
print "For example:"
print " python %s 1949"%(sys.argv[0])
print ""
print "See also: https://github.com/winlinvip/simple-rtmp-server"
sys.exit(1)
# parse port from user options.
port = int(sys.argv[1])
static_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "static-dir"))
trace("api server listen at port: %s, static_dir: %s"%(port, static_dir))
# cherrypy config.
conf = {
'global': {
'server.shutdown_timeout': 1,
'server.socket_host': '0.0.0.0',
'server.socket_port': port,
'tools.encode.on': True,
'tools.staticdir.on': True,
'tools.encode.encoding': "utf-8",
#'server.thread_pool': 2, # single thread server.
},
'/': {
'tools.staticdir.dir': static_dir,
'tools.staticdir.index': "index.html",
# for cherrypy RESTful api support
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
}
}
# start cherrypy web engine
trace("start cherrypy server")
root = Root()
cherrypy.quickstart(root, '/', conf)
| mit | -3,654,521,204,986,091,500 | 31.398601 | 98 | 0.668465 | false | 3.671157 | false | false | false |
golya/FuzzLabs | engine/tests/steps/modules.py | 1 | 1961 | from behave import *
import os
import sys
import inspect
ROOT_DIR = os.path.dirname(
os.path.abspath(
inspect.getfile(inspect.currentframe()
)))
sys.path.append(ROOT_DIR + "/../../classes")
from ConfigurationHandler import ConfigurationHandler
from ModuleHandler import ModuleHandler
@given('we have root and config')
def step_impl(context):
assert os.path.isfile(ROOT_DIR + "/../../etc/engine.config")
context.root = ROOT_DIR + "/../../"
config_file = ROOT_DIR + "/../../etc/engine.config"
context.config_data = ConfigurationHandler(config_file).get()
@when('we load the modules')
def step_impl(context):
context.module_inst = ModuleHandler(context.root, context.config_data)
context.modules_list = context.module_inst.loaded_modules
@then('we get a list of modules')
def step_impl(context):
status = type(context.modules_list) == list
if status:
for module in context.modules_list:
if not module.get('instance') or \
not module.get('name') or \
not module.get('mtime') or \
not module.get('type'):
status = False
break
context.module_inst.unload_modules()
assert status
@given('we have modules loaded')
def step_impl(context):
assert os.path.isfile(ROOT_DIR + "/../../etc/engine.config")
root_dir = ROOT_DIR + "/../../"
config_file = ROOT_DIR + "/../../etc/engine.config"
config_data = ConfigurationHandler(config_file).get()
context.module_inst = ModuleHandler(root_dir, config_data)
context.modules_list = context.module_inst.loaded_modules
status = type(context.modules_list) == list
assert status
@when('we unload the modules')
def step_impl(context):
context.module_inst.unload_modules()
@then('we get an empty list')
def step_impl(context):
assert context.module_inst.loaded_modules == []
| gpl-2.0 | 9,094,677,830,503,925,000 | 29.640625 | 74 | 0.63743 | false | 3.793037 | true | false | false |
amanzi/ats-dev | tools/utils/transect_data.py | 2 | 7741 | """Loads and/or plots 2D, topologlically structured data on quadrilaterals using matplotlib.
"""
import sys,os
import numpy as np
import h5py
import mesh
import colors
def fullname(varname):
fullname = varname
if not '.cell.' in fullname:
fullname = fullname+'.cell.0'
return fullname
def transect_data(varnames, keys='all', directory=".", filename="visdump_data.h5",
mesh_filename="visdump_mesh.h5", coord_order=None, deformable=False, return_map=False):
"""Pulls simulation output into structured 2D arrays for transect-based, (i,j) indexing.
Input:
varnames | A list of variable names to pull, e.g.
| ['saturation_liquid', 'saturation_ice'], or a single variable
| name, e.g. 'saturation_liquid'
keys | Indices of timesteps to pull. Either an int (i.e. 0, -1, etc)
| for the kth timestep, or a list of ints, or 'all'.
directory | Directory of the run. Defaults to '.'
filename | Filename of the run. Defaults to 'visdump_data.h5'
mesh_filename | Filename of the mesh. Defaults to 'visdump_mesh.h5'
coord_order | Order of the transect coordinates. Defaults to ['x','z']. The
| mesh is sorted in this order.
deformable | Is the mesh deforming?
return_map | See return value below.
Output:
Output is an array of shape:
( len(varnames+2), len(keys), n_cells_coord_order[0], n_cells_coord_order[1] )
data[0,0,:,:] is the coord_order[0] centroid
data[1,0,:,:] is the coord_order[1] centroid
data[i+2,k,:,:] is the ith varname data at the kth requested timestep, sorted in
the same way as the centroids.
Note that the data is re-ordered in INCREASING coordinate, i.e. bottom to top in z.
If return_map is True, then returns a tuple, (data, map) where
map is a (NX,NZ) array of integers specifying which global id
corresponds to the (i,j) cell. This is useful for mapping input
data back INTO the unstructured mesh.
Example usage:
Calculate and plot the thaw depth at step 5.
      // Pull saturation ice -- TD is where sat ice = 0.
      data = transect_data(['saturation_ice'], 5)
      // x coordinate for plotting
      x = data[0,0,:,0]
      // for each column, find highest z where sat_ice > 0.
      td_i = np.array([np.where(data[2,0,i,:] > 0.)[0][-1] for i in range(data.shape[2])])
      // now that we have an index into the highest cell with ice, determine td as the
      // mean of the highest cell with ice and the one above that.  Note this assumes
      // all columns have some thawing.
      td_z = np.array( [ (data[1,0,i,td_i[i]] + data[1,0,i,td_i[i]+1]) / 2.
                         for i in range(len(td_i)) ] )
      plt.plot(x, td_z)
"""
if coord_order is None:
coord_order = ['x','z']
if type(varnames) is str:
varnames = [varnames,]
# get centroids
xyz = mesh.meshElemCentroids(mesh_filename, directory)
# round to avoid issues
xyz = np.round(xyz, decimals=5)
# get ordering of centroids
dtype = [(coord_order[0], float), (coord_order[1], float)]
num_order = []
for i in coord_order:
if i == 'x':
num_order.append(0)
elif i == 'y':
num_order.append(1)
elif i == 'z':
num_order.append(2)
xyz_sort_order = np.array([tuple([xyz[i,x] for x in num_order]) for i in range(len(xyz))], dtype=dtype)
xyz_sorting = xyz_sort_order.argsort(order=coord_order)
with h5py.File(os.path.join(directory,filename),'r') as dat:
keys_avail = dat[fullname(varnames[0])].keys()
keys_avail.sort(lambda a,b: int.__cmp__(int(a),int(b)))
if keys == 'all':
keys = keys_avail
elif type(keys) is str:
keys = [keys,]
elif type(keys) is int:
keys = [keys_avail[keys],]
elif type(keys) is slice:
keys = keys_avail[keys]
elif type(keys) is list:
if all(type(k) is int for k in keys):
keys = [keys_avail[k] for k in keys]
elif all(type(k) is str for k in keys):
pass
else:
raise RuntimeError("Keys requested cannot be processed -- should be 'all', int, or str key, or list of ints or strs.")
# get data
vals = np.zeros((len(varnames)+2, len(keys), len(xyz)), 'd')
for i,key in enumerate(keys):
if deformable:
xyz = mesh.meshElemCentroids(mesh_filename, directory)
vals[0,i,:] = xyz[xyz_sorting,num_order[0]]
vals[1,i,:] = xyz[xyz_sorting,num_order[1]]
for j,varname in enumerate(varnames):
vals[j+2,i,:] = dat[fullname(varname)][key][:,0][xyz_sorting]
# reshape the data
# determine nx
nx = len(set(vals[0,0,:]))
nz = vals.shape[2] / nx
if (nx * nz != vals.shape[2]):
raise RuntimeError("Assumption about first coordinate being cleanly binnable is falling apart -- ask Ethan to rethink this algorithm!")
shp = vals.shape
if not return_map:
return vals.reshape(shp[0], shp[1], nx, nz)
else:
return vals.reshape(shp[0], shp[1], nx, nz), xyz_sorting.reshape(nx, nz)
def plot(dataset, ax, cax=None, vmin=None, vmax=None, cmap="jet",
label=None, mesh_filename="visdump_mesh.h5", directory=".", y_coord=0.0,
linewidths=1):
"""Draws a dataset on an ax."""
import matplotlib.collections
from matplotlib import pyplot as plt
if vmin is None:
vmin = dataset.min()
if vmax is None:
vmax = dataset.max()
# get the mesh and collapse to 2D
etype, coords, conn = mesh.meshElemXYZ(filename=mesh_filename, directory=directory)
    if etype != 'HEX':
raise RuntimeError("Only works for Hexs")
coords2 = np.array([[coords[i][0::2] for i in c[1:] if abs(coords[i][1] - y_coord) < 1.e-8] for c in conn])
try:
assert coords2.shape[2] == 2
assert coords2.shape[1] == 4
except AssertionError:
print(coords2.shape)
for c in conn:
if len(c) != 9:
print c
raise RuntimeError("what is a conn?")
coords3 = np.array([coords[i][:] for i in c[1:] if abs(coords[i][1] - y_coord) < 1.e-8])
if coords3.shape[0] != 4:
print coords
raise RuntimeError("Unable to squash to 2D")
# reorder anti-clockwise
for i,c in enumerate(coords2):
centroid = c.mean(axis=0)
def angle(p1,p2):
a1 = np.arctan2((p1[1]-centroid[1]),(p1[0]-centroid[0]))
a2 = np.arctan2((p2[1]-centroid[1]),(p2[0]-centroid[0]))
if a1 < a2:
return -1
elif a2 < a1:
return 1
else:
return 0
c2 = np.array(sorted(c,angle))
coords2[i] = c2
polygons = matplotlib.collections.PolyCollection(coords2, edgecolor='k', cmap=cmap, linewidths=linewidths)
polygons.set_array(dataset)
polygons.set_clim(vmin,vmax)
ax.add_collection(polygons)
xmin = min(c[0] for c in coords.itervalues())
xmax = max(c[0] for c in coords.itervalues())
zmin = min(c[2] for c in coords.itervalues())
zmax = max(c[2] for c in coords.itervalues())
ax.set_xlim(xmin,xmax)
ax.set_ylim(zmin,zmax)
if cax is not None:
cb = plt.colorbar(polygons, cax=cax)
if label is not None:
cb.set_label(label)
return ((xmin,xmax),(zmin,zmax))
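# Minimal usage sketch for plot() (illustrative only; `cell_values` is a
# placeholder for a 1D array with one value per mesh element in the
# y=y_coord slice, and the default mesh filename above is assumed):
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   plot(cell_values, ax, label='temperature [K]')
#   plt.show()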
| bsd-3-clause | -6,002,022,548,617,249,000 | 35.687204 | 143 | 0.575119 | false | 3.435863 | false | false | false |
gunan/tensorflow | tensorflow/python/keras/layers/preprocessing/benchmarks/categorical_encoding_benchmark.py | 1 | 3177 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras categorical_encoding preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.layers.preprocessing import categorical_encoding
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
v2_compat.enable_v2_behavior()
class BenchmarkLayer(benchmark.Benchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, output_mode, batch_size, sequence_length,
max_tokens):
input_t = keras.Input(shape=(sequence_length,), dtype=dtypes.int32)
layer = categorical_encoding.CategoricalEncoding(
max_tokens=max_tokens, output_mode=output_mode)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_tensor_slices(
random_ops.random_uniform([batch_size * 10, sequence_length],
minval=0,
maxval=max_tokens - 1,
dtype=dtypes.int32))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "categorical_encoding|batch_%s|seq_length_%s|%s_max_tokens" % (
batch_size, sequence_length, max_tokens)
self.report_benchmark(iters=num_repeats, wall_time=avg_time, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 256, 2048]:
for sequence_length in [10, 1000]:
for num_tokens in [100, 1000, 20000]:
self.run_dataset_implementation(
output_mode="count",
batch_size=batch,
sequence_length=sequence_length,
max_tokens=num_tokens)
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,262,048,413,489,094,000 | 35.517241 | 80 | 0.652188 | false | 4.052296 | false | false | false |
mommermi/callhorizons | callhorizons/callhorizons.py | 1 | 60381 | """CALLHORIZONS - a Python interface to access JPL HORIZONS
ephemerides and orbital elements.
This module provides a convenient python interface to the JPL
HORIZONS system by directly accessing and parsing the HORIZONS
website. Ephemerides can be obtained through get_ephemerides,
orbital elements through get_elements. Function
export2pyephem provides an interface to the PyEphem module.
michael.mommert (at) nau.edu, latest version: v1.0.5, 2017-05-05.
This code is inspired by code created by Alex Hagen.
* v1.
* v1.0.5: 15-epoch limit for set_discreteepochs removed
* v1.0.4: improved asteroid and comet name parsing
* v1.0.3: ObsEclLon and ObsEclLat added to get_ephemerides
* v1.0.2: Python 3.5 compatibility implemented
* v1.0.1: get_ephemerides fixed
* v1.0: bugfixes completed, planets/satellites accessible, too
* v0.9: first release
"""
from __future__ import (print_function, unicode_literals)
import re
import sys
import time
import numpy as np
import warnings
try:
# Python 3
import urllib.request as urllib
except ImportError:
# Python 2
import urllib2 as urllib
warnings.filterwarnings('once', category=DeprecationWarning)
warnings.warn(('CALLHORIZONS is not maintained anymore; please use '
'astroquery.jplhorizons instead (https://github.com/'
'astropy/astroquery)'),
DeprecationWarning)
def _char2int(char):
""" translate characters to integer values (upper and lower case)"""
if char.isdigit():
return int(float(char))
if char.isupper():
return int(char, 36)
else:
return 26 + int(char, 36)
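# Illustrative examples (not part of the original module): _char2int() is used
# below to expand MPC packed designations and numbers, e.g.
# _char2int('K') == 20 (so a packed 'K15...' year expands to 2015) and
# _char2int('a') == 36 for lower-case cycle-count characters.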
class query():
# constructor
def __init__(self, targetname, smallbody=True, cap=True, nofrag=False,
comet=False, asteroid=False):
"""Initialize query to Horizons
:param targetname: HORIZONS-readable target number, name, or designation
:param smallbody: boolean use ``smallbody=False`` if targetname is a
planet or spacecraft (optional, default: `True`);
also use `True` if the targetname is exact and
should be queried as is
:param cap: set to `True` to return the current apparition for
comet targets
:param nofrag: set to `True` to disable HORIZONS's comet
fragment search
:param comet: set to `True` if this is a comet (will override
automatic targetname parsing)
:param asteroid: set to `True` if this is an asteroid (will override
automatic targetname parsing)
:return: None
"""
self.targetname = str(targetname)
self.not_smallbody = not smallbody
self.cap = cap
self.nofrag = nofrag
self.comet = comet # is this object a comet?
self.asteroid = asteroid # is this object an asteroid?
self.start_epoch = None
self.stop_epoch = None
self.step_size = None
self.discreteepochs = None
self.url = None
self.data = None
assert not (
self.comet and self.asteroid), 'Only one of comet or asteroid can be `True`.'
return None
# small body designation parsing
def parse_comet(self):
"""Parse `targetname` as if it were a comet.
:return: (string or None, int or None, string or None);
The designation, number and prefix, and name of the comet as derived
from `self.targetname` are extracted into a tuple; each element that
does not exist is set to `None`. Parenthesis in `self.targetname`
will be ignored.
:example: the following table shows the result of the parsing:
+--------------------------------+--------------------------------+
|targetname |(desig, prefixnumber, name) |
+================================+================================+
|1P/Halley |(None, '1P', 'Halley') |
+--------------------------------+--------------------------------+
|3D/Biela |(None, '3D', 'Biela') |
+--------------------------------+--------------------------------+
|9P/Tempel 1 |(None, '9P', 'Tempel 1') |
+--------------------------------+--------------------------------+
|73P/Schwassmann Wachmann 3 C |(None, '73P', |
| |'Schwassmann Wachmann 3 C') |
+--------------------------------+--------------------------------+
|73P-C/Schwassmann Wachmann 3 C |(None, '73P-C', |
| |'Schwassmann Wachmann 3 C') |
+--------------------------------+--------------------------------+
|73P-BB |(None, '73P-BB', None) |
+--------------------------------+--------------------------------+
|322P |(None, '322P', None) |
+--------------------------------+--------------------------------+
        |X/1106 C1                      |('1106 C1', 'X', None)          |
+--------------------------------+--------------------------------+
|P/1994 N2 (McNaught-Hartley) |('1994 N2', 'P', |
| |'McNaught-Hartley') |
+--------------------------------+--------------------------------+
|P/2001 YX127 (LINEAR) |('2001 YX127', 'P', 'LINEAR') |
+--------------------------------+--------------------------------+
|C/-146 P1 |('-146 P1', 'C', None) |
+--------------------------------+--------------------------------+
|C/2001 A2-A (LINEAR) |('2001 A2-A', 'C', 'LINEAR') |
+--------------------------------+--------------------------------+
|C/2013 US10 |('2013 US10', 'C', None) |
+--------------------------------+--------------------------------+
|C/2015 V2 (Johnson) |('2015 V2', 'C', 'Johnson') |
+--------------------------------+--------------------------------+
|C/2016 KA (Catalina) |('2016 KA', 'C', 'Catalina') |
+--------------------------------+--------------------------------+
"""
import re
pat = ('^(([1-9]+[PDCXAI](-[A-Z]{1,2})?)|[PDCXAI]/)' + # prefix [0,1,2]
'|([-]?[0-9]{3,4}[ _][A-Z]{1,2}([0-9]{1,3})?(-[1-9A-Z]{0,2})?)' +
# designation [3,4]
('|(([A-Z][a-z]?[A-Z]*[a-z]*[ -]?[A-Z]?[1-9]*[a-z]*)' +
'( [1-9A-Z]{1,2})*)') # name [5,6]
)
m = re.findall(pat, self.targetname.strip())
# print(m)
prefixnumber = None
desig = None
name = None
if len(m) > 0:
for el in m:
# prefix/number
if len(el[0]) > 0:
prefixnumber = el[0].replace('/', '')
# designation
if len(el[3]) > 0:
desig = el[3].replace('_', ' ')
# name
if len(el[5]) > 0:
if len(el[5]) > 1:
name = el[5]
return (desig, prefixnumber, name)
def parse_asteroid(self):
"""Parse `targetname` as if it were a asteroid.
:return: (string or None, int or None, string or None);
The designation, number, and name of the asteroid as derived from
`self.targetname` are extracted into a tuple; each element that
does not exist is set to `None`. Parenthesis in `self.targetname`
will be ignored. Packed designations and numbers are unpacked.
:example: the following table shows the result of the parsing:
+--------------------------------+---------------------------------+
|targetname |(desig, number, name) |
+================================+=================================+
|1 |(None, 1, None) |
+--------------------------------+---------------------------------+
|2 Pallas |(None, 2, Pallas) |
+--------------------------------+---------------------------------+
|\(2001\) Einstein |(None, 2001, Einstein) |
+--------------------------------+---------------------------------+
|1714 Sy |(None, 1714, Sy) |
+--------------------------------+---------------------------------+
|2014 MU69 |(2014 MU69, None, None) |
+--------------------------------+---------------------------------+
|(228195) 6675 P-L |(6675 P-L, 228195, None) |
+--------------------------------+---------------------------------+
|4101 T-3 |(4101 T-3, None, None) |
+--------------------------------+---------------------------------+
|4015 Wilson-Harrington (1979 VA)|(1979 VA, 4015, Wilson-Harrington|
+--------------------------------+---------------------------------+
|J95X00A |(1995 XA, None, None) |
+--------------------------------+---------------------------------+
|K07Tf8A |(2007 TA418, None, None) |
+--------------------------------+---------------------------------+
|G3693 |(None, 163693, None) |
+--------------------------------+---------------------------------+
|2017 U1 |(None, None, None) |
+--------------------------------+---------------------------------+
"""
pat = ('(([1-2][0-9]{0,3}[ _][A-Z]{2}[0-9]{0,3})' # designation [0,1]
'|([1-9][0-9]{3}[ _](P-L|T-[1-3])))' # Palomar-Leiden [0,2,3]
'|([IJKL][0-9]{2}[A-Z][0-9a-z][0-9][A-Z])' # packed desig [4]
'|([A-Za-z][0-9]{4})' # packed number [5]
'|([A-Z][A-Z]*[a-z][a-z]*[^0-9]*'
'[ -]?[A-Z]?[a-z]*[^0-9]*)' # name [6]
'|([1-9][0-9]*(\b|$))') # number [7,8]
# regex patterns that will be ignored as they might cause
# confusion
non_pat = ('([1-2][0-9]{0,3}[ _][A-Z][0-9]*(\b|$))') # comet desig
if sys.version_info > (3, 0):
raw = self.targetname.translate(str.maketrans('()', ' ')).strip()
else:
import string
raw = self.targetname.translate(string.maketrans('()',
' ')).strip()
# reject non_pat patterns
non_m = re.findall(non_pat, raw)
# print('reject', raw, non_m)
if len(non_m) > 0:
for ps in non_m:
for p in ps:
if p == '':
continue
raw = raw[:raw.find(p)] + raw[raw.find(p)+len(p):]
# match target patterns
m = re.findall(pat, raw)
# print(raw, m)
desig = None
number = None
name = None
if len(m) > 0:
for el in m:
# designation
if len(el[0]) > 0:
desig = el[0]
# packed designation (unpack here)
elif len(el[4]) > 0:
ident = el[4]
# old designation style, e.g.: 1989AB
if (len(ident.strip()) < 7 and ident[:4].isdigit() and
ident[4:6].isalpha()):
desig = ident[:4]+' '+ident[4:6]
# Palomar Survey
elif ident.find("PLS") == 0:
desig = ident[3:] + " P-L"
# Trojan Surveys
elif ident.find("T1S") == 0:
desig = ident[3:] + " T-1"
elif ident.find("T2S") == 0:
desig = ident[3:] + " T-2"
elif ident.find("T3S") == 0:
desig = ident[3:] + " T-3"
# insert blank in designations
elif (ident[0:4].isdigit() and ident[4:6].isalpha() and
ident[4] != ' '):
desig = ident[:4]+" "+ident[4:]
# MPC packed 7-digit designation
elif (ident[0].isalpha() and ident[1:3].isdigit() and
ident[-1].isalpha() and ident[-2].isdigit()):
yr = str(_char2int(ident[0]))+ident[1:3]
let = ident[3]+ident[-1]
num = str(_char2int(ident[4]))+ident[5]
num = num.lstrip("0")
desig = yr+' '+let+num
# nothing to do
else:
desig = ident
# packed number (unpack here)
elif len(el[5]) > 0:
ident = el[5]
number = ident = int(str(_char2int(ident[0]))+ident[1:])
# number
elif len(el[7]) > 0:
if sys.version_info > (3, 0):
number = int(float(el[7].translate(str.maketrans('()',
' '))))
else:
import string
number = int(float(el[7].translate(string.maketrans('()',
' '))))
# name (strip here)
elif len(el[6]) > 0:
if len(el[6].strip()) > 1:
name = el[6].strip()
return (desig, number, name)
def isorbit_record(self):
"""`True` if `targetname` appears to be a comet orbit record number.
NAIF record numbers are 6 digits, begin with a '9' and can
change at any time.
"""
import re
test = re.match('^9[0-9]{5}$', self.targetname.strip()) is not None
return test
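    # Illustrative example (hypothetical record number):
    # query('900191').isorbit_record() is True, since the name is six
    # digits starting with '9'.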
def iscomet(self):
"""`True` if `targetname` appears to be a comet. """
# treat this object as comet if there is a prefix/number
if self.comet is not None:
return self.comet
elif self.asteroid is not None:
return not self.asteroid
else:
return (self.parse_comet()[0] is not None or
self.parse_comet()[1] is not None)
def isasteroid(self):
"""`True` if `targetname` appears to be an asteroid."""
if self.asteroid is not None:
return self.asteroid
elif self.comet is not None:
return not self.comet
else:
return any(self.parse_asteroid()) is not None
# set epochs
def set_epochrange(self, start_epoch, stop_epoch, step_size):
"""Set a range of epochs, all times are UT
:param start_epoch: str;
start epoch of the format 'YYYY-MM-DD [HH-MM-SS]'
:param stop_epoch: str;
final epoch of the format 'YYYY-MM-DD [HH-MM-SS]'
:param step_size: str;
epoch step size, e.g., '1d' for 1 day, '10m' for 10 minutes...
:return: None
:example: >>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-26', '2016-10-25', '1d')
Note that dates are mandatory; if no time is given, midnight is assumed.
"""
self.start_epoch = start_epoch
self.stop_epoch = stop_epoch
self.step_size = step_size
return None
def set_discreteepochs(self, discreteepochs):
"""Set a list of discrete epochs, epochs have to be given as Julian
Dates
:param discreteepochs: array_like
list or 1D array of floats or strings
:return: None
:example: >>> import callhorizons
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_discreteepochs([2457446.177083, 2457446.182343])
"""
if not isinstance(discreteepochs, (list, np.ndarray)):
discreteepochs = [discreteepochs]
self.discreteepochs = list(discreteepochs)
# data access functions
@property
def fields(self):
"""returns list of available properties for all epochs"""
try:
return self.data.dtype.names
except AttributeError:
return []
def __len__(self):
"""returns total number of epochs that have been queried"""
try:
# Cast to int because a long is returned from shape on Windows.
return int(self.data.shape[0])
except AttributeError:
return 0
@property
def dates(self):
"""returns list of epochs that have been queried (format 'YYYY-MM-DD HH-MM-SS')"""
try:
return self.data['datetime']
except:
return []
@property
def query(self):
"""returns URL that has been used in calling HORIZONS"""
try:
return self.url
except:
return []
@property
def dates_jd(self):
"""returns list of epochs that have been queried (Julian Dates)"""
try:
return self.data['datetime_jd']
except:
return []
def __repr__(self):
"""returns brief query information"""
return "<callhorizons.query object: %s>" % self.targetname
def __str__(self):
"""returns information on the current query as string"""
output = "targetname: %s\n" % self.targetname
if self.discreteepochs is not None:
output += "discrete epochs: %s\n" % \
" ".join([str(epoch) for epoch in self.discreteepochs])
if (self.start_epoch is not None and self.stop_epoch is not None and
self.step_size is not None):
output += "epoch range from %s to %s in steps of %s\n" % \
(self.start_epoch, self.stop_epoch, self.step_size)
output += "%d data sets queried with %d different fields" % \
(len(self), len(self.fields))
return output
def __getitem__(self, key):
"""provides access to query data
:param key: str/int;
epoch index or property key
:return: query data according to key
"""
# check if data exist
if self.data is None or len(self.data) == 0:
print('CALLHORIZONS ERROR: run get_ephemerides or get_elements',
'first')
return None
return self.data[key]
# call functions
def get_ephemerides(self, observatory_code,
airmass_lessthan=99,
solar_elongation=(0, 180),
skip_daylight=False):
"""Call JPL HORIZONS website to obtain ephemerides based on the
provided targetname, epochs, and observatory_code. For a list
of valid observatory codes, refer to
http://minorplanetcenter.net/iau/lists/ObsCodesF.html
:param observatory_code: str/int;
observer's location code according to Minor Planet Center
:param airmass_lessthan: float;
maximum airmass (optional, default: 99)
:param solar_elongation: tuple;
permissible solar elongation range (optional, deg)
:param skip_daylight: boolean;
crop daylight epoch during query (optional)
:result: int; number of epochs queried
:example: >>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
>>> print (ceres.get_ephemerides(568), 'epochs queried')
The queried properties and their definitions are:
+------------------+-----------------------------------------------+
| Property | Definition |
+==================+===============================================+
| targetname | official number, name, designation [string] |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| datetime | epoch date and time (str, YYYY-MM-DD HH:MM:SS)|
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float) |
+------------------+-----------------------------------------------+
| solar_presence | information on Sun's presence (str) |
+------------------+-----------------------------------------------+
| lunar_presence | information on Moon's presence (str) |
+------------------+-----------------------------------------------+
| RA | target RA (float, J2000.0) |
+------------------+-----------------------------------------------+
| DEC | target DEC (float, J2000.0) |
+------------------+-----------------------------------------------+
| RA_rate | target rate RA (float, arcsec/s) |
+------------------+-----------------------------------------------+
| DEC_rate | target RA (float, arcsec/s, includes cos(DEC))|
+------------------+-----------------------------------------------+
| AZ | Azimuth meas East(90) of North(0) (float, deg)|
+------------------+-----------------------------------------------+
| EL | Elevation (float, deg) |
+------------------+-----------------------------------------------+
| airmass | target optical airmass (float) |
+------------------+-----------------------------------------------+
| magextinct | V-mag extinction due airmass (float, mag) |
+------------------+-----------------------------------------------+
| V | V magnitude (comets: total mag) (float, mag) |
+------------------+-----------------------------------------------+
| illumination | fraction of illuminated disk (float) |
+------------------+-----------------------------------------------+
| EclLon | heliocentr. ecl. long. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| EclLat | heliocentr. ecl. lat. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| ObsEclLon | obscentr. ecl. long. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| ObsEclLat | obscentr. ecl. lat. (float, deg, J2000.0) |
+------------------+-----------------------------------------------+
| r | heliocentric distance (float, au) |
+------------------+-----------------------------------------------+
| r_rate | heliocentric radial rate (float, km/s) |
+------------------+-----------------------------------------------+
| delta | distance from the observer (float, au) |
+------------------+-----------------------------------------------+
| delta_rate | obs-centric radial rate (float, km/s) |
+------------------+-----------------------------------------------+
| lighttime | one-way light time (float, s) |
+------------------+-----------------------------------------------+
| elong | solar elongation (float, deg) |
+------------------+-----------------------------------------------+
| elongFlag | app. position relative to Sun (str) |
+------------------+-----------------------------------------------+
| alpha | solar phase angle (float, deg) |
+------------------+-----------------------------------------------+
| sunTargetPA | PA of Sun->target vector (float, deg, EoN) |
+------------------+-----------------------------------------------+
| velocityPA | PA of velocity vector (float, deg, EoN) |
+------------------+-----------------------------------------------+
| GlxLon | galactic longitude (float, deg) |
+------------------+-----------------------------------------------+
| GlxLat | galactic latitude (float, deg) |
+------------------+-----------------------------------------------+
| RA_3sigma | 3sigma pos. unc. in RA (float, arcsec) |
+------------------+-----------------------------------------------+
| DEC_3sigma | 3sigma pos. unc. in DEC (float, arcsec) |
+------------------+-----------------------------------------------+
"""
# queried fields (see HORIZONS website for details)
# if fields are added here, also update the field identification below
quantities = '1,3,4,8,9,10,18,19,20,21,23,24,27,31,33,36'
# encode objectname for use in URL
objectname = urllib.quote(self.targetname.encode("utf8"))
# construct URL for HORIZONS query
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l" \
+ "&TABLE_TYPE='OBSERVER'" \
+ "&QUANTITIES='" + str(quantities) + "'" \
+ "&CSV_FORMAT='YES'" \
+ "&ANG_FORMAT='DEG'" \
+ "&CAL_FORMAT='BOTH'" \
+ "&SOLAR_ELONG='" + str(solar_elongation[0]) + "," \
+ str(solar_elongation[1]) + "'" \
+ "&CENTER='"+str(observatory_code)+"'"
if self.not_smallbody:
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "'"
elif self.cap and self.comet:
for ident in self.parse_comet():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='DES=" + \
urllib.quote(ident.encode("utf8")) + "%3B" + \
("CAP'" if self.cap else "'")
elif self.isorbit_record():
# Comet orbit record. Do not use DES, CAP. This test must
# occur before asteroid test.
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "%3B'"
elif self.isasteroid() and not self.comet:
# for asteroids, use 'DES="designation";'
for ident in self.parse_asteroid():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='" + \
urllib.quote(str(ident).encode("utf8")) + "%3B'"
elif self.iscomet() and not self.asteroid:
# for comets, potentially append the current apparition
# (CAP) parameter, or the fragmentation flag (NOFRAG)
for ident in self.parse_comet():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='DES=" + \
urllib.quote(ident.encode("utf8")) + "%3B" + \
("NOFRAG%3B" if self.nofrag else "") + \
("CAP'" if self.cap else "'")
# elif (not self.targetname.replace(' ', '').isalpha() and not
# self.targetname.isdigit() and not
# self.targetname.islower() and not
# self.targetname.isupper()):
# # lower case + upper case + numbers = pot. case sensitive designation
# url += "&COMMAND='DES=" + \
# urllib.quote(self.targetname.encode("utf8")) + "%3B'"
else:
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "%3B'"
if self.discreteepochs is not None:
url += "&TLIST="
for date in self.discreteepochs:
url += "'" + str(date) + "'"
elif (self.start_epoch is not None and self.stop_epoch is not None and
self.step_size is not None):
url += "&START_TIME='" \
+ urllib.quote(self.start_epoch.encode("utf8")) + "'" \
+ "&STOP_TIME='" \
+ urllib.quote(self.stop_epoch.encode("utf8")) + "'" \
+ "&STEP_SIZE='" + str(self.step_size) + "'"
else:
raise IOError('no epoch information given')
if airmass_lessthan < 99:
url += "&AIRMASS='" + str(airmass_lessthan) + "'"
if skip_daylight:
url += "&SKIP_DAYLT='YES'"
else:
url += "&SKIP_DAYLT='NO'"
self.url = url
# print (url)
# call HORIZONS
i = 0 # count number of connection tries
while True:
try:
src = urllib.urlopen(url).readlines()
break
except urllib.URLError:
time.sleep(0.1)
# in case the HORIZONS website is blocked (due to another query)
# wait 0.1 second and try again
i += 1
if i > 50:
return 0 # website could not be reached
# disseminate website source code
# identify header line and extract data block (ephemerides data)
# also extract targetname, absolute mag. (H), and slope parameter (G)
headerline = []
datablock = []
in_datablock = False
H, G = np.nan, np.nan
for idx, line in enumerate(src):
line = line.decode('UTF-8')
if "Date__(UT)__HR:MN" in line:
headerline = line.split(',')
if "$$EOE\n" in line:
in_datablock = False
if in_datablock:
datablock.append(line)
if "$$SOE\n" in line:
in_datablock = True
if "Target body name" in line:
targetname = line[18:50].strip()
if ("rotational period in hours)" in
src[idx].decode('UTF-8')):
HGline = src[idx+2].decode('UTF-8').split('=')
if 'B-V' in HGline[2] and 'G' in HGline[1]:
try:
H = float(HGline[1].rstrip('G'))
except ValueError:
pass
try:
G = float(HGline[2].rstrip('B-V'))
except ValueError:
pass
if ("Multiple major-bodies match string" in
src[idx].decode('UTF-8') or
("Matching small-bodies" in src[idx].decode('UTF-8') and not
"No matches found" in src[idx+1].decode('UTF-8'))):
raise ValueError('Ambiguous target name; check URL: %s' %
url)
if ("Matching small-bodies" in src[idx].decode('UTF-8') and
"No matches found" in src[idx+1].decode('UTF-8')):
raise ValueError('Unknown target; check URL: %s' % url)
# field identification for each line
ephemerides = []
for line in datablock:
line = line.split(',')
            # ignore lines that don't hold any data
if len(line) < len(quantities.split(',')):
continue
this_eph = []
fieldnames = []
datatypes = []
# create a dictionary for each date (each line)
for idx, item in enumerate(headerline):
if ('Date__(UT)__HR:MN' in item):
this_eph.append(line[idx].strip())
fieldnames.append('datetime')
datatypes.append(object)
if ('Date_________JDUT' in item):
this_eph.append(np.float64(line[idx]))
fieldnames.append('datetime_jd')
datatypes.append(np.float64)
# read out and convert solar presence
try:
this_eph.append({'*': 'daylight', 'C': 'civil twilight',
'N': 'nautical twilight',
'A': 'astronomical twilight',
' ': 'dark',
't': 'transiting'}[line[idx+1]])
except KeyError:
this_eph.append('n.a.')
fieldnames.append('solar_presence')
datatypes.append(object)
# read out and convert lunar presence
try:
this_eph.append({'m': 'moonlight',
' ': 'dark'}[line[idx+2]])
except KeyError:
this_eph.append('n.a.')
fieldnames.append('lunar_presence')
datatypes.append(object)
if (item.find('R.A._(ICRF/J2000.0)') > -1):
this_eph.append(np.float64(line[idx]))
fieldnames.append('RA')
datatypes.append(np.float64)
if (item.find('DEC_(ICRF/J2000.0)') > -1):
this_eph.append(np.float64(line[idx]))
fieldnames.append('DEC')
datatypes.append(np.float64)
if (item.find('dRA*cosD') > -1):
try:
this_eph.append(np.float64(line[idx])/3600.) # "/s
except ValueError:
this_eph.append(np.nan)
fieldnames.append('RA_rate')
datatypes.append(np.float64)
if (item.find('d(DEC)/dt') > -1):
try:
this_eph.append(np.float64(line[idx])/3600.) # "/s
except ValueError:
this_eph.append(np.nan)
fieldnames.append('DEC_rate')
datatypes.append(np.float64)
if (item.find('Azi_(a-app)') > -1):
try: # if AZ not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
fieldnames.append('AZ')
datatypes.append(np.float64)
except ValueError:
pass
if (item.find('Elev_(a-app)') > -1):
try: # if EL not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
fieldnames.append('EL')
datatypes.append(np.float64)
except ValueError:
pass
if (item.find('a-mass') > -1):
try: # if airmass not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('airmass')
datatypes.append(np.float64)
if (item.find('mag_ex') > -1):
try: # if mag_ex not given, e.g. for space telescopes
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('magextinct')
datatypes.append(np.float64)
if (item.find('APmag') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('V')
datatypes.append(np.float64)
if (item.find('Illu%') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('illumination')
datatypes.append(np.float64)
if (item.find('hEcl-Lon') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('EclLon')
datatypes.append(np.float64)
if (item.find('hEcl-Lat') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('EclLat')
datatypes.append(np.float64)
if (item.find('ObsEcLon') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('ObsEclLon')
datatypes.append(np.float64)
if (item.find('ObsEcLat') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('ObsEclLat')
datatypes.append(np.float64)
if (item.find(' r') > -1) and \
(headerline[idx+1].find("rdot") > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('r')
datatypes.append(np.float64)
if (item.find('rdot') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('r_rate')
datatypes.append(np.float64)
if (item.find('delta') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('delta')
datatypes.append(np.float64)
if (item.find('deldot') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('delta_rate')
datatypes.append(np.float64)
if (item.find('1-way_LT') > -1):
try:
this_eph.append(np.float64(line[idx])*60.) # seconds
except ValueError:
this_eph.append(np.nan)
fieldnames.append('lighttime')
datatypes.append(np.float64)
if (item.find('S-O-T') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('elong')
datatypes.append(np.float64)
# in the case of space telescopes, '/r S-T-O' is used;
# ground-based telescopes have both parameters in separate
# columns
if (item.find('/r S-T-O') > -1):
this_eph.append({'/L': 'leading', '/T': 'trailing'}
[line[idx].split()[0]])
fieldnames.append('elongFlag')
datatypes.append(object)
try:
this_eph.append(np.float64(line[idx].split()[1]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('alpha')
datatypes.append(np.float64)
elif (item.find('S-T-O') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('alpha')
datatypes.append(np.float64)
elif (item.find('/r') > -1):
this_eph.append({'/L': 'leading', '/T': 'trailing',
'/?': 'not defined'}
[line[idx]])
fieldnames.append('elongFlag')
datatypes.append(object)
if (item.find('PsAng') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('sunTargetPA')
datatypes.append(np.float64)
if (item.find('PsAMV') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('velocityPA')
datatypes.append(np.float64)
if (item.find('GlxLon') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('GlxLon')
datatypes.append(np.float64)
if (item.find('GlxLat') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('GlxLat')
datatypes.append(np.float64)
if (item.find('RA_3sigma') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('RA_3sigma')
datatypes.append(np.float64)
if (item.find('DEC_3sigma') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('DEC_3sigma')
datatypes.append(np.float64)
# in the case of a comet, use total mag for V
if (item.find('T-mag') > -1):
try:
this_eph.append(np.float64(line[idx]))
except ValueError:
this_eph.append(np.nan)
fieldnames.append('V')
datatypes.append(np.float64)
# append target name
this_eph.append(targetname)
fieldnames.append('targetname')
datatypes.append(object)
# append H
this_eph.append(H)
fieldnames.append('H')
datatypes.append(np.float64)
# append G
this_eph.append(G)
fieldnames.append('G')
datatypes.append(np.float64)
if len(this_eph) > 0:
ephemerides.append(tuple(this_eph))
if len(ephemerides) == 0:
return 0
# combine ephemerides with column names and data types into ndarray
assert len(ephemerides[0]) == len(fieldnames) == len(datatypes)
self.data = np.array(ephemerides,
dtype=[(str(fieldnames[i]), datatypes[i]) for i
in range(len(fieldnames))])
return len(self)
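    # Sketch of the ndarray assembly above (explanatory comment only, not part
    # of the original code): each parsed CSV line contributes one tuple, and
    # the collected field names and dtypes become a structured-array dtype.
    # For example (values illustrative):
    #
    #   ephemerides = [('2016-02-23 00:00', 2457441.5, 'dark', ...)]
    #   dtype = [('datetime', object), ('datetime_jd', np.float64), ...]
    #   data = np.array(ephemerides, dtype=dtype)
    #   data['datetime_jd'][0]   # -> 2457441.5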
def get_elements(self, center='500@10', asteroid=False, comet=False):
"""Call JPL HORIZONS website to obtain orbital elements based on the
provided targetname, epochs, and center code. For valid center
codes, please refer to http://ssd.jpl.nasa.gov/horizons.cgi
:param center: str;
center body (default: 500@10 = Sun)
:result: int; number of epochs queried
:example: >>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
>>> print (ceres.get_elements(), 'epochs queried')
The queried properties and their definitions are:
+------------------+-----------------------------------------------+
| Property | Definition |
+==================+===============================================+
| targetname | official number, name, designation [string] |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float) |
+------------------+-----------------------------------------------+
| e | eccentricity (float) |
+------------------+-----------------------------------------------+
| p | periapsis distance (float, au) |
+------------------+-----------------------------------------------+
| a | semi-major axis (float, au) |
+------------------+-----------------------------------------------+
| incl | inclination (float, deg) |
+------------------+-----------------------------------------------+
| node | longitude of Asc. Node (float, deg) |
+------------------+-----------------------------------------------+
| argper | argument of the perifocus (float, deg) |
+------------------+-----------------------------------------------+
| Tp | time of periapsis (float, Julian Date) |
+------------------+-----------------------------------------------+
| meananomaly | mean anomaly (float, deg) |
+------------------+-----------------------------------------------+
| trueanomaly | true anomaly (float, deg) |
+------------------+-----------------------------------------------+
| period | orbital period (float, Earth yr) |
+------------------+-----------------------------------------------+
| Q | apoapsis distance (float, au) |
+------------------+-----------------------------------------------+
"""
# encode objectname for use in URL
objectname = urllib.quote(self.targetname.encode("utf8"))
# call Horizons website and extract data
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l" \
+ "&TABLE_TYPE='ELEMENTS'" \
+ "&CSV_FORMAT='YES'" \
+ "&CENTER='" + str(center) + "'" \
+ "&OUT_UNITS='AU-D'" \
+ "&REF_PLANE='ECLIPTIC'" \
+ "REF_SYSTEM='J2000'" \
+ "&TP_TYPE='ABSOLUTE'" \
+ "&ELEM_LABELS='YES'" \
+ "CSV_FORMAT='YES'" \
+ "&OBJ_DATA='YES'"
# check if self.targetname is a designation
# lower case + upper case + numbers = pot. case sensitive designation
if self.not_smallbody:
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "'"
elif self.isorbit_record():
# Comet orbit record. Do not use DES, CAP. This test must
# occur before asteroid test.
url += "&COMMAND='" + \
urllib.quote(self.targetname.encode("utf8")) + "%3B'"
elif self.isasteroid() and not self.comet:
# for asteroids, use 'DES="designation";'
for ident in self.parse_asteroid():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='" + \
urllib.quote(str(ident).encode("utf8")) + "%3B'"
elif self.iscomet() and not self.asteroid:
# for comets, potentially append the current apparition
# (CAP) parameter, or the fragmentation flag (NOFRAG)
for ident in self.parse_comet():
if ident is not None:
break
if ident is None:
ident = self.targetname
url += "&COMMAND='DES=" + \
urllib.quote(ident.encode("utf8")) + "%3B" + \
("NOFRAG%3B" if self.nofrag else "") + \
("CAP'" if self.cap else "'")
# elif (not self.targetname.replace(' ', '').isalpha() and not
# self.targetname.isdigit() and not
# self.targetname.islower() and not
# self.targetname.isupper()):
# url += "&COMMAND='DES=" + str(objectname) + "%3B'"
else:
url += "&COMMAND='" + str(objectname) + "%3B'"
if self.discreteepochs is not None:
url += "&TLIST="
for date in self.discreteepochs:
url += "'" + str(date) + "'"
elif (self.start_epoch is not None and self.stop_epoch is not None and
self.step_size is not None):
url += "&START_TIME='" \
+ urllib.quote(self.start_epoch.encode("utf8")) + "'" \
+ "&STOP_TIME='" \
+ urllib.quote(self.stop_epoch.encode("utf8")) + "'" \
+ "&STEP_SIZE='" + str(self.step_size) + "'"
else:
raise IOError('no epoch information given')
self.url = url
i = 0 # count number of connection tries
while True:
try:
src = urllib.urlopen(url).readlines()
break
except urllib.URLError:
time.sleep(0.1)
# in case the HORIZONS website is blocked (due to another query)
                # wait 0.1 second and try again
i += 1
if i > 50:
return 0 # website could not be reached
# disseminate website source code
# identify header line and extract data block (elements data)
# also extract targetname, abs. magnitude (H), and slope parameter (G)
headerline = []
datablock = []
in_datablock = False
H, G = np.nan, np.nan
for idx, line in enumerate(src):
line = line.decode('UTF-8')
if 'JDTDB,' in line:
headerline = line.split(',')
if "$$EOE\n" in line:
in_datablock = False
if in_datablock:
datablock.append(line)
if "$$SOE\n" in line:
in_datablock = True
if "Target body name" in line:
targetname = line[18:50].strip()
if "rotational period in hours)" in src[idx].decode('UTF-8'):
HGline = src[idx+2].decode('UTF-8').split('=')
if 'B-V' in HGline[2] and 'G' in HGline[1]:
try:
H = float(HGline[1].rstrip('G'))
except ValueError:
pass
try:
G = float(HGline[2].rstrip('B-V'))
except ValueError:
pass
if ("Multiple major-bodies match string" in src[idx].decode('UTF-8') or
("Matching small-bodies" in src[idx].decode('UTF-8') and not
"No matches found" in src[idx+1].decode('UTF-8'))):
raise ValueError('Ambiguous target name; check URL: %s' %
url)
if ("Matching small-bodies" in src[idx].decode('UTF-8') and
"No matches found" in src[idx+1].decode('UTF-8')):
raise ValueError('Unknown target; check URL: %s' % url)
        # field identification for each line in the data block
elements = []
for line in datablock:
line = line.split(',')
this_el = []
fieldnames = []
datatypes = []
# create a dictionary for each date (each line)
for idx, item in enumerate(headerline):
if (item.find('JDTDB') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('datetime_jd')
datatypes.append(np.float64)
if (item.find('EC') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('e')
datatypes.append(np.float64)
if (item.find('QR') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('p')
datatypes.append(np.float64)
if (item.find('A') > -1) and len(item.strip()) == 1:
this_el.append(np.float64(line[idx]))
fieldnames.append('a')
datatypes.append(np.float64)
if (item.find('IN') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('incl')
datatypes.append(np.float64)
if (item.find('OM') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('node')
datatypes.append(np.float64)
if (item.find('W') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('argper')
datatypes.append(np.float64)
if (item.find('Tp') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('Tp')
datatypes.append(np.float64)
if (item.find('MA') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('meananomaly')
datatypes.append(np.float64)
if (item.find('TA') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('trueanomaly')
datatypes.append(np.float64)
if (item.find('PR') > -1):
# Earth years
this_el.append(np.float64(line[idx])/(365.256))
fieldnames.append('period')
datatypes.append(np.float64)
if (item.find('AD') > -1):
this_el.append(np.float64(line[idx]))
fieldnames.append('Q')
datatypes.append(np.float64)
# append targetname
this_el.append(targetname)
fieldnames.append('targetname')
datatypes.append(object)
# append H
this_el.append(H)
fieldnames.append('H')
datatypes.append(np.float64)
# append G
this_el.append(G)
fieldnames.append('G')
datatypes.append(np.float64)
if len(this_el) > 0:
elements.append(tuple(this_el))
if len(elements) == 0:
return 0
# combine elements with column names and data types into ndarray
assert len(elements[0]) == len(fieldnames) == len(datatypes)
self.data = np.array(elements,
dtype=[(str(fieldnames[i]), datatypes[i]) for i
in range(len(fieldnames))])
return len(self)
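    # Usage sketch for get_elements (added comment; the epoch and field access
    # are illustrative):
    #
    #   ceres = callhorizons.query('Ceres')
    #   ceres.set_discreteepochs(2457446.177083)
    #   if ceres.get_elements() > 0:
    #       a, e, i = ceres['a'][0], ceres['e'][0], ceres['incl'][0]
    #       period_years = ceres['period'][0]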
def export2pyephem(self, center='500@10', equinox=2000.):
"""Call JPL HORIZONS website to obtain orbital elements based on the
provided targetname, epochs, and center code and create a
PyEphem (http://rhodesmill.org/pyephem/) object. This function
requires PyEphem to be installed.
:param center: str;
center body (default: 500@10 = Sun)
:param equinox: float;
equinox (default: 2000.0)
:result: list;
list of PyEphem objects, one per epoch
:example: >>> import callhorizons
>>> import numpy
>>> import ephem
>>>
>>> ceres = callhorizons.query('Ceres')
>>> ceres.set_epochrange('2016-02-23 00:00', '2016-02-24 00:00', '1h')
>>> ceres_pyephem = ceres.export2pyephem()
>>>
>>> nau = ephem.Observer() # setup observer site
>>> nau.lon = -111.653152/180.*numpy.pi
>>> nau.lat = 35.184108/180.*numpy.pi
>>> nau.elevation = 2100 # m
>>> nau.date = '2015/10/5 01:23' # UT
>>> print ('next rising: %s' % nau.next_rising(ceres_pyephem[0]))
>>> print ('next transit: %s' % nau.next_transit(ceres_pyephem[0]))
>>> print ('next setting: %s' % nau.next_setting(ceres_pyephem[0]))
"""
try:
import ephem
except ImportError:
raise ImportError(
'export2pyephem requires PyEphem to be installed')
# obtain orbital elements
self.get_elements(center)
objects = []
for el in self.data:
n = 0.9856076686/np.sqrt(el['a']**3) # mean daily motion
epoch_djd = el['datetime_jd']-2415020.0 # Dublin Julian date
epoch = ephem.date(epoch_djd)
epoch_str = "%d/%f/%d" % (epoch.triple()[1], epoch.triple()[2],
epoch.triple()[0])
# export to PyEphem
objects.append(ephem.readdb("%s,e,%f,%f,%f,%f,%f,%f,%f,%s,%i,%f,%f" %
(el['targetname'], el['incl'], el['node'],
el['argper'], el['a'], n, el['e'],
el['meananomaly'], epoch_str, equinox,
el['H'], el['G'])))
return objects
| mit | 2,673,770,013,249,938,400 | 44.952055 | 90 | 0.409781 | false | 4.403194 | false | false | false |
torkelsson/meta-package-manager | meta_package_manager/managers/mas.py | 1 | 5319 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2018 Kevin Deldycke <[email protected]>
# and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import re
from boltons.cacheutils import cachedproperty
from ..base import PackageManager
from ..platform import MACOS
class MAS(PackageManager):
platforms = frozenset([MACOS])
# 'mas outdated' output has been changed in 1.3.1: https://github.com
# /mas-cli/mas/commit/ca72ee42b1c5f482513b1d2fbf780b0bf3d9618b
requirement = '>= 1.3.1'
name = "Mac AppStore"
def get_version(self):
""" Fetch version from ``mas version`` output."""
return self.run([self.cli_path, 'version'])
@cachedproperty
def installed(self):
""" Fetch installed packages from ``mas list`` output.
Raw CLI output samples:
.. code-block:: shell-session
$ mas list
408981434 iMovie (10.1.4)
747648890 Telegram (2.30)
"""
installed = {}
output = self.run([self.cli_path] + self.cli_args + ['list'])
if output:
regexp = re.compile(r'(\d+) (.*) \((\S+)\)$')
for package in output.split('\n'):
match = regexp.match(package)
if match:
package_id, package_name, installed_version = \
match.groups()
installed[package_id] = {
'id': package_id,
'name': package_name,
# Normalize unknown version. See:
# https://github.com/mas-cli/mas/commit
# /1859eaedf49f6a1ebefe8c8d71ec653732674341
'installed_version': (
installed_version if installed_version != 'unknown'
else None)}
return installed
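    # Parsing sketch (added comment): given the sample line
    #   "408981434 iMovie (10.1.4)"
    # the regexp above yields the groups ('408981434', 'iMovie', '10.1.4'), so
    #   installed['408981434'] == {'id': '408981434', 'name': 'iMovie',
    #                              'installed_version': '10.1.4'}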
def search(self, query):
""" Fetch matching packages from ``mas search`` output.
Raw CLI output samples:
.. code-block:: shell-session
$ mas search python
689176796 Python Runner
630736088 Learning Python
945397020 Run Python
891162632 Python Lint
1025391371 Tutorial for Python
1164498373 PythonGames
"""
matches = {}
output = self.run([self.cli_path] + self.cli_args + [
'search', query])
if output:
regexp = re.compile(r'(\d+) (.*)$')
for package in output.split('\n'):
match = regexp.match(package)
if match:
package_id, package_name = match.groups()
matches[package_id] = {
'id': package_id,
'name': package_name,
'latest_version': None,
'exact': self.exact_match(query, package_name)}
return matches
@cachedproperty
def outdated(self):
""" Fetch outdated packages from ``mas outdated`` output.
Raw CLI output samples:
.. code-block:: shell-session
$ mas outdated
.. todo
An example of ``mas outdated`` output is missing above.
"""
outdated = {}
output = self.run([self.cli_path] + self.cli_args + ['outdated'])
if output:
regexp = re.compile(r'(\d+) (.*) \((\S+) -> (\S+)\)$')
for package in output.split('\n'):
match = regexp.match(package)
if match:
package_id, package_name, installed_version, \
latest_version = match.groups()
outdated[package_id] = {
'id': package_id,
'name': package_name,
'latest_version': latest_version,
# Normalize unknown version. See:
# https://github.com/mas-cli/mas/commit
# /1859eaedf49f6a1ebefe8c8d71ec653732674341
'installed_version': (
installed_version if installed_version != 'unknown'
else None)}
return outdated
def upgrade_cli(self, package_id=None):
cmd = [self.cli_path] + self.cli_args + ['upgrade']
if package_id:
cmd.append(package_id)
return cmd
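    # Example (added comment): with a typical cli_path/cli_args this builds
    # something like ['/usr/local/bin/mas', 'upgrade', '408981434'] for one
    # package, or the same command without an id to upgrade everything (see
    # upgrade_all_cli below). The exact binary path depends on the install.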
def upgrade_all_cli(self):
return self.upgrade_cli()
| gpl-2.0 | -8,493,799,302,312,990,000 | 31.432927 | 79 | 0.536943 | false | 4.275723 | false | false | false |
apuigsech/CryptoAPI | CryptoAPI/CryptoAPI.py | 1 | 8197 | #!/usr/bin/env python
# CryptoAPI: Python Crypto API implementation
#
# Copyright (c) 2014 - Albert Puigsech Galicia ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from CryptsyAPI import CryptsyAPI
from BittrexAPI import BittrexAPI
class CryptoAPI_iface(object):
def balances(self, currency=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def marketstatus(self, market=None, depth_level=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def orders(self, market=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
raise NotImplementedError( "Method not implemented" )
def delorder(self, order_id=None, simulation=None):
raise NotImplementedError( "Method not implemented" )
class CryptoAPI_cryptsy(CryptsyAPI, CryptoAPI_iface):
def __init__(self, key, secret, simulation=False, cached=False):
super(CryptoAPI_cryptsy, self).__init__(key, secret, simulation, cached)
CryptoAPI_iface.__init__(self)
def balances(self, currency=None, cached=None):
if cached == None:
cached = self.cached
ret = {
'available': {},
'hold': {},
'total': {},
}
info = self.getinfo(cached)['return']
for i in info['balances_available']:
if i == currency or (currency == None and (float(info['balances_available'][i]) > 0 or info['balances_hold'].has_key(i))):
ret['available'][i] = float(info['balances_available'][i])
ret['hold'][i] = float(info['balances_hold'][i]) if info['balances_hold'].has_key(i) else float(0)
ret['total'][i] = ret['available'][i] + ret['hold'][i]
return ret
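    # Shape of the returned structure (added comment; currencies and amounts
    # are made-up examples):
    #
    #   {'available': {'BTC': 0.5, 'LTC': 10.0},
    #    'hold':      {'BTC': 0.1, 'LTC': 0.0},
    #    'total':     {'BTC': 0.6, 'LTC': 10.0}}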
def marketstatus(self, market=None, depth_level=None, cached=None):
if cached == None:
cached = self.cached
status = self.getmarkets(cached)['return']
ret = {}
for i in status:
marketname = '{0}-{1}'.format(i['secondary_currency_code'], i['primary_currency_code'])
if marketname == market or i['primary_currency_code'] == market or i['secondary_currency_code'] == market or market == None:
ret[marketname] = {
'id': int(i['marketid']),
'last_price': float(i['last_trade']),
'high_price': float(i['high_trade']),
'low_price': float(i['low_trade']),
'volume': float(i['current_volume']),
'depth': None
}
if depth_level != None and depth_level > 0:
depth = self.depth(i['marketid'], cached)['return']
ret[marketname]['depth'] = {
'buy': [],
'sell': [],
}
for j in depth['buy'][0:depth_level]:
ret[marketname]['depth']['buy'].append([float(j[0]),float(j[1])])
for j in depth['sell'][0:depth_level]:
ret[marketname]['depth']['sell'].append([float(j[0]),float(j[1])])
return ret
def orders(self, market=None, cached=None):
if cached == None:
cached = self.cached
orders = self.allmyorders(cached)['return']
ret = []
for i in orders:
marketname = self._getmarketfromid(i['marketid'])
ret.append({
'id': int(i['orderid']),
'market': 'TBD',
'price': i['price'],
'amount': i['orig_quantity'],
'remaining_amount': i['quantity'],
})
return ret
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
if simulation == None:
simulation = self.simulation
status = self.marketstatus(market, 1)
print status
if pricetype == 'market':
price = 4294967296
elif pricetype == 'best':
if type == 'buy':
price = status[market]['depth']['sell'][0][0]
elif type == 'sell':
price = status[market]['depth']['buy'][0][0]
elif pricetype == 'border' or pricetype == 'overboder':
if type == 'buy':
price = status[market]['depth']['buy'][0][0]
elif type == 'sell':
price = status[market]['depth']['sell'][0][0]
if pricetype == 'overboder':
if type == 'buy':
price += 0.00000001
elif type == 'sell':
price -= 0.00000001
return self.createorder(status[market]['id'], type, amount, price)
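    # Price selection sketch (added comment, using a hypothetical order book
    # with best buy 0.00420000 and best sell 0.00430000):
    #   'market'    -> price = 4294967296 (crosses the whole book)
    #   'best'      -> buy at 0.00430000 / sell at 0.00420000 (take the other side)
    #   'border'    -> buy at 0.00420000 / sell at 0.00430000 (join own side)
    #   'overboder' (as spelled in the code) -> 'border' +/- 0.00000001 to
    #   place the order just ahead of the current best price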
def delorder(self, order_id=None, simulation=None):
return None
def _getmarketfromid(self, id):
markets = self.marketstatus(cached=True)
for marketname in markets:
if markets[marketname]['id'] == id:
return marketname
return None
def _getidfrommarket(self, market):
markets = self.marketstatus(cached=True)
if markets.has_key(market):
return markets[market]['id']
else:
return None
class CryptoAPI_bittrex(BittrexAPI, CryptoAPI_iface):
def __init__(self, key, secret, simulation=False, cached=False):
super(CryptoAPI_bittrex, self).__init__(key, secret, simulation, cached)
def balances(self, currency=None, cached=None):
if cached == None:
cached = self.cached
ret = {
'available': {},
'hold': {},
'total': {},
}
if currency==None:
info = self.getbalances(cached)['result']
else:
            info = [self.getbalance(currency, cached)['result']]
for i in info:
ret['available'][i['Currency']] = float(i['Available'])
ret['hold'][i['Currency']] = float(i['Pending'])
ret['total'][i['Currency']] = float(i['Balance'])
return ret
def marketstatus(self, market=None, depth_level=None, cached=None):
if cached == None:
cached = self.cached
ret = {}
status = self.getmarkets(cached)['result']
status = self.getmarketsummaries(cached)['result']
for i in status:
marketname = i['MarketName']
#if marketname == market or market == i['BaseCurrency'] or market == i['MarketCurrency'] or market == None:
            if market == None or marketname == market or market in marketname:
if i['Volume'] == None:
i['Volume'] = 0
ret[marketname] = {
'id': marketname,
'last_price': float(i['Last']),
'high_price': float(str(i['High'])), # FIX a bug on Bittrex data returned
'low_price': float(i['Low']),
'volume': float(i['Volume']),
'depth': None
}
if depth_level != None and depth_level > 0:
depth = self.getorderbook(marketname, 'both', depth_level, cached)['result']
ret[marketname]['depth'] = {
'buy': [],
'sell': [],
}
for j in depth['buy'][0:depth_level]:
ret[marketname]['depth']['buy'].append([float(j['Rate']),float(j['Quantity'])])
for j in depth['sell'][0:depth_level]:
ret[marketname]['depth']['sell'].append([float(j['Rate']),float(j['Quantity'])])
return ret
def orders(self, market=None, cached=None):
if cached == None:
cached = self.cached
ret = []
orders = self.getopenorders(market, cached)['return']
for i in orders:
marketname = self._getmarketfromid(i['marketid'])
ret.append({
'id': int(i['orderid']),
'market': 'TBD',
'price': i['price'],
'amount': i['orig_quantity'],
'remaining_amount': i['quantity'],
})
return ret
pass
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
pass
def delorder(self, order_id=None, simulation=None):
pass
def CryptoAPI(type, key, secret, simulation=False, cached=False):
# TODO Security: type validation
code = 'CryptoAPI_{0}(key, secret, simulation, cached)'.format(type)
api = eval(code)
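    # Dispatch note (added comment): 'type' must name one of the implemented
    # backends ('cryptsy' or 'bittrex'), since it is interpolated into a class
    # name and evaluated. Hypothetical usage:
    #   api = CryptoAPI('bittrex', key, secret)
    #   api.balances('BTC')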
return api | gpl-3.0 | 8,747,078,000,745,754,000 | 27.866197 | 127 | 0.654508 | false | 3.276179 | false | false | false |
jbq/ufw | src/backend.py | 1 | 24402 | '''backend.py: interface for ufw backends'''
#
# Copyright 2008-2011 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import errno
import os
import re
import stat
import sys
import ufw.util
from ufw.util import warn, debug
from ufw.common import UFWError, config_dir, iptables_dir, UFWRule
import ufw.applications
class UFWBackend:
'''Interface for backends'''
def __init__(self, name, dryrun, extra_files=None):
self.defaults = None
self.name = name
self.dryrun = dryrun
self.rules = []
self.rules6 = []
self.files = {'defaults': os.path.join(config_dir, 'default/ufw'),
'conf': os.path.join(config_dir, 'ufw/ufw.conf'),
'apps': os.path.join(config_dir, 'ufw/applications.d') }
if extra_files != None:
self.files.update(extra_files)
self.loglevels = {'off': 0,
'low': 100,
'medium': 200,
'high': 300,
'full': 400 }
self.do_checks = True
try:
self._do_checks()
self._get_defaults()
self._read_rules()
except Exception:
raise
self.profiles = ufw.applications.get_profiles(self.files['apps'])
self.iptables = os.path.join(iptables_dir, "iptables")
self.iptables_restore = os.path.join(iptables_dir, "iptables-restore")
self.ip6tables = os.path.join(iptables_dir, "ip6tables")
self.ip6tables_restore = os.path.join(iptables_dir, \
"ip6tables-restore")
self.iptables_version = ufw.util.get_iptables_version(self.iptables)
def is_enabled(self):
'''Is firewall configured as enabled'''
if self.defaults.has_key('enabled') and \
self.defaults['enabled'] == 'yes':
return True
return False
def use_ipv6(self):
'''Is firewall configured to use IPv6'''
if self.defaults.has_key('ipv6') and \
self.defaults['ipv6'] == 'yes' and \
os.path.exists("/proc/sys/net/ipv6"):
return True
return False
def _get_default_policy(self, primary="input"):
'''Get default policy for specified primary chain'''
policy = "default_" + primary + "_policy"
rstr = ""
if self.defaults[policy] == "accept":
rstr = "allow"
elif self.defaults[policy] == "accept_no_track":
rstr = "allow-without-tracking"
elif self.defaults[policy] == "reject":
rstr = "reject"
else:
rstr = "deny"
return rstr
def _do_checks(self):
'''Perform basic security checks:
is setuid or setgid (for non-Linux systems)
checks that script is owned by root
checks that every component in absolute path are owned by root
warn if script is group writable
warn if part of script path is group writable
Doing this at the beginning causes a race condition with later
operations that don't do these checks. However, if the user running
this script is root, then need to be root to exploit the race
condition (and you are hosed anyway...)
'''
if not self.do_checks:
err_msg = _("Checks disabled")
warn(err_msg)
return True
# Not needed on Linux, but who knows the places we will go...
if os.getuid() != os.geteuid():
err_msg = _("ERROR: this script should not be SUID")
raise UFWError(err_msg)
if os.getgid() != os.getegid():
err_msg = _("ERROR: this script should not be SGID")
raise UFWError(err_msg)
uid = os.getuid()
if uid != 0:
err_msg = _("You need to be root to run this script")
raise UFWError(err_msg)
# Use these so we only warn once
warned_world_write = {}
warned_group_write = {}
warned_owner = {}
profiles = []
if not os.path.isdir(self.files['apps']):
warn_msg = _("'%s' does not exist") % (self.files['apps'])
warn(warn_msg)
else:
pat = re.compile(r'^\.')
for profile in os.listdir(self.files['apps']):
if not pat.search(profile):
profiles.append(os.path.join(self.files['apps'], profile))
for path in self.files.values() + [ os.path.abspath(sys.argv[0]) ] + \
profiles:
while True:
debug("Checking " + path)
if path == self.files['apps'] and \
not os.path.isdir(self.files['apps']):
break
try:
statinfo = os.stat(path)
mode = statinfo[stat.ST_MODE]
except OSError:
err_msg = _("Couldn't stat '%s'") % (path)
raise UFWError(err_msg)
except Exception:
raise
if statinfo.st_uid != 0 and not warned_owner.has_key(path):
warn_msg = _("uid is %(uid)s but '%(path)s' is owned by " \
"%(st_uid)s") % ({'uid': str(uid), \
'path': path, \
'st_uid': str(statinfo.st_uid)})
warn(warn_msg)
warned_owner[path] = True
if mode & stat.S_IWOTH and not warned_world_write.has_key(path):
warn_msg = _("%s is world writable!") % (path)
warn(warn_msg)
warned_world_write[path] = True
if mode & stat.S_IWGRP and not warned_group_write.has_key(path):
warn_msg = _("%s is group writable!") % (path)
warn(warn_msg)
warned_group_write[path] = True
if path == "/":
break
path = os.path.dirname(path)
if not path:
raise OSError(errno.ENOENT, "Could not find '%s'" % (path))
for f in self.files:
if f != 'apps' and not os.path.isfile(self.files[f]):
err_msg = _("'%(f)s' file '%(name)s' does not exist") % \
({'f': f, 'name': self.files[f]})
raise UFWError(err_msg)
def _get_defaults(self):
'''Get all settings from defaults file'''
self.defaults = {}
for f in [self.files['defaults'], self.files['conf']]:
try:
orig = ufw.util.open_file_read(f)
except Exception:
err_msg = _("Couldn't open '%s' for reading") % (f)
raise UFWError(err_msg)
pat = re.compile(r'^\w+="?\w+"?')
for line in orig:
if pat.search(line):
tmp = re.split(r'=', line.strip())
self.defaults[tmp[0].lower()] = tmp[1].lower().strip('"\'')
orig.close()
# do some default policy sanity checking
policies = ['accept', 'accept_no_track', 'drop', 'reject']
for c in [ 'input', 'output', 'forward' ]:
if not self.defaults.has_key('default_%s_policy' % (c)):
err_msg = _("Missing policy for '%s'" % (c))
raise UFWError(err_msg)
p = self.defaults['default_%s_policy' % (c)]
if p not in policies or \
(p == 'accept_no_track' and c == 'forward'):
err_msg = _("Invalid policy '%(policy)s' for '%(chain)s'" % \
({'policy': p, 'chain': c}))
raise UFWError(err_msg)
def set_default(self, fn, opt, value):
'''Sets option in defaults file'''
if not re.match(r'^[\w_]+$', opt):
err_msg = _("Invalid option")
raise UFWError(err_msg)
# Perform this here so we can present a nice error to the user rather
# than a traceback
if not os.access(fn, os.W_OK):
err_msg = _("'%s' is not writable" % (fn))
raise UFWError(err_msg)
try:
fns = ufw.util.open_files(fn)
except Exception:
raise
fd = fns['tmp']
found = False
pat = re.compile(r'^' + opt + '=')
for line in fns['orig']:
if pat.search(line):
ufw.util.write_to_file(fd, opt + "=" + value + "\n")
found = True
else:
ufw.util.write_to_file(fd, line)
# Add the entry if not found
if not found:
ufw.util.write_to_file(fd, opt + "=" + value + "\n")
try:
ufw.util.close_files(fns)
except Exception:
raise
# Now that the files are written out, update value in memory
self.defaults[opt.lower()] = value.lower().strip('"\'')
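    # Example of the rewrite performed above (added comment): calling
    #   self.set_default(self.files['conf'], "LOGLEVEL", "low")
    # replaces an existing 'LOGLEVEL=...' line in ufw.conf with 'LOGLEVEL=low'
    # (or appends it if missing) and then updates self.defaults['loglevel'].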
def set_default_application_policy(self, policy):
'''Sets default application policy of firewall'''
if not self.dryrun:
if policy == "allow":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"ACCEPT\"")
except Exception:
raise
elif policy == "deny":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"DROP\"")
except Exception:
raise
elif policy == "reject":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"REJECT\"")
except Exception:
raise
elif policy == "skip":
try:
self.set_default(self.files['defaults'], \
"DEFAULT_APPLICATION_POLICY", \
"\"SKIP\"")
except Exception:
raise
else:
err_msg = _("Unsupported policy '%s'") % (policy)
raise UFWError(err_msg)
rstr = _("Default application policy changed to '%s'") % (policy)
return rstr
def get_app_rules_from_template(self, template):
'''Return a list of UFWRules based on the template rule'''
rules = []
profile_names = self.profiles.keys()
if template.dport in profile_names and template.sport in profile_names:
dports = ufw.applications.get_ports(self.profiles[template.dport])
sports = ufw.applications.get_ports(self.profiles[template.sport])
for i in dports:
tmp = template.dup_rule()
tmp.dapp = ""
tmp.set_port("any", "src")
try:
(port, proto) = ufw.util.parse_port_proto(i)
tmp.set_protocol(proto)
tmp.set_port(port, "dst")
except Exception:
raise
tmp.dapp = template.dapp
if template.dport == template.sport:
# Just use the same ports as dst for src when they are the
# same to avoid duplicate rules
tmp.sapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(i)
tmp.set_protocol(proto)
tmp.set_port(port, "src")
except Exception:
raise
tmp.sapp = template.sapp
rules.append(tmp)
else:
for j in sports:
rule = tmp.dup_rule()
rule.sapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(j)
rule.set_protocol(proto)
rule.set_port(port, "src")
except Exception:
raise
if rule.protocol == "any":
rule.set_protocol(tmp.protocol)
rule.sapp = template.sapp
rules.append(rule)
elif template.sport in profile_names:
for p in ufw.applications.get_ports(self.profiles[template.sport]):
rule = template.dup_rule()
rule.sapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(p)
rule.set_protocol(proto)
rule.set_port(port, "src")
except Exception:
raise
rule.sapp = template.sapp
rules.append(rule)
elif template.dport in profile_names:
for p in ufw.applications.get_ports(self.profiles[template.dport]):
rule = template.dup_rule()
rule.dapp = ""
try:
(port, proto) = ufw.util.parse_port_proto(p)
rule.set_protocol(proto)
rule.set_port(port, "dst")
except Exception:
raise
rule.dapp = template.dapp
rules.append(rule)
if len(rules) < 1:
err_msg = _("No rules found for application profile")
raise UFWError(err_msg)
return rules
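    # Expansion sketch (added comment; the 'WWW Full' ports are illustrative):
    # a template rule whose dport is an application profile such as 'WWW Full'
    # (ports '80,443/tcp') is expanded into one UFWRule per port/protocol
    # entry, each keeping dapp/sapp so the rules can later be regrouped into a
    # single application tuple.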
def update_app_rule(self, profile):
'''Update rule for profile in place. Returns result string and bool
on whether or not the profile is used in the current ruleset.
'''
updated_rules = []
updated_rules6 = []
last_tuple = ""
rstr = ""
updated_profile = False
# Remember, self.rules is from user[6].rules, and not the running
# firewall.
for r in self.rules + self.rules6:
if r.dapp == profile or r.sapp == profile:
# We assume that the rules are in app rule order. Specifically,
# if app rule has multiple rules, they are one after the other.
# If the rule ordering changes, the below will have to change.
tupl = r.get_app_tuple()
if tupl == last_tuple:
# Skip the rule if seen this tuple already (ie, it is part
# of a known tuple).
continue
else:
# Have a new tuple, so find and insert new app rules here
template = r.dup_rule()
template.set_protocol("any")
if template.dapp != "":
template.set_port(template.dapp, "dst")
if template.sapp != "":
template.set_port(template.sapp, "src")
try:
new_app_rules = self.get_app_rules_from_template(\
template)
except Exception:
raise
for new_r in new_app_rules:
new_r.normalize()
if new_r.v6:
updated_rules6.append(new_r)
else:
updated_rules.append(new_r)
last_tuple = tupl
updated_profile = True
else:
if r.v6:
updated_rules6.append(r)
else:
updated_rules.append(r)
if updated_profile:
self.rules = updated_rules
self.rules6 = updated_rules6
rstr += _("Rules updated for profile '%s'") % (profile)
try:
self._write_rules(False) # ipv4
self._write_rules(True) # ipv6
except Exception:
err_msg = _("Couldn't update application rules")
raise UFWError(err_msg)
return (rstr, updated_profile)
def find_application_name(self, profile_name):
'''Find the application profile name for profile_name'''
if self.profiles.has_key(profile_name):
return profile_name
match = ""
matches = 0
for n in self.profiles.keys():
if n.lower() == profile_name.lower():
match = n
matches += 1
debug_msg = "'%d' matches for '%s'" % (matches, profile_name)
debug(debug_msg)
if matches == 1:
return match
        elif matches > 1:
            err_msg = _("Found multiple matches for '%s'. Please use exact profile name") % \
                      (profile_name)
            raise UFWError(err_msg)

        err_msg = _("Could not find a profile matching '%s'") % (profile_name)
        raise UFWError(err_msg)
def find_other_position(self, position, v6):
'''Return the absolute position in the other list of the rule with the
user position of the given list. For example, find_other_position(4,
True) will return the absolute position of the rule in the ipv4 list
matching the user specified '4' rule in the ipv6 list.
'''
# Invalid search (v6 rule with too low position)
if v6 and position > len(self.rules6):
raise ValueError()
# Invalid search (v4 rule with too high position)
if not v6 and position > len(self.rules):
raise ValueError()
if position < 1:
raise ValueError()
rules = []
if v6:
rules = self.rules6
else:
rules = self.rules
# self.rules[6] is a list of tuples. Some application rules have
# multiple tuples but the user specifies by ufw rule, not application
# tuple, so we need to find how many tuples there are leading up to
# the specified position, which we can then use as an offset for
# getting the proper match_rule.
app_rules = {}
tuple_offset = 0
for i, r in enumerate(rules):
if i >= position:
break
tupl = ""
if r.dapp != "" or r.sapp != "":
tupl = r.get_app_tuple()
if app_rules.has_key(tupl):
tuple_offset += 1
else:
app_rules[tupl] = True
rules = []
if v6:
rules = self.rules
match_rule = self.rules6[position - 1 + tuple_offset].dup_rule()
match_rule.set_v6(False)
else:
rules = self.rules6
match_rule = self.rules[position - 1 + tuple_offset].dup_rule()
match_rule.set_v6(True)
count = 1
for r in rules:
if UFWRule.match(r, match_rule) == 0:
return count
count += 1
return 0
def get_loglevel(self):
'''Gets current log level of firewall'''
level = 0
rstr = _("Logging: ")
if not self.defaults.has_key('loglevel') or \
self.defaults['loglevel'] not in self.loglevels.keys():
level = -1
rstr += _("unknown")
else:
level = self.loglevels[self.defaults['loglevel']]
if level == 0:
rstr += "off"
else:
rstr += "on (%s)" % (self.defaults['loglevel'])
return (level, rstr)
def set_loglevel(self, level):
'''Sets log level of firewall'''
if level not in self.loglevels.keys() + ['on']:
err_msg = _("Invalid log level '%s'") % (level)
raise UFWError(err_msg)
new_level = level
if level == "on":
if not self.defaults.has_key('loglevel') or \
self.defaults['loglevel'] == "off":
new_level = "low"
else:
new_level = self.defaults['loglevel']
try:
self.set_default(self.files['conf'], "LOGLEVEL", new_level)
self.update_logging(new_level)
except Exception:
raise
if new_level == "off":
return _("Logging disabled")
else:
return _("Logging enabled")
def get_rules(self):
'''Return list of all rules'''
return self.rules + self.rules6
def get_rules_count(self, v6):
'''Return number of ufw rules (not iptables rules)'''
rules = []
if v6:
rules = self.rules6
else:
rules = self.rules
count = 0
app_rules = {}
for r in rules:
tupl = ""
if r.dapp != "" or r.sapp != "":
tupl = r.get_app_tuple()
if app_rules.has_key(tupl):
debug("Skipping found tuple '%s'" % (tupl))
continue
else:
app_rules[tupl] = True
count += 1
return count
def get_rule_by_number(self, num):
'''Return rule specified by number seen via "status numbered"'''
rules = self.get_rules()
count = 1
app_rules = {}
for r in rules:
tupl = ""
if r.dapp != "" or r.sapp != "":
tupl = r.get_app_tuple()
if app_rules.has_key(tupl):
debug("Skipping found tuple '%s'" % (tupl))
continue
else:
app_rules[tupl] = True
if count == int(num):
return r
count += 1
return None
def get_matching(self, rule):
'''See if there is a matching rule in the existing ruleset. Note this
does not group rules by tuples.'''
matched = []
count = 0
for r in self.get_rules():
count += 1
ret = rule.fuzzy_dst_match(r)
if ret < 1:
matched.append(count)
return matched
# API overrides
def set_default_policy(self, policy, direction):
'''Set default policy for specified direction'''
raise UFWError("UFWBackend.set_default_policy: need to override")
def get_running_raw(self, rules_type):
'''Get status of running firewall'''
raise UFWError("UFWBackend.get_running_raw: need to override")
def get_status(self, verbose, show_count):
'''Get managed rules'''
raise UFWError("UFWBackend.get_status: need to override")
def set_rule(self, rule, allow_reload):
'''Update firewall with rule'''
raise UFWError("UFWBackend.set_rule: need to override")
def start_firewall(self):
'''Start the firewall'''
raise UFWError("UFWBackend.start_firewall: need to override")
def stop_firewall(self):
'''Stop the firewall'''
raise UFWError("UFWBackend.stop_firewall: need to override")
def get_app_rules_from_system(self, template, v6):
'''Get a list if rules based on template'''
raise UFWError("UFWBackend.get_app_rules_from_system: need to " + \
"override")
def update_logging(self, level):
'''Update loglevel of running firewall'''
raise UFWError("UFWBackend.update_logging: need to override")
def reset(self):
'''Reset the firewall'''
raise UFWError("UFWBackend.reset: need to override")
| gpl-3.0 | 7,539,033,067,947,464,000 | 35.475336 | 93 | 0.489017 | false | 4.356722 | false | false | false |
craigderington/studentloan5 | tests/engine.py | 1 | 5088 | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("studentloan5", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("studentloan5", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
| bsd-3-clause | -3,658,331,897,922,075,600 | 33.378378 | 114 | 0.612225 | false | 4.076923 | false | false | false |
opnsense/core | src/opnsense/scripts/netflow/lib/flowparser.py | 1 | 8475 | """
Copyright (c) 2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
flowd log parser
"""
import struct
import syslog
from socket import inet_ntop, AF_INET, AF_INET6, ntohl
class FlowParser:
# fields in order of appearance, use bitmask compare
field_definition_order = [
'tag',
'recv_time',
'proto_flags_tos',
'agent_addr4',
'agent_addr6',
'src_addr4',
'src_addr6',
'dst_addr4',
'dst_addr6',
'gateway_addr4',
'gateway_addr6',
'srcdst_port',
'packets',
'octets',
'if_indices',
'agent_info',
'flow_times',
'as_info',
'flow_engine_info'
]
# extract definition, integer values are read as rawdata (not parsed)
field_definition = {
'tag': 'I',
'recv_time': '>II',
'proto_flags_tos': 'BBBB',
'agent_addr4': 4,
'agent_addr6': 16,
'src_addr4': 4,
'src_addr6': 16,
'dst_addr4': 4,
'dst_addr6': 16,
'gateway_addr4': 4,
'gateway_addr6': 16,
'srcdst_port': '>HH',
'packets': '>Q',
'octets': '>Q',
'if_indices': '>II',
'agent_info': '>IIIHH',
'flow_times': '>II',
'as_info': 'IIBBH',
'flow_engine_info': 'HHII'
}
def __init__(self, filename, recv_stamp=None):
self._filename = filename
self._recv_stamp = recv_stamp
# cache formatter vs byte length
self._fmt_cache = dict()
# pre-calculate powers of 2
self._pow = dict()
for idx in range(len(self.field_definition_order)):
self._pow[idx] = pow(2, idx)
def calculate_size(self, fmt):
if fmt not in self._fmt_cache:
fmts = {'B': 1, 'H': 2, 'I': 4, 'Q': 8}
self._fmt_cache[fmt] = 0
for key in fmt:
if key in fmts:
self._fmt_cache[fmt] += fmts[key]
return self._fmt_cache[fmt]
def _parse_binary(self, raw_data, data_fields):
""" parse binary record
:param raw_data: binary data record
:param data_fields: field bitmask, provided by header
:return: dict
"""
raw_data_idx = 0
raw_record = dict()
for idx in range(len(self.field_definition_order)):
if self._pow[idx] & data_fields:
fieldname = self.field_definition_order[idx]
if fieldname in self.field_definition:
if type(self.field_definition[fieldname]) is int:
fsize = self.field_definition[fieldname]
raw_record[fieldname] = raw_data[raw_data_idx:raw_data_idx + fsize]
else:
fsize = self.calculate_size(self.field_definition[fieldname])
try:
content = struct.unpack(
self.field_definition[fieldname],
raw_data[raw_data_idx:raw_data_idx + fsize]
)
raw_record[fieldname] = content[0] if len(content) == 1 else content
except struct.error as e:
# the flowd record doesn't appear to be as expected, log for now.
syslog.syslog(syslog.LOG_NOTICE, "flowparser failed to unpack %s (%s)" % (fieldname, e))
raw_data_idx += fsize
return raw_record
def __iter__(self):
""" iterate flowd log file
:return:
"""
# pre-compile address formatters to save time
with open(self._filename, 'rb') as flowh:
while True:
# header [version, len_words, reserved, fields]
hdata = flowh.read(8)
if hdata == b'':
break
header = struct.unpack('BBHI', hdata)
record = self._parse_binary(
raw_data=flowh.read(header[1] * 4),
data_fields=ntohl(header[3])
)
if 'recv_time' not in record or 'agent_info' not in record:
# XXX invalid (empty?) flow record.
continue
record['recv_sec'] = record['recv_time'][0]
if self._recv_stamp is not None and record['recv_sec'] < self._recv_stamp:
# self._recv_stamp can contain the last received timestamp, in which case
# we should not return older data. The exact timestamp will be returned, so the
# consumer knows it doesn't have to read other, older, flowd log files
continue
record['sys_uptime_ms'] = record['agent_info'][0]
record['netflow_ver'] = record['agent_info'][3]
record['recv'] = record['recv_sec']
record['recv_usec'] = record['recv_time'][1]
record['if_ndx_in'] = -1
record['if_ndx_out'] = -1
record['src_port'] = 0
record['dst_port'] = 0
record['protocol'] = 0
if 'proto_flags_tos' in record:
record['tcp_flags'] = record['proto_flags_tos'][0]
record['protocol'] = record['proto_flags_tos'][1]
record['tos'] = record['proto_flags_tos'][2]
if 'flow_times' in record:
record['flow_start'] = record['flow_times'][0]
record['flow_finish'] = record['flow_times'][1]
else:
record['flow_start'] = record['sys_uptime_ms']
record['flow_finish'] = record['sys_uptime_ms']
if 'if_indices' in record:
record['if_ndx_in'] = record['if_indices'][0]
record['if_ndx_out'] = record['if_indices'][1]
if 'srcdst_port' in record:
record['src_port'] = record['srcdst_port'][0]
record['dst_port'] = record['srcdst_port'][1]
# concat ipv4/v6 fields into field without [4,6]
for key in self.field_definition_order:
if key in record:
if key[-1] == '4':
record[key[:-1]] = inet_ntop(AF_INET, record[key])
elif key[-1] == '6':
record[key[:-1]] = inet_ntop(AF_INET6, record[key])
# calculated values
record['flow_end'] = record['recv_sec'] - (record['sys_uptime_ms'] - record['flow_finish']) / 1000.0
record['duration_ms'] = (record['flow_finish'] - record['flow_start'])
record['flow_start'] = record['flow_end'] - record['duration_ms'] / 1000.0
if 'packets' not in record or 'octets' not in record or 'src_addr' not in record or 'dst_addr' not in record:
# this can't be useful data, skip record
continue
yield record
| bsd-2-clause | 991,364,815,688,320,300 | 41.80303 | 125 | 0.520354 | false | 4.2375 | false | false | false |
paulbersch/django-locus | locus/utils/location.py | 1 | 1360 | import math
# add back later
# import GeoIP
nauticalMilePerLat = 60.00721
nauticalMilePerLongitude = 60.10793
rad = math.pi / 180.0
milesPerNauticalMile = 1.15078
def calcDistance(lat1, lon1, lat2, lon2):
"""
Caclulate distance between two lat lons in NM
"""
lat1 = float(lat1)
lat2 = float(lat2)
lon1 = float(lon1)
lon2 = float(lon2)
yDistance = (lat2 - lat1) * nauticalMilePerLat
xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2)
distance = math.sqrt( yDistance**2 + xDistance**2 )
return distance * milesPerNauticalMile
def milesBox( lat, lon, radius ):
"""
Returns two lat/lon pairs as (lat1, lon2, lat2, lon2) which define a box that exactly surrounds
a circle of radius of the given amount in miles.
"""
# this gives us a tuple of values that can easily be used to get a list of "possibly close"
# dealers. then we use the calcDistance function to check if it's ACTUALLY within the radius.
latRange = radius / ( milesPerNauticalMile * 60.0 )
lonRange = radius / ( math.cos(lat * rad) * milesPerNauticalMile * 60.0)
return ( lat - latRange, lon - lonRange, lat + latRange, lon + lonRange )
def revLookup(ip):
return False
"""
gi = GeoIP.open("/usr/local/share/GeoIP/GeoLiteCity.dat",GeoIP.GEOIP_STANDARD)
return gi.record_by_addr(ip)
"""
| mit | 4,176,555,150,693,318,000 | 28.565217 | 107 | 0.696324 | false | 2.792608 | false | false | false |
MinoMino/minqlx | python/minqlx/_handlers.py | 1 | 18535 | # minqlx - Extends Quake Live's dedicated server with extra functionality and scripting.
# Copyright (C) 2015 Mino <[email protected]>
# This file is part of minqlx.
# minqlx is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# minqlx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with minqlx. If not, see <http://www.gnu.org/licenses/>.
import minqlx
import collections
import sched
import re
# ====================================================================
# REGULAR EXPRESSIONS
# ====================================================================
_re_say = re.compile(r"^say +\"?(?P<msg>.+)\"?$", flags=re.IGNORECASE)
_re_say_team = re.compile(r"^say_team +\"?(?P<msg>.+)\"?$", flags=re.IGNORECASE)
_re_callvote = re.compile(r"^(?:cv|callvote) +(?P<cmd>[^ ]+)(?: \"?(?P<args>.+?)\"?)?$", flags=re.IGNORECASE)
_re_vote = re.compile(r"^vote +(?P<arg>.)", flags=re.IGNORECASE)
_re_team = re.compile(r"^team +(?P<arg>.)", flags=re.IGNORECASE)
_re_vote_ended = re.compile(r"^print \"Vote (?P<result>passed|failed).\n\"$")
_re_userinfo = re.compile(r"^userinfo \"(?P<vars>.+)\"$")
# ====================================================================
# LOW-LEVEL HANDLERS
# These are all called by the C code, not within Python.
# ====================================================================
def handle_rcon(cmd):
"""Console commands that are to be processed as regular pyminqlx
commands as if the owner executes it. This allows the owner to
interact with the Python part of minqlx without having to connect.
"""
try:
minqlx.COMMANDS.handle_input(minqlx.RconDummyPlayer(), cmd, minqlx.CONSOLE_CHANNEL)
except:
minqlx.log_exception()
return True
def handle_client_command(client_id, cmd):
"""Client commands are commands such as "say", "say_team", "scores",
"disconnect" and so on. This function parses those and passes it
on to the event dispatcher.
:param client_id: The client identifier.
:type client_id: int
:param cmd: The command being ran by the client.
:type cmd: str
"""
try:
# Dispatch the "client_command" event before further processing.
player = minqlx.Player(client_id)
retval = minqlx.EVENT_DISPATCHERS["client_command"].dispatch(player, cmd)
if retval is False:
return False
elif isinstance(retval, str):
# Allow plugins to modify the command before passing it on.
cmd = retval
res = _re_say.match(cmd)
if res:
msg = res.group("msg").replace("\"", "")
channel = minqlx.CHAT_CHANNEL
if minqlx.EVENT_DISPATCHERS["chat"].dispatch(player, msg, channel) is False:
return False
return cmd
res = _re_say_team.match(cmd)
if res:
msg = res.group("msg").replace("\"", "")
if player.team == "free": # I haven't tried this, but I don't think it's even possible.
channel = minqlx.FREE_CHAT_CHANNEL
elif player.team == "red":
channel = minqlx.RED_TEAM_CHAT_CHANNEL
elif player.team == "blue":
channel = minqlx.BLUE_TEAM_CHAT_CHANNEL
else:
channel = minqlx.SPECTATOR_CHAT_CHANNEL
if minqlx.EVENT_DISPATCHERS["chat"].dispatch(player, msg, channel) is False:
return False
return cmd
res = _re_callvote.match(cmd)
if res and not minqlx.Plugin.is_vote_active():
vote = res.group("cmd")
args = res.group("args") if res.group("args") else ""
# Set the caller for vote_started in case the vote goes through.
minqlx.EVENT_DISPATCHERS["vote_started"].caller(player)
if minqlx.EVENT_DISPATCHERS["vote_called"].dispatch(player, vote, args) is False:
return False
return cmd
res = _re_vote.match(cmd)
if res and minqlx.Plugin.is_vote_active():
arg = res.group("arg").lower()
if arg == "y" or arg == "1":
if minqlx.EVENT_DISPATCHERS["vote"].dispatch(player, True) is False:
return False
elif arg == "n" or arg == "2":
if minqlx.EVENT_DISPATCHERS["vote"].dispatch(player, False) is False:
return False
return cmd
res = _re_team.match(cmd)
if res:
arg = res.group("arg").lower()
target_team = ""
if arg == player.team[0]:
# Don't trigger if player is joining the same team.
return cmd
elif arg == "f":
target_team = "free"
elif arg == "r":
target_team = "red"
elif arg == "b":
target_team = "blue"
elif arg == "s":
target_team = "spectator"
elif arg == "a":
target_team = "any"
if target_team:
if minqlx.EVENT_DISPATCHERS["team_switch_attempt"].dispatch(player, player.team, target_team) is False:
return False
return cmd
res = _re_userinfo.match(cmd)
if res:
new_info = minqlx.parse_variables(res.group("vars"), ordered=True)
old_info = player.cvars
changed = {}
for key in new_info:
if key not in old_info or (key in old_info and new_info[key] != old_info[key]):
changed[key] = new_info[key]
if changed:
ret = minqlx.EVENT_DISPATCHERS["userinfo"].dispatch(player, changed)
if ret is False:
return False
elif isinstance(ret, dict):
for key in ret:
new_info[key] = ret[key]
cmd = "userinfo \"{}\"".format("".join(["\\{}\\{}".format(key, new_info[key]) for key in new_info]))
return cmd
except:
minqlx.log_exception()
return True
def handle_server_command(client_id, cmd):
try:
# Dispatch the "server_command" event before further processing.
try:
player = minqlx.Player(client_id) if client_id >= 0 else None
except minqlx.NonexistentPlayerError:
return True
retval = minqlx.EVENT_DISPATCHERS["server_command"].dispatch(player, cmd)
if retval is False:
return False
elif isinstance(retval, str):
cmd = retval
res = _re_vote_ended.match(cmd)
if res:
if res.group("result") == "passed":
minqlx.EVENT_DISPATCHERS["vote_ended"].dispatch(True)
else:
minqlx.EVENT_DISPATCHERS["vote_ended"].dispatch(False)
return cmd
except:
minqlx.log_exception()
return True
# Executing tasks right before a frame, by the main thread, will often be desirable to avoid
# weird behavior if you were to use threading. This list will act as a task queue.
# Tasks can be added by simply adding the @minqlx.next_frame decorator to functions.
frame_tasks = sched.scheduler()
next_frame_tasks = collections.deque()
def handle_frame():
"""This will be called every frame. To allow threads to call stuff from the
main thread, tasks can be scheduled using the :func:`minqlx.next_frame` decorator
and have it be executed here.
"""
while True:
# This will run all tasks that are currently scheduled.
# If one of the tasks throw an exception, it'll log it
# and continue execution of the next tasks if any.
try:
frame_tasks.run(blocking=False)
break
except:
minqlx.log_exception()
continue
try:
minqlx.EVENT_DISPATCHERS["frame"].dispatch()
except:
minqlx.log_exception()
return True
try:
while True:
func, args, kwargs = next_frame_tasks.popleft()
frame_tasks.enter(0, 0, func, args, kwargs)
except IndexError:
pass
_zmq_warning_issued = False
_first_game = True
_ad_round_number = 0
def handle_new_game(is_restart):
# This is called early in the launch process, so it's a good place to initialize
# minqlx stuff that needs QLDS to be initialized.
global _first_game
if _first_game:
minqlx.late_init()
_first_game = False
# A good place to warn the owner if ZMQ stats are disabled.
global _zmq_warning_issued
if not bool(int(minqlx.get_cvar("zmq_stats_enable"))) and not _zmq_warning_issued:
logger = minqlx.get_logger()
logger.warning("Some events will not work because ZMQ stats is not enabled. "
"Launch the server with \"zmq_stats_enable 1\"")
_zmq_warning_issued = True
minqlx.set_map_subtitles()
if not is_restart:
try:
minqlx.EVENT_DISPATCHERS["map"].dispatch(
minqlx.get_cvar("mapname"),
minqlx.get_cvar("g_factory"))
except:
minqlx.log_exception()
return True
try:
minqlx.EVENT_DISPATCHERS["new_game"].dispatch()
except:
minqlx.log_exception()
return True
def handle_set_configstring(index, value):
"""Called whenever the server tries to set a configstring. Can return
False to stop the event.
"""
global _ad_round_number
try:
res = minqlx.EVENT_DISPATCHERS["set_configstring"].dispatch(index, value)
if res is False:
return False
elif isinstance(res, str):
value = res
# VOTES
if index == 9 and value:
cmd = value.split()
vote = cmd[0] if cmd else ""
args = " ".join(cmd[1:]) if len(cmd) > 1 else ""
minqlx.EVENT_DISPATCHERS["vote_started"].dispatch(vote, args)
return
# GAME STATE CHANGES
elif index == 0:
old_cs = minqlx.parse_variables(minqlx.get_configstring(index))
if not old_cs:
return
new_cs = minqlx.parse_variables(value)
old_state = old_cs["g_gameState"]
new_state = new_cs["g_gameState"]
if old_state != new_state:
if old_state == "PRE_GAME" and new_state == "IN_PROGRESS":
pass
elif old_state == "PRE_GAME" and new_state == "COUNT_DOWN":
_ad_round_number = 1
minqlx.EVENT_DISPATCHERS["game_countdown"].dispatch()
elif old_state == "COUNT_DOWN" and new_state == "IN_PROGRESS":
pass
#minqlx.EVENT_DISPATCHERS["game_start"].dispatch()
elif old_state == "IN_PROGRESS" and new_state == "PRE_GAME":
pass
elif old_state == "COUNT_DOWN" and new_state == "PRE_GAME":
pass
else:
logger = minqlx.get_logger()
logger.warning("UNKNOWN GAME STATES: {} - {}".format(old_state, new_state))
# ROUND COUNTDOWN AND START
elif index == 661:
cvars = minqlx.parse_variables(value)
if cvars:
if "turn" in cvars:
# it is A&D
if int(cvars["state"]) == 0:
return
# round cvar appears only on round countdown
# and first round is 0, not 1
try:
round_number = int(cvars["round"]) * 2 + 1 + int(cvars["turn"])
_ad_round_number = round_number
except KeyError:
round_number = _ad_round_number
else:
# it is CA
round_number = int(cvars["round"])
if round_number and "time" in cvars:
minqlx.EVENT_DISPATCHERS["round_countdown"].dispatch(round_number)
return
elif round_number:
minqlx.EVENT_DISPATCHERS["round_start"].dispatch(round_number)
return
return res
except:
minqlx.log_exception()
return True
def handle_player_connect(client_id, is_bot):
"""This will be called whenever a player tries to connect. If the dispatcher
returns False, it will not allow the player to connect and instead show them
a message explaining why. The default message is "You are banned from this
server.", but it can be set with :func:`minqlx.set_ban_message`.
:param client_id: The client identifier.
:type client_id: int
:param is_bot: Whether or not the player is a bot.
:type is_bot: bool
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_connect"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_player_loaded(client_id):
"""This will be called whenever a player has connected and finished loading,
meaning it'll go off a bit later than the usual "X connected" messages.
This will not trigger on bots.
:param client_id: The client identifier.
:type client_id: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_loaded"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_player_disconnect(client_id, reason):
"""This will be called whenever a player disconnects.
:param client_id: The client identifier.
:type client_id: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_disconnect"].dispatch(player, reason)
except:
minqlx.log_exception()
return True
def handle_player_spawn(client_id):
"""Called when a player spawns. Note that a spectator going in free spectate mode
makes the client spawn, so you'll want to check for that if you only want "actual"
spawns.
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["player_spawn"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_kamikaze_use(client_id):
"""This will be called whenever player uses kamikaze item.
:param client_id: The client identifier.
:type client_id: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["kamikaze_use"].dispatch(player)
except:
minqlx.log_exception()
return True
def handle_kamikaze_explode(client_id, is_used_on_demand):
"""This will be called whenever kamikaze explodes.
:param client_id: The client identifier.
:type client_id: int
:param is_used_on_demand: Non-zero if kamikaze is used on demand.
:type is_used_on_demand: int
"""
try:
player = minqlx.Player(client_id)
return minqlx.EVENT_DISPATCHERS["kamikaze_explode"].dispatch(player, True if is_used_on_demand else False)
except:
minqlx.log_exception()
return True
def handle_console_print(text):
"""Called whenever the server prints something to the console and when rcon is used."""
try:
if not text:
return
# Log console output. Removes the need to have stdout logs in addition to minqlx.log.
minqlx.get_logger().debug(text.rstrip("\n"))
res = minqlx.EVENT_DISPATCHERS["console_print"].dispatch(text)
if res is False:
return False
if _print_redirection:
global _print_buffer
_print_buffer += text
if isinstance(res, str):
return res
return text
except:
minqlx.log_exception()
return True
_print_redirection = None
_print_buffer = ""
def redirect_print(channel):
"""Redirects print output to a channel. Useful for commands that execute console commands
and want to redirect the output to the channel instead of letting it go to the console.
To use it, use the return value with the "with" statement.
.. code-block:: python
def cmd_echo(self, player, msg, channel):
with minqlx.redirect_print(channel):
minqlx.console_command("echo {}".format(" ".join(msg)))
"""
class PrintRedirector:
def __init__(self, channel):
if not isinstance(channel, minqlx.AbstractChannel):
raise ValueError("The redirection channel must be an instance of minqlx.AbstractChannel.")
self.channel = channel
def __enter__(self):
global _print_redirection
_print_redirection = self.channel
def __exit__(self, exc_type, exc_val, exc_tb):
global _print_redirection
self.flush()
_print_redirection = None
def flush(self):
global _print_buffer
self.channel.reply(_print_buffer)
_print_buffer = ""
return PrintRedirector(channel)
def register_handlers():
minqlx.register_handler("rcon", handle_rcon)
minqlx.register_handler("client_command", handle_client_command)
minqlx.register_handler("server_command", handle_server_command)
minqlx.register_handler("frame", handle_frame)
minqlx.register_handler("new_game", handle_new_game)
minqlx.register_handler("set_configstring", handle_set_configstring)
minqlx.register_handler("player_connect", handle_player_connect)
minqlx.register_handler("player_loaded", handle_player_loaded)
minqlx.register_handler("player_disconnect", handle_player_disconnect)
minqlx.register_handler("player_spawn", handle_player_spawn)
minqlx.register_handler("console_print", handle_console_print)
minqlx.register_handler("kamikaze_use", handle_kamikaze_use)
minqlx.register_handler("kamikaze_explode", handle_kamikaze_explode)
| gpl-3.0 | 3,269,673,672,761,068,500 | 35.201172 | 120 | 0.580523 | false | 3.98003 | true | false | false |
mediatum/mediatum | utils/hash.py | 1 | 1599 | """
mediatum - a multimedia content repository
Copyright (C) 2007 Arne Seifert <[email protected]>
Copyright (C) 2007 Matthias Kramm <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
import hashlib
from core import db
q = db.query
logg = logging.getLogger(__name__)
def calcChecksum(filename, method):
if os.path.exists(filename):
f = open(filename)
if method == "SHA-1":
h = hashlib.sha1()
else:
h = hashlib.new('ripemd160')
h.update(f.read())
f.close()
return h.hexdigest()
else:
return ""
def calcChecksumFromMetadata(node):
h = hashlib.sha1()
h.update(str(node.id)) # h.update requires string or buffer as argument
h.update(node.getName())
def attributesToString(node):
string = ""
for item in node.attrs.items():
string += item[0] + item[1]
return string
h.update(attributesToString(node))
return h.hexdigest()
| gpl-3.0 | -1,159,093,611,323,354,600 | 27.052632 | 76 | 0.676673 | false | 3.853012 | false | false | false |
baseclue/django-rest-test | tests/test_compare.py | 1 | 15578 | import unittest
from rest_test import compare
class DictTestCase(unittest.TestCase):
def test_basic(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b='2',
a=1
)
assert compare(data, expected_data)
def test_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b=2,
a=1
)
self.assertFalse(compare(data, expected_data))
def test_deep(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=dict(
a='test'
),
c=''
)
)
assert compare(data, expected_data)
def test_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=dict(
b=1
),
c=''
)
)
self.assertFalse(compare(data, expected_data))
class ItemEllipsisTestCase(unittest.TestCase):
def test_basic(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b='2',
a=...
)
assert compare(data, expected_data)
def test_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b=2,
a=...
)
self.assertFalse(compare(data, expected_data))
def test_deep(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=...,
c=''
)
)
assert compare(data, expected_data)
def test_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=3,
b=...,
c=''
)
)
self.assertFalse(compare(data, expected_data))
def test_missing_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
a=...
)
self.assertFalse(compare(data, expected_data))
def test_moreover_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = dict(
b=2,
a=...,
c='test'
)
self.assertFalse(compare(data, expected_data))
def test_missing_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=2,
b=...,
)
)
self.assertFalse(compare(data, expected_data))
def test_moreover_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b=dict(
a=3,
b=...,
c='',
d='test'
)
)
self.assertFalse(compare(data, expected_data))
class DictEllipsisTestCase(unittest.TestCase):
def test_empty(self):
data = dict(
)
expected_data = {
...: ...
}
assert compare(data, expected_data)
def test_basic(self):
data = dict(
a=1,
b='2'
)
expected_data = {
...: ...
}
assert compare(data, expected_data)
def test_basic_more(self):
data = {
'a': 1,
'b': '2',
'c': 3
}
expected_data = {
...: ...,
'b': '2'
}
assert compare(data, expected_data)
def test_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = {
'b': 2,
...: ...
}
self.assertFalse(compare(data, expected_data))
def test_deep(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 2,
...: ...,
'c': ''
}
)
assert compare(data, expected_data)
def test_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 3,
...: ...,
'c': ''
}
)
self.assertFalse(compare(data, expected_data))
def test_moreover_basic_false(self):
data = dict(
a=1,
b='2'
)
expected_data = {
'b': 2,
...: ...,
'c': 'test'
}
self.assertFalse(compare(data, expected_data))
def test_missing_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 2,
...: ...
}
)
assert compare(data, expected_data)
def test_moreover_deep_false(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = dict(
a=1,
b={
'a': 3,
...: ...,
'c': '',
'd': 'test'
}
)
self.assertFalse(compare(data, expected_data))
def test_bad_usage(self):
data = dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
expected_data = {
'a': 1,
...: dict(
b=dict(
a='test'
),
a=2,
c=''
)
}
with self.assertRaises(TypeError):
compare(data, expected_data)
class ListTestCase(unittest.TestCase):
def test_basic(self):
data = [
1,
'2'
]
expected_data = [
1,
'2'
]
assert compare(data, expected_data)
def test_basic_false(self):
data = [
1,
2
]
expected_data = [
2,
1
]
self.assertFalse(compare(data, expected_data))
def test_combination(self):
data = [
dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
),
dict(
a=2,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
]
expected_data = [
dict(
a=1,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
),
dict(
a=2,
b=dict(
b=dict(
a='test'
),
a=2,
c=''
)
)
]
assert compare(data, expected_data)
class ListEllipsisTestCase(unittest.TestCase):
def test_empty(self):
data = [
'1',
{},
3
]
expected_data = [
...
]
assert compare(data, expected_data)
def test_start(self):
data = [
'1',
{},
3
]
expected_data = [
...,
3
]
assert compare(data, expected_data)
def test_multiple(self):
data = [
'1',
2,
3,
'4',
5
]
expected_data = [
...,
2,
...
]
assert compare(data, expected_data)
def test_end(self):
data = [
1,
2,
3,
4,
5
]
expected_data = [
1,
...
]
assert compare(data, expected_data)
def test_multiple_in(self):
data = [
1,
2,
3,
4,
5,
6,
7
]
expected_data = [
...,
2,
...,
5,
...
]
assert compare(data, expected_data)
def test_start_false(self):
data = [
1,
2,
3
]
expected_data = [
...,
4
]
self.assertFalse(compare(data, expected_data))
def test_multiple_false(self):
data = [
1,
2,
3,
4,
5
]
expected_data = [
...,
6,
...
]
self.assertFalse(compare(data, expected_data))
def test_end_false(self):
data = [
1,
2,
3,
4,
5
]
expected_data = [
2,
...
]
self.assertFalse(compare(data, expected_data))
def test_multiple_in_optional(self):
data = [
1,
2,
3,
4,
5,
6,
7
]
expected_data = [
...,
2,
...,
3,
...
]
assert compare(data, expected_data)
def test_multiple_in_optional_between(self):
data = [
2,
3,
]
expected_data = [
...,
2,
...,
3,
...
]
assert compare(data, expected_data)
def test_bad_usage(self):
data = [
1,
2,
3,
4,
5,
6,
7
]
expected_data = [
...,
...,
7
]
with self.assertRaises(TypeError):
compare(data, expected_data)
def test_one(self):
data = [1]
expected_data = [..., 1, ...]
assert compare(data, expected_data)
class CombinationEllipsisTestCase(unittest.TestCase):
def test_combination(self):
data = [
{
'foo': 1,
'bar': 2,
'zoo': 3,
}
]
expected_data = [
...,
{
...: ...,
'bar': 2
},
...
]
assert compare(data, expected_data)
def test_combination_empty(self):
data = [
{
}
]
expected_data = [
...,
{
...: ...,
},
...
]
assert compare(data, expected_data)
class TypeTestCase(unittest.TestCase):
def test_list(self):
data = [
'1',
{},
3
]
expected_data = list
assert compare(data, expected_data)
def test_dict(self):
data = {
'1': 2,
2: 3,
3: 2
}
expected_data = dict
assert compare(data, expected_data)
def test_list_with_dict(self):
data = [
'1',
{'test': 'test_value'},
3
]
expected_data = [
'1',
dict,
3
]
assert compare(data, expected_data)
def test_dict_with_list(self):
data = {
'1': 2,
'test_key': [1, 2, 'u'],
3: 2
}
expected_data = {
'1': 2,
'test_key': list,
3: 2
}
assert compare(data, expected_data)
def test_different_types_in_list(self):
data = [
'1',
{},
3
]
expected_data = [
str,
dict,
int
]
assert compare(data, expected_data)
def test_different_types_in_dict(self):
data = {
'1': 2,
2: 'test',
3: [1, 2, 3]
}
expected_data = {
'1': int,
2: str,
3: list
}
assert compare(data, expected_data)
def test_different_types_in_dict_in_deep(self):
data = [
'1',
{
'1': 2,
2: 'test',
3: [1, 2, 3]
},
3
]
expected_data = [
'1',
{
'1': int,
2: str,
3: list
},
3
]
assert compare(data, expected_data)
class CombinationTypeEllipsisTestCase(unittest.TestCase):
def test_combination(self):
data = [
{
'foo': 1,
'bar': 2,
'zoo': 3,
},
{
'test_foo': '1',
'test_bar': 2,
'test_zoo': [1, 2, 3],
},
]
expected_data = [
...,
{
...: ...,
'bar': int
},
...,
{
'test_foo': str,
'test_bar': 2,
'test_zoo': list,
}
]
assert compare(data, expected_data)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,426,249,069,602,658,000 | 18.399751 | 57 | 0.29991 | false | 4.53376 | true | false | false |
apeyrard/sjtu-work | DIP/exercises/ex3/ex3.py | 1 | 3652 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
from PIL import Image
import numpy as np
import math
import argparse
def getMatrix(image):
data = list(image.getdata())
width, height = image.size
matrix = np.array(data).reshape(height,width)
return matrix
def getData(matrix):
data = list(matrix.reshape(matrix.shape[0]*matrix.shape[1]))
return data
def preprocessing(matrix):
newMat = matrix.copy()
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
newMat[x][y] = newMat[x][y]*(-1)**(x+y)
return newMat
def postprocessing(matrix):
return preprocessing(matrix)
def ideal(matrix, cutoff, function):
newMat = matrix.copy()
center = (math.floor(newMat.shape[0]/2), math.floor(newMat.shape[1]/2))
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
dist = math.sqrt((x-center[0])**2+(y-center[1])**2)
if function == 'low':
if dist > cutoff:
newMat[x][y] = 0+0j
if function == 'high':
if dist < cutoff:
newMat[x][y] = 0+0j
return newMat
def butter(matrix, order, cutoff, function):
if order is None:
print("Order must be specified for butterworth filter")
sys.exit(1)
newMat = matrix.copy()
center = (math.floor(newMat.shape[0]/2), math.floor(newMat.shape[1]/2))
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
dist = math.sqrt((x-center[0])**2+(y-center[1])**2)
if function == 'low':
newMat[x][y] = newMat[x][y] * (1/(1+(dist/cutoff)**(2*order)))
if function == 'high':
newMat[x][y] = newMat[x][y] * (1-(1/(1+(dist/cutoff)**(2*order))))
return newMat
def gauss(matrix, cutoff, function):
newMat = matrix.copy()
center = (math.floor(newMat.shape[0]/2), math.floor(newMat.shape[1]/2))
for y in range(newMat.shape[1]):
for x in range(newMat.shape[0]):
dist = math.sqrt((x-center[0])**2+(y-center[1])**2)
if function == 'low':
newMat[x][y] = newMat[x][y] * (math.exp(-(dist**2)/(2*(cutoff**2))))
if function == 'high':
newMat[x][y] = newMat[x][y] * (1- (math.exp(-(dist**2)/(2*(cutoff**2)))))
return newMat
parser = argparse.ArgumentParser(description='Filtering in frequency domain')
parser.add_argument('--ideal', action='store_true')
parser.add_argument('--butterworth', action='store_true')
parser.add_argument('--gaussian', action='store_true')
parser.add_argument('--highpass', action='store_true')
parser.add_argument('--lowpass', action='store_true')
parser.add_argument('cutoff', type=float)
parser.add_argument('--order', type=float)
parser.add_argument('image')
args = parser.parse_args()
try:
with Image.open(args.image) as im:
if args.lowpass:
filtering = 'low'
else:
filtering = 'high'
imNew = Image.new(im.mode, im.size)
matrix = getMatrix(im)
prepMat = preprocessing(matrix)
fourierMat = np.fft.fft2(prepMat)
if args.ideal:
imageF = ideal(fourierMat, args.cutoff, filtering)
elif args.butterworth:
imageF = butter(fourierMat, args.order, args.cutoff, filtering)
else:
imageF = gauss(fourierMat, args.cutoff, filtering)
newImage = np.fft.ifft2(imageF)
postNew = postprocessing(newImage)
imNew.putdata(getData(postNew))
imNew.show()
except FileNotFoundError as e:
sys.exit("Error : file not found")
| mit | 3,300,094,426,064,420,000 | 31.035088 | 89 | 0.589266 | false | 3.275336 | false | false | false |
luk156/minimo | minimo/documento/migrations/0004_auto__add_unitamisura__add_field_riga_unita.py | 1 | 10331 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UnitaMisura'
db.create_table(u'documento_unitamisura', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nome', self.gf('django.db.models.fields.CharField')(default='Numero', max_length=30)),
('sigla', self.gf('django.db.models.fields.CharField')(default='N', max_length=4)),
('stato', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'documento', ['UnitaMisura'])
# Adding field 'Riga.unita'
db.add_column(u'documento_riga', 'unita',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['documento.UnitaMisura'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'UnitaMisura'
db.delete_table(u'documento_unitamisura')
# Deleting field 'Riga.unita'
db.delete_column(u'documento_riga', 'unita_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'documento.documento': {
'Meta': {'ordering': "['data']", 'object_name': 'Documento'},
'bollo': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'cap': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'citta': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'cod_fiscale': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.DateField', [], {}),
'data_consegna': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'descrizione_ritenuta': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importo_residuo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'numero': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'p_iva': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'pagamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['documento.Pagamento']", 'null': 'True', 'blank': 'True'}),
'provincia': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ragione_sociale': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'riferimento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['documento.Documento']", 'null': 'True', 'blank': 'True'}),
'ritenuta': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'sconto': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'stato': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documento_template'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['template.TemplateDocumento']"}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'valore_bollo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'via': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'})
},
u'documento.pagamento': {
'Meta': {'object_name': 'Pagamento'},
'giorni': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'iban': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intestazione': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'istituto': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'stato': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'documento.riga': {
'Meta': {'object_name': 'Riga'},
'codice': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '70', 'null': 'True', 'blank': 'True'}),
'descrizione': ('django.db.models.fields.TextField', [], {}),
'descrizione_imposta': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '70', 'null': 'True', 'blank': 'True'}),
'documento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['documento.Documento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importo_unitario': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'imposta': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'quantita': ('django.db.models.fields.FloatField', [], {}),
'unita': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['documento.UnitaMisura']", 'null': 'True', 'blank': 'True'})
},
u'documento.unitamisura': {
'Meta': {'object_name': 'UnitaMisura'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'default': "'Numero'", 'max_length': '30'}),
'sigla': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '4'}),
'stato': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'template.templatedocumento': {
'Meta': {'object_name': 'TemplateDocumento'},
'descrizione': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '70'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['documento'] | gpl-2.0 | 823,570,124,755,032,000 | 74.416058 | 209 | 0.55106 | false | 3.467942 | false | false | false |
fzimmermann89/pyload | module/plugins/hoster/FastixRu.py | 1 | 1366 | # -*- coding: utf-8 -*-
import re
import urllib
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
from module.plugins.internal.utils import json
class FastixRu(MultiHoster):
__name__ = "FastixRu"
__type__ = "hoster"
__version__ = "0.17"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?fastix\.(ru|it)/file/\w{24}'
__config__ = [("activated", "bool", "Activated", True),
("use_premium" , "bool", "Use premium account if available" , True),
("revertfailed", "bool", "Revert to standard download if fails", True)]
__description__ = """Fastix multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Massimo Rosamilia", "[email protected]")]
def setup(self):
self.chunk_limit = 3
def handle_premium(self, pyfile):
self.html = self.load("http://fastix.ru/api_v2/",
get={'apikey': self.account.get_data('apikey'),
'sub' : "getdirectlink",
'link' : pyfile.url})
data = json.loads(self.html)
self.log_debug("Json data", data)
if "error\":true" in self.html:
self.offline()
else:
self.link = data['downloadlink']
getInfo = create_getInfo(FastixRu)
| gpl-3.0 | 3,599,834,605,401,120,300 | 29.355556 | 90 | 0.533675 | false | 3.520619 | false | false | false |
Purg/kwiver | vital/bindings/python/vital/types/landmark_map.py | 1 | 4829 | """
ckwg +31
Copyright 2016 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
vital::landmark_map interface
"""
import ctypes
from vital.types import Landmark
from vital.util import VitalObject, free_void_ptr
class LandmarkMap (VitalObject):
@classmethod
def from_dict(cls, id_lm_d):
"""
Create a new instance of LandmarkMap using the given dictionary mapping
integer IDs to Landmark instances.
:param id_lm_d: dictionary mapping integer IDs to Landmark instances
:type id_lm_d: dict[int|long, vital.types.Landmark]
:return: New landmark map instance containing a copy of the input map.
:rtype: LandmarkMap
"""
s = len(id_lm_d)
t_lm_ids = (ctypes.c_int64 * s)
t_lm_landmarks = (Landmark.c_ptr_type() * s)
lm_ids = t_lm_ids()
lm_landmarks = t_lm_landmarks()
i = 0
for k, l in id_lm_d.iteritems():
lm_ids[i] = k
lm_landmarks[i] = l.c_pointer
i += 1
lm_cptr = cls._call_cfunc(
'vital_landmark_map_new',
[t_lm_landmarks, t_lm_ids, ctypes.c_size_t],
[lm_landmarks, lm_ids, s],
cls.c_ptr_type()
)
return cls(lm_cptr)
def __init__(self, from_cptr=None):
"""
Create and empty map, or initialize from and existing C instance pointer
:param from_cptr: Optional existing landmark map C pointer
"""
super(LandmarkMap, self).__init__(from_cptr)
def _new(self):
return self._call_cfunc(
'vital_landmark_map_new_empty',
restype=self.C_TYPE_PTR
)
def _destroy(self):
self._call_cfunc(
'vital_landmark_map_destroy', [self.C_TYPE_PTR], [self]
)
def __eq__(self, other):
return (
isinstance(other, LandmarkMap) and
self.as_dict() == other.as_dict()
)
def __ne__(self, other):
return not (self == other)
def __len__(self):
return self.size
@property
def size(self):
"""
Get the size of this map
:return: the size of this map
:rtype: int
"""
return self._call_cfunc(
'vital_landmark_map_size',
[self.C_TYPE_PTR], [self],
ctypes.c_size_t
)
def as_dict(self):
"""
Get a copy of this map as a python dictionary
:return: Dictionary mapping landmark IDs to Landmark instances
:rtype: dict[int|long, vital.types.Landmark]
"""
t_lm_ids = ctypes.POINTER(ctypes.c_int64)
t_lm_landmarks = ctypes.POINTER(Landmark.c_ptr_type())
lm_ids = t_lm_ids()
lm_landmarks = t_lm_landmarks()
self._call_cfunc(
'vital_landmark_map_landmarks',
[self.C_TYPE_PTR, ctypes.POINTER(t_lm_ids), ctypes.POINTER(t_lm_landmarks)],
[self, ctypes.byref(lm_ids), ctypes.byref(lm_landmarks)]
)
d = {}
s = self.size
for i in xrange(s):
# Need to copy ctypes pointer object
l_cptr = Landmark.c_ptr_type()(lm_landmarks[i].contents)
d[lm_ids[i]] = Landmark(from_cptr=l_cptr)
free_void_ptr(lm_ids)
free_void_ptr(lm_landmarks)
return d
| bsd-3-clause | -1,191,061,869,346,105,300 | 30.562092 | 88 | 0.61607 | false | 3.820411 | false | false | false |
tectronics/openmalaria-git | util/compareOutput.py | 1 | 7178 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of OpenMalaria.
#
# Copyright (C) 2005-2010 Swiss Tropical Institute and Liverpool School Of Tropical Medicine
#
# OpenMalaria is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import math
from optparse import OptionParser
from approxEqual import ApproxSame
from readOutput import readEntries
REL_PRECISION=1e-6
ABS_PRECISION=1e-6
def charEqual (fn1,fn2):
MAX=10*1024
f1 = open(fn1,'r')
f2 = open(fn2,'r')
while True:
s1 = f1.read(MAX)
s2 = f2.read(MAX)
if (len(s1)==0) or (len(s2)==0):
# end of one or both files; equal if it's the end of both
return len(s1) == len(s2)
if s1 != s2:
return False
def main(fn1,fn2,maxDiffsToPrint=6):
"""Takes names of the two files to compare and optionally an argument describing
the maximum number of differences to print directly (note: order is not intuitive).
Returns a tuple ret,ident; ret is 0 if test passes (output considered near-enough equal),
ident is 1 if files are binary-equal."""
ret=0
opt=""
if REL_PRECISION!=1e-6:
opt+=" --rel-prescision="+str(REL_PRECISION)
if ABS_PRECISION!=1e-6:
opt+=" --abs-prescision="+str(ABS_PRECISION)
print "\033[1;34m compareOutput.py"+opt+" "+fn1+" "+fn2+" "+str(maxDiffsToPrint)+"\033[0;0m"
# Read both files and combine into a map of key to pairs (v1, v2)
try:
if charEqual (fn1,fn2):
print "output.txt files are identical"
return 0,True
print "output.txt files aren't binary-equal"
values1=readEntries(fn1)
values2=readEntries(fn2)
# python 3000 syntax is "except IOError as e", backported to 2.6 but not always supported. Old syntax:
except IOError, e:
print str(e)
return 1,False
values=dict()
for (k,v1) in values1.iteritems():
v2=None
if (k in values2):
v2=values2[k]
del values2[k]
values[k] = (v1,v2)
for (k,v2) in values2.iteritems():
values[k] = (None,v2)
# Go through all values:
numPrinted=0
numDiffs=0
numMissing1=0
numMissing2=0
perMeasureNum = dict()
perMeasureTotal1 = dict()
perMeasureTotal2 = dict()
perMeasureNumDiff = dict()
perMeasureDiffSum = dict()
perMeasureDiffAbsSum = dict()
approxSame = ApproxSame(REL_PRECISION, ABS_PRECISION)
for (k,(v1,v2)) in values.iteritems():
if v1==None:
numMissing1 += 1
elif v2==None:
numMissing2 += 1
else:
perMeasureNum[k.a] = perMeasureNum.get(k.a, 0) + 1
perMeasureTotal1[k.a] = perMeasureTotal1.get(k.a, 0.0) + v1
perMeasureTotal2[k.a] = perMeasureTotal2.get(k.a, 0.0) + v2
# Compare with relative precision
if approxSame (v1, v2):
continue
numDiffs += 1
# Sum up total difference per measure
perMeasureDiffSum[k.a] = perMeasureDiffSum.get(k.a,0.0) + v2 - v1
perMeasureDiffAbsSum[k.a] = perMeasureDiffAbsSum.get(k.a,0.0) + math.fabs(v2-v1)
numPrinted += 1
perMeasureNumDiff[k.a] = perMeasureNumDiff.get(k.a,0) + 1;
if (numPrinted <= maxDiffsToPrint):
print "survey "+str(k.b)+", group "+str(k.c)+", measure "+str(k.a)+": "+str(v1)+" -> "+str(v2)
if (numPrinted == maxDiffsToPrint):
print "[won't print any more line-by-line diffs]"
if (numMissing1 > 0) or (numMissing2 > 0):
print str(numMissing1) + " entries missing from first file, " + str(numMissing2) +" from second"
ret = 3
maxDiffSum=0.0
maxAbsDiffSum=0.0
    for (measure,absDiff) in perMeasureDiffAbsSum.iteritems():
        if not (absDiff <= 1e-6): # handle NANs
            # standard division throws on divide-by-zero, which I don't want
            def div(x,y):
                try:
                    return x/y
                except ZeroDivisionError:
                    return 1e400 * 0 # nan
            diff=perMeasureDiffSum[measure]
            sum1=perMeasureTotal1[measure]
            sum2=perMeasureTotal2[measure]
            diffSum=div(diff,sum1)
            maxDiffSum=max(maxDiffSum,math.fabs(diffSum))
            absDiffSum=div(absDiff,sum1)
            maxAbsDiffSum=max(maxAbsDiffSum,absDiffSum)
            print "for measure "+str(measure)+":\tsum(1st file):"+str(sum1)+"\tsum(2nd file):"+str(sum2)+"\tdiff/sum: "+str(diffSum)+"\t(abs diff)/sum: "+str(absDiffSum)
if maxDiffSum>0 or maxAbsDiffSum>0:
print "Max diff/sum:",maxDiffSum,"max (abs diff)/sum:",maxAbsDiffSum
if numDiffs == 0:
print "No significant differences (total relative diff: "+str(approxSame.getTotalRelDiff())+"), ok."
return ret,False
else:
print "\033[1;31m"+str(numDiffs)+" significant differences (total relative diff: "+str(approxSame.getTotalRelDiff())+ ")!\033[0;0m"
return 1,False
# Test for options
def evalOptions (args):
parser = OptionParser(usage="Usage: %prog [options] logfile1 logfile2 [max different lines to print]",
# damn reformatting into a single paragraph: this doesn't get printed very nicely when --help is invoked
description="""Compare logfile1 and logfile2 for differences, returning a measure of difference.
See http://code.google.com/p/openmalaria/wiki/UtilsRunScripts#compareOutput.py for details on output.""")
parser.add_option("-R","--rel-precision",
action="store", dest="rel_precision", type="float",
help="Set relative precision (default: 1.0e-6)")
parser.add_option("-A","--abs-precision",
action="store", dest="abs_precision", type="float",
help="Set absolute precision (default: 1.0e-6)")
(options, others) = parser.parse_args(args=args)
return options,others
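# Example invocation (file names are illustrative):
#   python compareOutput.py expected/output.txt run/output.txt 10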
if __name__ == '__main__':
(options,others) = evalOptions (sys.argv[1:])
if options.rel_precision:
REL_PRECISION=options.rel_precision
if options.abs_precision:
ABS_PRECISION=options.abs_precision
if (len(others) == 3):
ret,ident = main (others[0],others[1],int(others[2]))
elif (len(others) == 2):
ret,ident = main (others[0],others[1])
else:
print "Usage: "+sys.argv[0]+" logfile1 logfile2 [max different lines to print]"
ret=-1
sys.exit(ret)
| gpl-2.0 | -189,319,169,589,652,500 | 38.224044 | 165 | 0.617721 | false | 3.43445 | false | false | false |
linyc74/CaMNIST | view.py | 1 | 12377 | import numpy as np
import cv2, time, sys, threading, json, os
from PyQt4 import QtCore, QtGui
from controller import *
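# GUI layer for CaMNIST: a PyQt4 main window that displays camera frames
# pushed from a worker thread (via Qt signals) and forwards user actions to
# the core object through the controller.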
class CamnistGUI(QtGui.QMainWindow):
def __init__(self, controller_obj):
super(CamnistGUI, self).__init__()
self.controller = controller_obj
pkg_dir = os.path.dirname(__file__)
path = os.path.join(pkg_dir, 'parameters/gui.json')
gui_parms = json.loads(open(path, 'r').read())
w = gui_parms['monitor_width']
h = gui_parms['monitor_height']
self.setWindowTitle('CaMNIST')
self.setWindowIcon(QtGui.QIcon('icons/cool.png'))
self.setGeometry(100, 100, w, h)
self.setFixedSize(w, h)
self.setMouseTracking(True)
self.monitor = QtGui.QLabel(self)
self.monitor.setGeometry(0, 0, w, h)
self.monitor.setAlignment(QtCore.Qt.AlignCenter)
self.toolbar = QtGui.QToolBar('Tool Bar')
self.toolbar.setMovable(True)
self.toolbar.setStyleSheet("QToolBar { background:white; }")
self.toolbar.setIconSize(QtCore.QSize(30, 45))
self.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolbar)
self.info_window = TextWindow()
self.camera_tuner_window = CameraTunerWindow( controller_obj = self.controller )
self.__init__toolbtns()
def __init__toolbtns(self):
# Each action has a unique key and a name
# key = icon filename = method name
# name = text of the action/button
# ( keys , names )
K = [('snapshot' , 'Snapshot' ),
('toggle_recording' , 'Record Video' ),
('open_info' , 'Show Real-time Info' ),
('open_camera_tuner', 'Adjust Camera Parameters' )]
self.actions = {}
self.toolbtns = {}
# Create actions and tool buttons
for key, name in K:
pkg_dir = os.path.dirname(__file__)
path = os.path.join(pkg_dir, 'icons/' + key + '.png')
icon = QtGui.QIcon(path)
self.actions[key] = QtGui.QAction(icon, name, self)
self.toolbtns[key] = self.toolbar.addAction(self.actions[key])
# For actions that needs to be connected to the core object,
K = ['snapshot', 'toggle_recording']
# In this loop I defined a standard way of
# connecting each action to a method in the core object via the controller object.
for key in K:
# Get a argument-less method from the controller object.
# Note that the method_name = key.
method = self.controller.get_method( method_name = key )
# The get_method() returns None
# if a particular method is not found in the core object.
if not method is None:
# Connect the action to the method in the controller object
self.actions[key].triggered.connect(method)
# For actions that needs to be connected to the self gui object,
keys = ['open_info', 'open_camera_tuner']
for key in keys:
try:
method = getattr(self, key)
self.actions[key].triggered.connect(method)
except Exception as exception_inst:
print(exception_inst)
def open_info(self):
if not self.info_window.isVisible():
self.info_window.show()
def open_camera_tuner(self):
self.camera_tuner_window.show()
def wheelEvent(self, event):
if event.delta() > 0:
self.controller.call_method('zoom_in')
else:
self.controller.call_method('zoom_out')
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self,
'CaMNIST',
'Are you sure you want to quit CaMNIST?',
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.controller.call_method('close')
self.info_window.close()
self.camera_tuner_window.close()
event.accept()
else:
event.ignore()
# Methods for incoming signals
def connect_signals(self, thread, signal_name):
'Called by an external object to connect signals.'
# The suffix '(PyQt_PyObject)' means the argument to be transferred
# could be any type of python objects,
# not limited to Qt objects.
signal = signal_name + '(PyQt_PyObject)'
# The method name to be called upon signal arrival = the signal name
try:
method = getattr(self, signal_name)
self.connect(thread, QtCore.SIGNAL(signal), method)
except Exception as exception_inst:
print("Try to connect PyQt signal '{}'".format(signal_name))
            print(str(exception_inst) + '\n')
def progress_update(self, text_value):
self.progress_bar.progress_update(text_value)
def display_image(self, image):
        # convert from BGR to RGB before wrapping the frame in a QImage
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, bytesPerComponent = image.shape
bytesPerLine = bytesPerComponent * width
# convert cv2 image to QImage
Q_img = QtGui.QImage(image,
width, height, bytesPerLine,
QtGui.QImage.Format_RGB888)
# Convert QImage to QPixmap
Q_pixmap = QtGui.QPixmap.fromImage(Q_img)
# Set the QLabel to display the QPixmap
self.monitor.setPixmap(Q_pixmap)
def recording_starts(self):
self.actions['toggle_recording'].setIcon(QtGui.QIcon('icons/stop_recording.png'))
self.actions['toggle_recording'].setText('Stop')
def recording_ends(self):
self.actions['toggle_recording'].setIcon(QtGui.QIcon('icons/toggle_recording.png'))
self.actions['toggle_recording'].setText('Record Video')
def set_info_text(self, text):
self.info_window.setText(text)
def display_topography(self, vertices):
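        # self.gl_window is not created anywhere in this module; this slot
        # assumes an OpenGL window is attached to the GUI elsewhere before
        # the corresponding signal arrives.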
self.gl_window.gl_widget.updateObject(vertices)
class SliderWidget(QtGui.QWidget):
'''
This widget wraps a single parameter in the TunerWindow.
Name, value, min, max, interval are stored in this object.
Three gui elements are included to display the information of the parameter:
1) QLabel showing name
2) QLabel showing value
3) QSlider
'''
def __init__(self, parent, name, min, max, value, interval):
super(SliderWidget, self).__init__(parent)
self.name = name
self.min = min
self.max = max
self.value = value
self.interval = interval
self.hbox = QtGui.QHBoxLayout()
self.QLabel_name = QtGui.QLabel(self)
self.QLabel_value = QtGui.QLabel(self)
self.QSlider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.setLayout(self.hbox)
self.hbox.addWidget(self.QLabel_name)
self.hbox.addWidget(self.QLabel_value)
self.hbox.addWidget(self.QSlider)
self.QLabel_name.setText(name)
self.QLabel_value.setText(str(value))
self.QSlider.setMinimum(min)
self.QSlider.setMaximum(max)
self.QSlider.setValue(value)
self.QSlider.setSingleStep(interval)
self.QSlider.setTickInterval(interval)
self.QSlider.setTickPosition(QtGui.QSlider.TicksBelow)
self.QSlider.valueChanged.connect(self.setValue)
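        # Every slider movement goes through setValue(), which snaps the raw
        # value to the nearest multiple of the interval before displaying it.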
def setValue(self, value):
# Round the value to fit the interval
value = value - self.min
value = round( value / float(self.interval) ) * self.interval
value = int( value + self.min )
self.value = value
self.QSlider.setValue(value)
self.QLabel_value.setText(str(value))
class TextWindow(QtGui.QWidget):
def __init__(self):
super(TextWindow, self).__init__()
self.setWindowTitle('Info')
self.setWindowIcon(QtGui.QIcon('icons/cool.png'))
self.setGeometry(150, 150, 512, 256)
self.setFixedSize(512, 256)
self.font = QtGui.QFont()
self.font.setFamily('Segoe UI')
self.font.setBold(False)
self.font.setPixelSize(14)
self.textbox = QtGui.QLabel(self)
self.textbox.setGeometry(0, 0, 512, 256)
self.textbox.setAlignment(QtCore.Qt.AlignLeft)
self.textbox.setFont(self.font)
def setText(self, text):
self.textbox.setText(text)
class TunerWindow(QtGui.QWidget):
'''
A gui template window for tuning parameters.
This class does not contain any business logic.
All it does is to provide an interface to adjust parameters through gui.
Each parameter is wrapped in a 'block' of SliderWidget object.
Properties (name, min, max, value, interval)
of each parameter is stored in the SliderWidget object.
'''
def __init__(self):
super(TunerWindow, self).__init__()
# self.setMinimumWidth(600)
# self.setMaximumWidth(600)
self.main_vbox = QtGui.QVBoxLayout()
self.setLayout(self.main_vbox)
self.btn_hbox = QtGui.QHBoxLayout()
self.main_vbox.addLayout(self.btn_hbox)
K = [('ok' ,'OK' ),
('cancel','Cancel'),
('apply' ,'Apply' )]
self.btn = {}
for key, name in K:
self.btn[key] = QtGui.QPushButton(name, self)
self.btn[key].clicked.connect(getattr(self, key))
self.btn_hbox.addWidget( self.btn[key] )
self.parameters = []
def apply_parameter(self):
'''
Supposed to be overridden.
Defines what to do when ok() or apply() are called.
'''
pass
def ok(self):
self.apply_parameter()
self.hide()
def cancel(self):
self.hide()
def apply(self):
self.apply_parameter()
def add_parameter(self, name, min, max, value, interval):
'''
Add a new SliderWidget object holding all information of the new parameter.
'''
widget = SliderWidget(parent = self,
name = name,
min = min,
max = max,
value = value,
interval = interval)
self.parameters.append(widget)
        # Insert above the OK/Cancel/Apply row so the buttons stay at the bottom.
        self.main_vbox.insertWidget(self.main_vbox.count()-1, widget)
class CameraTunerWindow(TunerWindow):
'''
Inherits from the TunerWindow class.
The business logics for the camera imaging parameters
is specified in this class.
This class also manages the transfer of camera parameters
to the core object.
'''
def __init__(self, controller_obj):
super(CameraTunerWindow, self).__init__()
self.controller = controller_obj
self.setWindowIcon(QtGui.QIcon('icons/cool.png'))
        self.setWindowTitle('Camera Parameters')
self.setMinimumWidth(600)
self.add_parameter(name='brightness' , min=0 , max=255 , value=150 , interval=5 )
self.add_parameter(name='contrast' , min=0 , max=255 , value=64 , interval=5 )
self.add_parameter(name='saturation' , min=0 , max=255 , value=80 , interval=5 )
self.add_parameter(name='gain' , min=0 , max=255 , value=50 , interval=5 )
self.add_parameter(name='exposure' , min=-7 , max=-1 , value=-4 , interval=1 )
self.add_parameter(name='white_balance' , min=3000, max=6500, value=5000, interval=100)
self.add_parameter(name='focus' , min=0 , max=255 , value=0 , interval=5 )
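        # The parameter names above become the keys of the dict handed to the
        # core's apply_camera_parameters() in apply_parameter() below, so they
        # must match what the core expects.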
def apply_parameter(self):
'''
Transfers parameters to the core object via the controller.
'''
parms = {}
for p in self.parameters:
parms[p.name] = p.value
self.controller.call_method( method_name = 'apply_camera_parameters',
arg = parms )
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
gui = CamnistGUI( controller_obj = MockController() )
gui.show()
sys.exit(app.exec_())
| mit | 6,695,720,950,841,457,000 | 32.271505 | 95 | 0.586329 | false | 3.997739 | false | false | false |