code | repo_name | path | language | license | size
---|---|---|---|---|---|
from tools import *
from default_record import *
from xen.xend import uuid
from xen.xend import XendDomain, XendNode
from xen.xend import BNVMAPI, BNStorageAPI
from xen.xend.server.netif import randomMAC
from xen.xend.ConfigUtil import getConfigVar
from xen.xend.XendAPIConstants import *
from xen.xend.XendAuthSessions import instance as auth_manager
from xen.xend.XendLogging import log_unittest, init
init("/var/log/xen/unittest.log", "DEBUG", log_unittest)
log = log_unittest
MB = 1024 * 1024
XEND_NODE = XendNode.instance()
XEND_DOMAIN = XendDomain.instance()
VMAPI = BNVMAPI.instance()
STORAGEAPI = BNStorageAPI.instance()
SESSION = "SessionForTest"
# SESSION = VMAPI.session_login_with_password('root', 'onceas').get('Value')
SR_TYPE = 'ocfs2'
ISO_SR_TYPE = 'gpfs_iso'
VM_VDI_MAP = {}
if getConfigVar('compute', 'VM', 'disk_limit'):
DISK_LIMIT = int(getConfigVar('compute', 'VM', 'disk_limit'))
else:
DISK_LIMIT = 6
if getConfigVar('compute', 'VM', 'interface_limit'):
INTERFACE_LIMIT = int(getConfigVar('compute', 'VM', 'interface_limit'))
else:
INTERFACE_LIMIT = 6
def _get_ocfs2_SR():
sr = XEND_NODE.get_sr_by_type(SR_TYPE)
if not sr:
raise Exception("We need ocfs2 SR_ref here!")
else:
return sr[0]
SR_ref = _get_ocfs2_SR()
log.debug(">>>>>>>>>>>SR is: %s" % SR_ref)
def login_session():
return "SessionForTest"
def negative_session():
return "NegativeSession"
def negative_host():
return "NegativeHost"
def logout_session(session):
auth_manager().logout(session)
def destroy_VM_and_VDI(vm_ref, hard_shutdown_before_delete=False):
if VM_VDI_MAP:
vdi_ref = VM_VDI_MAP.get(vm_ref)
log.debug("destroy_VM_and_VDI, vdi_ref: %s" % vdi_ref)
if not vdi_ref:
vdi_ref = vm_ref
XEND_NODE.srs[SR_ref].destroy_vdi(vdi_ref, True, True)
if hard_shutdown_before_delete:
XEND_DOMAIN.domain_destroy(vm_ref)
XEND_DOMAIN.domain_delete(vm_ref, True)
def destroy_VDI(vdi_ref):
sr = XEND_NODE.get_sr_by_vdi(vdi_ref)
XEND_NODE.srs[sr].destroy_vdi(vdi_ref, True, True)
def start_VM(vm_ref, start_paused=False, force_start=True):
try:
log.debug(">>>>>>>>>>>start_VM")
VMAPI._VM_start(SESSION, vm_ref, start_paused, force_start)
power_state = VMAPI._VM_get_power_state(vm_ref).get('Value')
log.debug(">>>>>>>>>>>>>VM power state: %s<<<<<<<<<<<<<<" % power_state)
if cmp(power_state, XEN_API_VM_POWER_STATE[XEN_API_VM_POWER_STATE_RUNNING]) == 0:
return True
else:
return False
except Exception, e:
log.exception("<<<<<<<<<<<<start_VM failed! VM: %s;Exception: %s" %(vm_ref, e))
raise e
def set_VM_is_a_template(vm_ref):
return VMAPI._VM_set_is_a_template(SESSION, vm_ref, True)
def create_bootable_VM_with_VDI(memory_size = 512, vcpu_num = 1, disk_size = 10):
log.debug(">>>>>>>>>>>create_running_VM_with_VDI")
memory_size = memory_size * MB
vm_rec = dict(VM_default)
vm_rec['memory_static_max'] = memory_size
vm_rec['memory_dynamic_max'] = memory_size
vm_rec['VCPUs_max'] = vcpu_num
vm_rec['VCPUs_at_startup'] = vcpu_num
vm_ref = XEND_DOMAIN.create_domain(vm_rec)
try:
if vm_ref :
create_VBD_and_VDI(vm_ref, disk_size, True)
create_CD_attached_VM(vm_ref, "hdc", False)
create_console_attached_VM(vm_ref, "rfb")
return vm_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_VM_with_VDI failed! VM: %s; Exception: %s" % (vm_ref, e))
XEND_DOMAIN.domain_delete(vm_ref, True)
raise e
def create_VM_with_VDI(memory_size = 512, vcpu_num = 1, disk_size = 10):
log.debug(">>>>>>>>>>>create_VM_with_VDI")
memory_size = memory_size * MB
vm_rec = dict(VM_default)
vm_rec['memory_static_max'] = memory_size
vm_rec['memory_dynamic_max'] = memory_size
vm_rec['VCPUs_max'] = vcpu_num
vm_rec['VCPUs_at_startup'] = vcpu_num
vm_ref = XEND_DOMAIN.create_domain(vm_rec)
try:
if vm_ref :
create_VBD_and_VDI(vm_ref, disk_size, True)
return vm_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_VM_with_VDI failed! VM: %s; Exception: %s" % (vm_ref, e))
XEND_DOMAIN.domain_delete(vm_ref, True)
raise e
def create_VM(memory_size = 512, vcpu_num = 1):
try:
log.debug(">>>>>>>>>>>create VM")
memory_size = memory_size * MB
vm_rec = dict(VM_default)
vm_rec['memory_static_max'] = memory_size
vm_rec['memory_dynamic_max'] = memory_size
vm_rec['VCPUs_max'] = vcpu_num
vm_rec['VCPUs_at_startup'] = vcpu_num
vm_ref = XEND_DOMAIN.create_domain(vm_rec)
return vm_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_VM failed! Exception: %s" % (e))
raise e
def create_VIF_attached_VM(attached_vm, mac, network):
try:
log.debug(">>>>>>>>>>>create_VIF_attached_VM")
vif_record = dict(vif_default)
vif_record['VM'] = attached_vm
vif_record['MTU'] = 1500
vif_record['MAC'] = mac
vif_record['network'] = network
response = VMAPI._VIF_create(SESSION, vif_record)
return response
except Exception, e:
log.exception("<<<<<<<<<<<create_VIF_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def create_console_attached_VM(attached_vm, protocol):
try:
log.debug(">>>>>>>>>>create_console_attached_VM")
console_record = dict(console_default)
console_record['VM'] = attached_vm
console_record['protocol'] = protocol
response = VMAPI._console_create(SESSION, console_record)
return response
except Exception, e:
log.exception("<<<<<<<<<<<create_console_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def create_CD_attached_VM(attached_vm, device, bootable):
try:
log.debug(">>>>>>>>>>create_CD_attached_VM")
vdi_uuid = _get_ISO_VDI()
vbd_record = dict(vbd_default)
vbd_record['VM'] = attached_vm
vbd_record['bootable'] = bootable
vbd_record['device'] = device
vbd_record['VDI'] = vdi_uuid
vbd_record['type'] = "CD"
response = VMAPI._VBD_create(SESSION, vbd_record)
return response
except Exception, e:
log.exception("<<<<<<<<<<<create_CD_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def create_data_VBD_attached_VM(attached_vm, vdi_ref):
try:
return VMAPI._VM_create_data_VBD(SESSION, attached_vm, vdi_ref)
except Exception, e:
log.exception("<<<<<<<<<<<create_data_VBD_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def get_first_VIF(vm_ref):
try:
vifs = VMAPI._VM_get_VIFs(vm_ref).get('Value')
if vifs:
return vifs[0]
return None
except Exception, e:
log.exception("<<<<<<<<<<<get_first_VIF failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def get_VIF_ovs_bridge(vif_ref):
try:
return XEND_DOMAIN.get_dev_property_by_uuid('vif', vif_ref, 'bridge')
except Exception, e:
log.exception("<<<<<<<<<<<get_VIF_ovs_bridge failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def get_negative_VIF():
return "THIS_IS_NEGATIVE_VIF"
def _get_ISO_VDI():
srs_ref = XEND_NODE.get_sr_by_type(ISO_SR_TYPE)
if srs_ref:
sr = XEND_NODE.get_sr(srs_ref[0])
vdis = sr.get_vdis()
if vdis:
for vdi in vdis:
if cmp(sr.get_vdi_by_uuid(vdi).name_label, 'cd-rom') == 0:
continue
return vdi
else:
raise Exception, "No ISO disk in system."
else:
raise Exception, "No ISO storage in system."
def gen_randomMAC():
return randomMAC()
def gen_negativeMAC():
return "THIS_IS_NEGATIVE_MAC"
def _createUuid():
return uuid.uuidFactory()
def gen_regularUuid():
return uuid.toString(_createUuid())
def gen_negativeUuid():
return "THIS_IS_NEGATIVE_UUID"
def gen_negativeName():
return "THIS_IS_NEGATIVE_NAME_$%!"
def gen_regularSnapshotName(ref):
return "ss-%s" % ref
def gen_negativeSnapshotName():
return "ss-!&&!"
def vm_api_VM_create_on_from_template(session, host, template_vm, new_vm_name, param_dict, ping):
try:
return VMAPI.VM_create_on_from_template(session, host, template_vm, new_vm_name, param_dict, ping)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_create_on_from_template failed! VM: %s; Exception: %s" % (new_vm_name, e))
raise e
def vm_api_VM_snapshot(session, vm_ref, snap_name):
try:
return VMAPI.VM_snapshot(session, vm_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_snapshot failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def vm_api_VM_get_system_VDI(session, vm_ref):
try:
return VMAPI._VM_get_system_VDI(session, vm_ref)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_get_system_VDI failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def vm_api_VM_rollback(session, vm_ref, snap_name):
try:
return VMAPI.VM_rollback(session, vm_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_rollback failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def storage_api_VDI_snapshot(session, vdi_ref, snap_name):
try:
return STORAGEAPI.VDI_snapshot(session, vdi_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<storage_api_VDI_snapshot failed! VDI: %s; Exception: %s" % (vdi_ref, e))
raise e
def storage_api_VDI_rollback(session, vdi_ref, snap_name):
try:
return STORAGEAPI.VDI_rollback(session, vdi_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<storage_api_VDI_rollback failed! VDI: %s; Exception: %s" % (vdi_ref, e))
raise e
def storage_api_VDI_destroy_snapshot(session, vdi_ref, snap_name):
try:
return STORAGEAPI.VDI_destroy_snapshot(session, vdi_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<storage_api_VDI_destroy_snapshot failed! VDI: %s; Exception: %s" % (vdi_ref, e))
raise e
def create_data_VDI(disk_size=10):
try:
log.debug(">>>>>>>>>>>in create_data_VDI")
vdi_uuid = gen_regularUuid()
vdi_record = dict(vdi_default)
vdi_record['uuid'] = vdi_uuid
vdi_record['virtual_size'] = disk_size
vdi_record['type'] = 'metadata'
vdi_record['sharable'] = True
vdi_record = STORAGEAPI._VDI_select_SR(SESSION, vdi_record)
sr = vdi_record.get('SR')
vdi_ref = XEND_NODE.srs[sr].create_vdi(vdi_record, True)
return vdi_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_data_VDI failed! Exception: %s" % (e))
raise e
def create_VBD_and_VDI(vm_ref, disk_size, is_system_vbd):
log.debug(">>>>>>>>>>>in create_VBD_and_VDI")
vdi_uuid = gen_regularUuid()
sr_instance = XEND_NODE.get_sr(SR_ref)
location = "tap:aio:"+sr_instance.get_location()+"/"+vdi_uuid+"/disk.vhd";
vdi_record = dict(vdi_default)
vdi_record['uuid'] = vdi_uuid
vdi_record['virtual_size'] = disk_size
if is_system_vbd:
vdi_record['type'] = 'user'
else:
vdi_record['type'] = 'metadata'
vdi_record['sharable'] = True
vdi_record['SR_ref'] = SR_ref
vdi_record['location'] = location
vbd_record = dict(vbd_default)
vbd_record['VM'] = vm_ref
if is_system_vbd:
vbd_record['bootable'] = True
else:
vbd_record['bootable'] = False
if is_system_vbd:
vbd_record['device'] = 'hda'
vbd_record['mode'] ='RW'
vbd_record['type'] ='Disk'
vdi_ref = XEND_NODE.srs[SR_ref].create_vdi(vdi_record, True)
try:
VM_VDI_MAP[vm_ref] = vdi_ref
vbd_record['VDI'] = vdi_ref
dominfo = XEND_DOMAIN.get_vm_by_uuid(vm_ref)
vbd_ref = dominfo.create_vbd_for_xenapi(vbd_record, location)
log.debug(">>>>>>>>>>>vbd ref: %s" % vbd_ref)
XEND_DOMAIN.managed_config_save(dominfo)
return vbd_ref
except Exception, e:
log.debug("<<<<<<<<<<<VBD create failed! Destroy attached VDI: %s. %s" % (vdi_ref, e))
destroy_VDI(vdi_ref)
raise e
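# --- Usage sketch (illustrative only; not part of the original test utilities) ---
# A typical flow with the helpers above: create a bootable VM backed by a 10 GB
# system disk, start it, then tear everything down again. Only helpers defined
# in this module are used; the parameter values are made up.
if __name__ == '__main__':
    session = login_session()
    vm_ref = create_bootable_VM_with_VDI(memory_size=512, vcpu_num=1, disk_size=10)
    try:
        assert start_VM(vm_ref), "VM did not reach the Running state"
    finally:
        destroy_VM_and_VDI(vm_ref, hard_shutdown_before_delete=True)
        logout_session(session)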
| Hearen/OnceServer | pool_management/bn-xend-core/xend/tests/util/BNVMAPI_Util.py | Python | mit | 13,032 |
from setuptools import setup, find_packages
setup(
name="mould",
version="0.1",
packages=find_packages(),
package_data={
"mould": ["*.tpl"],
},
install_requires=[
"Flask",
"Flask-Script",
"Flask-Testing",
"alembic",
"gunicorn",
"sqlalchemy"
],
)
| kates/mould | setup.py | Python | mit | 440 |
from __future__ import absolute_import
from .validates import *
| openelections/openelections-core | openelex/us/vt/validate/__init__.py | Python | mit | 64 |
import hashlib
import hmac
import json
import requests
class GitHubResponse:
"""Wrapper for GET request response from GitHub"""
def __init__(self, response):
self.response = response
@property
def is_ok(self):
"""Check if request has been successful
:return: if it was OK
:rtype: bool
"""
return self.response.status_code < 300
@property
def data(self):
"""Response data as dict/list
:return: data of response
:rtype: dict|list
"""
return self.response.json()
@property
def url(self):
"""URL of the request leading to this response
:return: URL origin
:rtype: str
"""
return self.response.url
@property
def links(self):
"""Response header links
:return: response header links
:rtype: dict
"""
return self.response.links
@property
def is_first_page(self):
"""Check if this is the first page of data
:return: if it is the first page of data
:rtype: bool
"""
return 'first' not in self.links
@property
def is_last_page(self):
"""Check if this is the last page of data
:return: if it is the last page of data
:rtype: bool
"""
return 'last' not in self.links
@property
def is_only_page(self):
"""Check if this is the only page of data
:return: if it is the only page page of data
:rtype: bool
"""
return self.is_first_page and self.is_last_page
@property
def total_pages(self):
"""Number of pages
:return: number of pages
:rtype: int
"""
if 'last' not in self.links:
return self.actual_page
return self.parse_page_number(self.links['last']['url'])
@property
def actual_page(self):
"""Actual page number
:return: actual page number
:rtype: int
"""
return self.parse_page_number(self.url)
@staticmethod
def parse_page_number(url):
"""Parse page number from GitHub GET URL
:param url: URL used for GET request
:type url: str
:return: page number
:rtype: int
"""
if '?' not in url:
return 1
params = url.split('?')[1].split('=')
params = {k: v for k, v in zip(params[0::2], params[1::2])}
if 'page' not in params:
return 1
return int(params['page'])
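# Example (illustrative): the page number is read from the URL query string,
# defaulting to 1 when no "page" parameter is present.
#   GitHubResponse.parse_page_number('https://api.github.com/user/repos?page=3')  -> 3
#   GitHubResponse.parse_page_number('https://api.github.com/user/repos')         -> 1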
class GitHubAPI:
"""Simple GitHub API communication wrapper
It provides simple way for getting the basic GitHub API
resources and special methods for working with webhooks.
.. todo:: handle if GitHub is out of service, custom errors,
better abstraction, work with extensions
"""
#: URL to GitHub API
API_URL = 'https://api.github.com'
#: URL for OAuth request at GitHub
AUTH_URL = 'https://github.com/login/oauth/authorize?scope={}&client_id={}'
#: URL for OAuth token at GitHub
TOKEN_URL = 'https://github.com/login/oauth/access_token'
#: Scopes for OAuth request
SCOPES = ['user', 'repo', 'admin:repo_hook']
#: Required webhooks to be registered
WEBHOOKS = ['push', 'release', 'repository']
#: Controller for incoming webhook events
WEBHOOK_CONTROLLER = 'webhooks.gh_webhook'
#: URL for checking connections within GitHub
CONNECTIONS_URL = 'https://github.com/settings/connections/applications/{}'
def __init__(self, client_id, client_secret, webhooks_secret,
session=None, token=None):
self.client_id = client_id
self.client_secret = client_secret
self.webhooks_secret = webhooks_secret
self.session = session or requests.Session()
self.token = token
self.scope = []
def _get_headers(self):
"""Prepare auth header fields (empty if no token provided)
:return: Headers for the request
:rtype: dict
"""
if self.token is None:
return {}
return {
'Authorization': 'token {}'.format(self.token),
'Accept': 'application/vnd.github.mercy-preview+json'
}
def get_auth_url(self):
"""Create OAuth request URL
:return: OAuth request URL
:rtype: str
"""
return self.AUTH_URL.format(' '.join(self.SCOPES), self.client_id)
def login(self, session_code):
"""Authorize via OAuth with given session code
:param session_code: The session code for OAuth
:type session_code: str
:return: If the auth procedure was successful
:rtype: bool
.. todo:: check granted scope vs GH_SCOPES
"""
response = self.session.post(
self.TOKEN_URL,
headers={
'Accept': 'application/json'
},
data={
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': session_code,
}
)
if response.status_code != 200:
return False
data = response.json()
self.token = data['access_token']
self.scope = [x for x in data['scope'].split(',')]
return True
def get(self, what, page=0):
"""Perform GET request on GitHub API
:param what: URI of requested resource
:type what: str
:param page: Number of requested page
:type page: int
:return: Response from the GitHub
:rtype: ``repocribro.github.GitHubResponse``
"""
uri = self.API_URL + what
if page > 0:
uri += '?page={}'.format(page)
return GitHubResponse(self.session.get(
uri,
headers=self._get_headers()
))
def webhook_get(self, full_name, hook_id):
"""Perform GET request for repo's webhook
:param full_name: Full name of repository that contains the hook
:type full_name: str
:param hook_id: GitHub ID of the hook to retrieve
:type hook_id: int
:return: Data of the webhook
:rtype: ``repocribro.github.GitHubResponse``
"""
return self.get('/repos/{}/hooks/{}'.format(full_name, hook_id))
def webhooks_get(self, full_name):
"""GET all webhooks of the repository
:param full_name: Full name of repository
:type full_name: str
:return: List of returned webhooks
:rtype: ``repocribro.github.GitHubResponse``
"""
return self.get('/repos/{}/hooks'.format(full_name))
def webhook_create(self, full_name, hook_url, events=None):
"""Create new webhook for specified repository
:param full_name: Full name of the repository
:type full_name: str
:param hook_url: URL where the webhook data will be sent
:type hook_url: str
:param events: List of requested events for that webhook
:type events: list of str
:return: The created webhook data
:rtype: dict or None
"""
if events is None:
events = self.WEBHOOKS
data = {
'name': 'web',
'active': True,
'events': events,
'config': {
'url': hook_url,
'content_type': 'json',
'secret': self.webhooks_secret
}
}
response = self.session.post(
self.API_URL + '/repos/{}/hooks'.format(full_name),
data=json.dumps(data),
headers=self._get_headers()
)
if response.status_code == 201:
return response.json()
return None
def webhook_tests(self, full_name, hook_id):
"""Perform test request for repo's webhook
:param full_name: Full name of repository that contains the hook
:type full_name: str
:param hook_id: GitHub ID of hook to be tested
:type hook_id: int
:return: If request was successful
:rtype: bool
"""
response = self.session.post(
self.API_URL + '/repos/{}/hooks/{}/tests'.format(
full_name, hook_id
),
headers=self._get_headers()
)
return response.status_code == 204
def webhook_delete(self, full_name, hook_id):
"""Perform DELETE request for repo's webhook
:param full_name: Full name of repository that contains the hook
:type full_name: str
:param hook_id: GitHub ID of hook to be deleted
:type hook_id: int
:return: If request was successful
:rtype: bool
"""
response = self.session.delete(
self.API_URL + '/repos/{}/hooks/{}'.format(
full_name, hook_id
),
headers=self._get_headers()
)
return response.status_code == 204
def webhook_verify_signature(self, data, signature):
"""Verify the content with signature
:param data: Request data to be verified
:param signature: The signature of data
:type signature: str
:return: If the content is verified
:rtype: bool
"""
h = hmac.new(
self.webhooks_secret.encode('utf-8'),
data,
hashlib.sha1
)
return hmac.compare_digest(h.hexdigest(), signature)
@property
def app_connections_link(self):
return self.CONNECTIONS_URL.format(self.client_id)
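# --- Usage sketch (illustrative only) ---
# Shows how the HMAC check in webhook_verify_signature pairs with a payload
# signed by the same secret. The credentials and payload below are made-up
# placeholders, not real repocribro configuration.
if __name__ == '__main__':
    gh = GitHubAPI(client_id='my-client-id',
                   client_secret='my-client-secret',
                   webhooks_secret='my-webhook-secret')
    payload = b'{"zen": "Keep it logically awesome."}'
    signature = hmac.new(b'my-webhook-secret', payload, hashlib.sha1).hexdigest()
    print(gh.webhook_verify_signature(payload, signature))  # True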
| MarekSuchanek/repocribro | repocribro/github.py | Python | mit | 9,565 |
import os
def Dir_toStdName(path):
if not (path[-1]=="/" or path[-1] == "//"):
path=path+"/"
return path
def Dir_getFiles(path):
path=Dir_toStdName(path)
allfiles=[]
files=os.listdir(path)
for f in files:
abs_path = path + f
if os.path.isdir(abs_path):
sub_files=Dir_getFiles(abs_path)
sub_files=[ f+'/'+i for i in sub_files ]
allfiles.extend(sub_files)
else:
allfiles.append(f)
return allfiles
class Dir:
def __init__(self,dir_name):
self.m_dir=Dir_toStdName(dir_name)
def listDir(self):
return os.listdir(self.m_dir)
def listFiles(self):
return Dir_getFiles(self.m_dir)
if __name__ == "__main__":
d=Dir("../../")
print d.listFiles()
| FSource/Faeris | tool/binpy/libpy/files/Dir.py | Python | mit | 703 |
def get_related_fields(model):
pass
def get_table_size(model):
pass
def get_row_size(model):
pass
| unbracketed/snowbird | snowbird/analyzer.py | Python | mit | 112 |
import os, requests, tempfile, time, webbrowser
import lacuna.bc
import lacuna.exceptions as err
### Dev notes:
### The tempfile containing the captcha image is not deleted until solveit()
### has been called.
###
### Allowing the tempfile to delete itself (delete=True during tempfile
### creation), or using the tempfile in conjunction with a 'with:' expression,
### have both been attempted.
###
### The problem is that, when using those auto-deletion methods, the tempfile
### is occasionally being removed from the system before the image viewer
### we're firing off actually gets a chance to read it. Everything is
### happening in the right order, it's just that the image viewer startup is
### too slow.
###
### Deleting the tempfile manually in solveit() works - don't decide to get
### clever and replace the unlink() in solveit() with some form of tempfile
### autodeletion without a lot of testing.
class Captcha(lacuna.bc.LacunaObject):
""" Fetches, displays, and solves graphical captchas.
General usage will be::
cap = my_client.get_captcha()
cap.showit() # display the captcha image
cap.prompt_user() # ask the user for a solution
cap.solveit() # check the user's solution
"""
path = 'captcha'
@lacuna.bc.LacunaObject.call_returning_meth
def fetch( self, **kwargs ):
""" Fetches a captcha for the user to solve from the server.
This mirrors the TLE API, but you generally don't need to call this.
Returns a :class:`lacuna.captcha.Puzzle` object.
"""
return Puzzle( self.client, kwargs['rslt'] )
def showit( self ):
""" Actually downloads the captcha image, and attempts to display it
to the user in one of several browsers.
If :meth:`fetch` is called first, :meth:`showit` uses that fetched data, but
this is not necessary. :meth:`showit` will call fetch for you.
Raises :class:`lacuna.exceptions.RequestError` if the image is not
fetchable (network error or the TLE servers have gone down).
Raises EnvironmentError if it cannot find an image viewer to use to
display the captcha image.
"""
if not hasattr(self,'url') or not hasattr(self,'guid'):
puzzle = self.fetch()
self.url = puzzle.url
self.guid = puzzle.guid
img_resp = requests.get( self.url )
if img_resp.status_code != 200:
raise err.RequestError("The captcha image URL is not responding.")
f = tempfile.NamedTemporaryFile( suffix='.png', prefix='tle_capcha_', delete=False );
self.tempfile = f.name
f.write( img_resp.content )
if hasattr(img_resp, 'connection'):
img_resp.connection.close()
local_url = 'file://' + f.name
found_browser = False
for b in [ None, 'windows-default', 'macosx', 'safari', 'firefox',
'google-chrome', 'chrome', 'chromium-browser', 'chromium' ]:
try:
browser = webbrowser.get( b )
browser.open( local_url, 0, True )
found_browser = True
break
except webbrowser.Error as e:
pass
if not found_browser:
raise EnvironmentError("Unable to find a browser to show the captcha image. Captcha solution is required.")
def prompt_user(self):
""" Prompts the user to solve the displayed captcha.
It's not illegal to call this without first calling :meth:`showit`,
but doing so makes no sense.
"""
self.resp = input("Enter the solution to the captcha here: ")
return self.resp
def solveit(self):
""" Sends the user's response to the server to check for accuracy.
Returns True if the user's response was correct. Raises
:class:`lacuna.exceptions.CaptchaResponseError` otherwise.
"""
if not hasattr(self,'resp'):
raise AttributeError("You must prompt the user for a response before calling solveit().")
try:
self.solve( self.guid, self.resp )
except err.ServerError as e:
raise err.CaptchaResponseError("Incorrect captcha response")
finally:
delattr( self, 'url' )
delattr( self, 'guid' )
delattr( self, 'resp' )
if os.path.isfile(self.tempfile):
os.unlink( self.tempfile )
return True
@lacuna.bc.LacunaObject.call_member_meth
def solve( self, guid:str, solution:str, **kwargs ):
""" Mirrors the TLE Captcha module's :meth:`solve` method, but unless you
really need this and you really know why, use :meth:`solveit` instead.
"""
pass
class Puzzle(lacuna.bc.SubClass):
"""
Object Attributes::
url FQ URL to the puzzle image
guid uuid attached to the puzzle; must be passed back along with
the solution.
"""
| tmtowtdi/MontyLacuna | lib/lacuna/captcha.py | Python | mit | 5,055 |
# pylint: disable=C0111,R0903
"""Shows that debug is enabled"""
import platform
import core.module
import core.widget
import core.decorators
class Module(core.module.Module):
@core.decorators.every(minutes=60)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.full_text))
def full_text(self, widgets):
return "debug"
def state(self, widget):
return "warning"
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| tobi-wan-kenobi/bumblebee-status | bumblebee_status/modules/core/debug.py | Python | mit | 503 |
from ctypes import c_float, cast, POINTER
import numpy as np
import OpenGL.GL as gl
import openvr
from openvr.gl_renderer import OpenVrFramebuffer as OpenVRFramebuffer
from openvr.gl_renderer import matrixForOpenVrMatrix as matrixForOpenVRMatrix
from openvr.tracked_devices_actor import TrackedDevicesActor
import gltfutils as gltfu
c_float_p = POINTER(c_float)
class OpenVRRenderer(object):
def __init__(self, multisample=0, znear=0.1, zfar=1000):
self.vr_system = openvr.init(openvr.VRApplication_Scene)
w, h = self.vr_system.getRecommendedRenderTargetSize()
self.vr_framebuffers = (OpenVRFramebuffer(w, h, multisample=multisample),
OpenVRFramebuffer(w, h, multisample=multisample))
self.vr_compositor = openvr.VRCompositor()
if self.vr_compositor is None:
raise Exception('unable to create compositor')
self.vr_framebuffers[0].init_gl()
self.vr_framebuffers[1].init_gl()
poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
self.poses = poses_t()
self.projection_matrices = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Left,
znear, zfar))),
np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Right,
znear, zfar))))
self.eye_transforms = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I))
self.view = np.eye(4, dtype=np.float32)
self.view_matrices = (np.empty((4,4), dtype=np.float32),
np.empty((4,4), dtype=np.float32))
self.controllers = TrackedDevicesActor(self.poses)
self.controllers.show_controllers_only = False
self.controllers.init_gl()
self.vr_event = openvr.VREvent_t()
def render(self, gltf, nodes, window_size=(800, 600)):
self.vr_compositor.waitGetPoses(self.poses, openvr.k_unMaxTrackedDeviceCount, None, 0)
hmd_pose = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
if not hmd_pose.bPoseIsValid:
return
hmd_34 = np.ctypeslib.as_array(cast(hmd_pose.mDeviceToAbsoluteTracking.m, c_float_p),
shape=(3,4))
self.view[:3,:] = hmd_34
view = np.linalg.inv(self.view.T)
view.dot(self.eye_transforms[0], out=self.view_matrices[0])
view.dot(self.eye_transforms[1], out=self.view_matrices[1])
gl.glViewport(0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height)
for eye in (0, 1):
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.vr_framebuffers[eye].fb)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gltfu.set_material_state.current_material = None
gltfu.set_technique_state.current_technique = None
for node in nodes:
gltfu.draw_node(node, gltf,
projection_matrix=self.projection_matrices[eye],
view_matrix=self.view_matrices[eye])
self.controllers.display_gl(self.view_matrices[eye], self.projection_matrices[eye])
self.vr_compositor.submit(openvr.Eye_Left, self.vr_framebuffers[0].texture)
self.vr_compositor.submit(openvr.Eye_Right, self.vr_framebuffers[1].texture)
# mirror left eye framebuffer to screen:
gl.glBlitNamedFramebuffer(self.vr_framebuffers[0].fb, 0,
0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height,
0, 0, window_size[0], window_size[1],
gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
def process_input(self):
pass
# state = self.vr_system.getControllerState(1)
# if state and state.rAxis[1].x > 0.05:
# self.vr_system.triggerHapticPulse(1, 0, int(3200 * state.rAxis[1].x))
# state = self.vr_system.getControllerState(2)
# if state and state.rAxis[1].x > 0.05:
# self.vr_system.triggerHapticPulse(2, 0, int(3200 * state.rAxis[1].x))
# if self.vr_system.pollNextEvent(self.vr_event):
# if self.vr_event.eventType == openvr.VREvent_ButtonPress:
# pass #print('vr controller button pressed')
# elif self.vr_event.eventType == openvr.VREvent_ButtonUnpress:
# pass #print('vr controller button unpressed')
def shutdown(self):
self.controllers.dispose_gl()
openvr.shutdown()
| jzitelli/python-gltf-experiments | OpenVRRenderer.py | Python | mit | 4,971 |
__author__ = 'shinyorke_mbp'
| Shinichi-Nakagawa/xp2015_baseball_tools | service/__init__.py | Python | mit | 29 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
# not sure about line 7
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^dropzone-drag-drop/$', include('dragdrop.urls', namespace="dragdrop", app_name="dragdrop")),
url(r'^index/$', 'dragdrop.views.GetUserImages'),
url(r'^$', 'signups.views.home', name='home'),
url(r'^register/$', 'drinker.views.DrinkerRegistration'),
url(r'^login/$', 'drinker.views.LoginRequest'),
url(r'^logout/$', 'drinker.views.LogOutRequest'),
url(r'^index/filter/$', 'filter.views.changeBright'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# not sure if I need an actual url wrapper in this code.
# url(r'^admin/varnish/', include('varnishapp.urls')),
)
if settings.DEBUG:
# urlpatterns add STATIC_URL and serves the STATIC_ROOT file
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| vdmann/cse-360-image-hosting-website | src/mvp_landing/urls.py | Python | mit | 1288 |
"""
The following tests check that db connections work properly.
Make sure the default configurations match your connection to the database
"""
import pymysql
import warnings
warnings.filterwarnings("ignore")
from StreamingSQL.db import create_connection, execute_command
from StreamingSQL.fonts import Colors, Formats
"""Default configuration to connect to the DB"""
host = 'localhost'
port = 3306
usr = 'root'
paswd = ''
db = 'test'
def test_default_create_connection():
"""
Test the connection to database
Assert:
Connection occurs
"""
cur = create_connection(host=host, port=port, user=usr, password=paswd, db=db)
assert type(cur) == pymysql.cursors.Cursor
def test_wrong_host_fail_create_connection():
"""
Test that error is properly returned when there is an incorrect host
Assert:
Proper error is returned/formatted
"""
error = "2003: Cant connect to MySQL server on '%s' [Errno 61] Connection refused"
error = (Formats.BOLD + Colors.RED + "Connection Error - " + error + Formats.END + Colors.END) % host[:-3]
cur = create_connection(host=host[:-3], port=port, user=usr, password=paswd, db=db)
try:
assert cur == error
except AssertionError:
pass
def test_wrong_port_fail_create_connection():
"""
Test that error is properly returned when there is an incorrect port number
Assert:
Proper error is returned/formatted
"""
error = "2003: Cant connect to MySQL server on '%s' [Errno 61] Connection refused"
error = (Formats.BOLD + Colors.RED + "Connection Error - " + error + Formats.END + Colors.END) % host
cur = create_connection(host=host, port=port + 13, user=usr, password=paswd, db=db)
try:
assert cur == error
except AssertionError:
pass
def test_wrong_user_fail_create_connection():
"""
Test that error is properly returned when there is an incorrect user
Assert:
Proper error is returned/formatted
"""
error = "2003: Cant connect to MySQL server on '%s' [Errno 61] Connection refused"
error = (Formats.BOLD + Colors.RED + "Connection Error - " + error + Formats.END + Colors.END) % host
cur = create_connection(host=host, port=port, user='', password=paswd, db=db)
try:
assert cur == error
except AssertionError:
pass
def test_wrong_passwd_fail_create_connection():
"""
Test that error is properly returned when there is an incorrect password
Assert:
Proper error is returned/formatted
"""
error = "2003: Cant connect to MySQL server on '%s' [Errno 61] Connection refused"
error = (Formats.BOLD + Colors.RED + "Connection Error - " + error + Formats.END + Colors.END) % host
cur = create_connection(host=host, port=port, user=usr, password=usr, db=db)
try:
assert cur == error
except AssertionError:
pass
def test_execute_command():
"""
Execute "SELECT 1;"
Assert:
A result of 1 is returned
"""
cur = create_connection(host=host, port=port, user=usr, password=paswd, db=db)
assert type(cur) == pymysql.cursors.Cursor
stmt = "SELECT 1"
result = execute_command(cur, stmt)
assert result[0][0] == 1
def test_syntax_fail_execute_command():
"""
Execute "SLCT 1;"
Assert:
An error message is returned
"""
stmt = "SLCT 1"
error = ("1064: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server "+
"version for the right syntax to use near '%s' at line 1")
error = Formats.BOLD + Colors.RED + "Connection Error - " + error % stmt + Formats.END + Colors.END
cur = create_connection(host=host, port=port, user=usr, password=paswd, db=db)
assert type(cur) == pymysql.cursors.Cursor
result = execute_command(cur, stmt)
try:
assert result == error
except AssertionError:
pass
def test_new_db_create_connection():
"""
Create a connection to a new database
Assert:
New database is created/removed
"""
db="db2"
cur = create_connection(host=host, port=port, user=usr, password=paswd, db=db)
assert type(cur) == pymysql.cursors.Cursor
stmt = "SELECT `SCHEMA_NAME` from `INFORMATION_SCHEMA`.`SCHEMATA` WHERE `SCHEMA_NAME` LIKE '%s';" % db
result = execute_command(cur, stmt)
assert result[0][0] == db
stmt = "FLUSH TABLES; DROP DATABASE IF EXISTS %s;" % db
result = execute_command(cur, stmt)
assert result == ()
| oshadmon/StreamingSQL | tests/test_db.py | Python | mit | 4,584 |
from app import app
import argparse
import os
import routes
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Run the MightySpring backend server.')
parser.add_argument('--debug',
'-d',
default=True)
parser.add_argument('--port',
'-p',
nargs='?',
default=int(os.environ.get('PORT', 5000)),
type=int)
parser.add_argument('--bind-address',
'-b',
nargs='?',
default=u'0.0.0.0',
type=unicode)
args = parser.parse_args()
debug = args.debug
port = args.port
bind_address = args.bind_address
app.run(host=bind_address, port=port, debug=debug)
| texuf/myantname | main.py | Python | mit | 869 |
def clean_dict_repr(mw):
"""Produce a repr()-like output of dict mw with ordered keys"""
return '{' + \
', '.join('{k!r}: {v!r}'.format(k=k, v=v) for k, v in
sorted(mw.items())) +\
'}'
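# Example (illustrative): keys are emitted in sorted order, so the output is
# deterministic regardless of insertion order.
#   clean_dict_repr({'b': 2, 'a': 1})  ->  "{'a': 1, 'b': 2}"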
| npilon/planterbox | planterbox/util.py | Python | mit | 236 |
#http://pandas.pydata.org/pandas-docs/stable/tutorials.html
#file='pand.py'
#exec(compile(open(file).read(), file, 'exec'))
from pandas import DataFrame, read_csv
import matplotlib.pyplot as plt
import pandas as pd
#import sys
#import matplotlib
names = ['Bob','Jessica','Mary','John','Mel']
births = [968, 155, 77, 578, 973]
BabyDataSet = list(zip(names,births)) # zip pairs entries together and list combines the entries to a list
print(BabyDataSet)
#The DataFrame attribute of pandas reorganizes the list into a tabular panda object
#similar to an sql table or an excel spreadsheet.
df = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births'])
print(df)
wait = input("PRESS ENTER TO CONTINUE.")
#We can now save the content as a standard tabular data format (csv)
df.to_csv('births1880.csv',index=False,header=False)
#We can also read back from the same file into a panda object
df = pd.read_csv(r'births1880.csv')
print(df)
print('Wrong header. read_csv treated the first record as the header')
print('set the header to null')
wait = input("PRESS ENTER TO CONTINUE.")
df = pd.read_csv(r'births1880.csv',header=None)
print(df)
print('Now we have the right data but no header')
print('Label the headers')
wait = input("PRESS ENTER TO CONTINUE.")
df = pd.read_csv(r'births1880.csv', names=['Names','Births'])
print(df)
print('This looks like the table we need')
print('Numbers of 0,1,2,3,4 are row numbers similar to an Excel spreadsheet')
wait = input("PRESS ENTER TO CONTINUE.")
print('Lets do something with this tabulated data')
print('Sort the dataframe and select the top row')
Sorted1=df.sort_values(['Births'], ascending=False)
#Sorted2=df.sort_values(by='Births', ascending=False)
#Sorted.head(1)
print(Sorted1)
wait = input("PRESS ENTER TO CONTINUE.")
print('Use the max() attribute to find the maximum value')
MaxValue=df['Births'].max()
print('MaxValue is ',MaxValue)
wait = input("PRESS ENTER TO CONTINUE.")
print('Convert a column to an array')
print(df['Names'].values)
print('Reference the second entry')
print(df['Names'][1:2].values)
print('Apply a booleen mask on the Births column when compared to the MaxValue')
mask = df['Births']==MaxValue
print(mask)
print('Find the name associated with the maximum value')
MaxName = df['Names'][mask].values
print('Name at Max Value is ',MaxName)
wait = input("PRESS ENTER TO CONTINUE.")
#Create a graph object
print('Create a graph object')
df['Births'].plot()
# Text to display on graph
print('Construct a string to display on the graph')
Text = str(MaxValue) + " - " + MaxName
print(Text)
# Add text to graph
print('Annonate the graph')
plt.annotate(Text, xy=(1, MaxValue), xytext=(8, 0),
xycoords=('axes fraction', 'data'), textcoords='offset points')
print('Show the graph')
plt.show()
#Uncomment the following to save it as a png file
#plt.savefig('mygraph.png')
| nuitrcs/python-researchers-toolkit | scripts/pand.py | Python | mit | 2,880 |
import numpy as np
import itertools
from scipy.misc import comb as bincoef
import random
#########################################################################
# GENERATORS
#########################################################################
def sign_permutations(length):
""" Memory efficient generator: generate all n^2 sign permutations. """
# return a generator which generates the product of "length" smaller
# generators of (-1 or +1) (i.e. the unrolled signs, evaulated as needed)
return itertools.product([-1, 1], repeat=length)
def random_product(*args, **kwds):
""" Random selection from itertools.product(*args, **kwds). """
pools = map(tuple, args) * kwds.get('repeat', 1)
limiter = 0
generate_limit = kwds.get('generate_limit', None)
while True if generate_limit == None else limiter < generate_limit:
limiter = limiter + 1
yield tuple(random.choice(pool) for pool in pools)
def random_sign_permutations(length, limit):
""" Random sign permutation generator. """
return random_product([-1, 1], repeat=length, generate_limit=limit)
def binary_combinations(length, sublength, comb_function=itertools.combinations, limit=None):
""" Memory efficient generator: generate all length choose sublength combinations. """
# get the combination indices, support both infinite and finite length generators
combination_indices = comb_function(range(length), sublength, limit) if limit else comb_function(range(length), sublength)
def indices_to_sign_vectors():
""" Generates sign vectors from indices. """
for index_tuple in combination_indices:
for i in xrange(length):
yield 1 if index_tuple.count(i) > 0 else 0
def grouper(n, iterable, fillvalue=None):
" For grouping a generated stream into tuples. "
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
# generate all combinations, grouped into tuples
return grouper(length, indices_to_sign_vectors())
def random_combination(iterable, r, limit=None):
""" Random selection from itertools.combinations(iterable, r). """
pool = tuple(iterable)
n = len(pool)
limiter = 0
comb = bincoef(len(iterable), r)
print comb
comb_indices = random.sample(xrange(comb), limit)
while True if limit == None else limiter < limit:
print comb_indices[limiter]
perm = get_nth_perm(pool, comb_indices[limiter])
subpool = sorted(perm[:r])
indices = sorted(random.sample(xrange(n), r))
print tuple(pool[i] for i in indices), subpool, perm
limiter = limiter + 1
yield tuple(pool[i] for i in indices)
def _progress_bar(self, max=100, label=""):
class Progress:
def __init__(self, max=100, label=""):
self.value = 1
self.label = label
self.max = max
def set(self, value):
self.value = value
p50 = int(50.0 * value / self.max)
if value >= self.max:
self.clear()
else:
sys.stdout.write("\r" + "|"*p50 + "\033[30m" + "·"*(50-p50) + "\033[0m %02d%% %s" % (p50*2, self.label))
sys.stdout.flush()
def advance(self):
self.set(self.value + 1)
def clear(self):
sys.stdout.write("\r"+" "*(80 + len(self.label))+"\r")
sys.stdout.flush()
return Progress(max, label)
def permutation_test(self, other, variables=None, ranked=False, two_samples=False, limit=1000):
"""Performs a permutation test on the given or the default dependent variables.
If two_samples is True, will conduct a two-sample test. Otherwise a one-sample test will be conducted.
If ranked is True, a Wilcoxon / Wilcoxon-Mann-Whitney test will be used for the one-sample /
two-sample case, respectively. Otherwise a Fisher / Pitman test will be conducted."""
variables = self._np1d(variables, fallback = self.dependent)
for var in variables:
A = self.get(var)
B = other.get(var)
if not two_samples:
D = [a - b for a, b in zip(A, B)]
if ranked:
D = self._signed_rank(D)
result = perm_test.one_sample(D, progress_bar = self._progress_bar(), limit=limit)
else:
if ranked:
D = self._rank(np.concatenate((A, B)))
A, B = D[:len(A)], D[len(A):]
result = perm_test.two_sample(A, B, progress_bar = self._progress_bar(), limit=limit)
return result
def _signed_rank(self, values):
"""Returns the signed rank of a list of values"""
lambda_signs = np.vectorize(lambda num: 1 if num >= 0 else -1)
signs = lambda_signs(values)
ranks = np.round(stats.rankdata(np.abs(values))).astype(int)
return signs*ranks
def signed_rank(self, attribute):
"""Returns the signed ranks of the data of the given attribute"""
values = self.get(attribute)
return self._signed_rank(values)
def _rank(self, values):
"""Returns the ranks of the data of a list of values"""
ranks = np.round(stats.rankdata(values)).astype(int)
return ranks
def rank(self, attribute):
"""Returns the ranks of the data of the given attribute"""
values = self.get(attribute)
return self._rank(values)
# def random_combination(iterable, r, limit=1000):
# """ Random selection from itertools.combinations(iterable, r). """
# pool = tuple(iterable)
# # number of
# comb = bincoef(len(iterable), r)
# comb_indices = random.sample(xrange(comb), limit)
# n = len(pool)
# limiter = 0
# for i in comb_indices:
# perm = get_nth_perm(pool, i)
# subpool = sorted(perm[:r])
# yield tuple(subpool)
def get_nth_perm(seq, index):
"Returns the <index>th permutation of <seq>"
seqc= list(seq[:])
seqn= [seqc.pop()]
divider= 2 # divider is meant to be len(seqn)+1, just a bit faster
while seqc:
index, new_index= index//divider, index%divider
seqn.insert(new_index, seqc.pop())
divider+= 1
return seqn
#########################################################################
# ACTUAL TESTS
#########################################################################
def one_sample(A, limit = 10000, progress_bar = None):
""" Conducts a permutation test on the input data"""
stat_ref = np.sum(A)
# count permutation test statistics <=, >=, or ||>=|| than reference stat
counts = np.array([0,0,0]) # (lesser, greater, more extreme)
total_perms = 2**len(A)
if total_perms < limit:
limit = total_perms
if progress_bar:
progress_bar.max = limit
progress_bar.label = "of %d permutations" % progress_bar.max
for sign_row in sign_permutations(len(A)):
stat_this = np.sum(np.array(A)*sign_row)
counts = counts + stat_compare(stat_ref,stat_this)
if progress_bar:
progress_bar.advance()
# return p-values for lower, upper, and two-tail tests (FP number)
return counts / 2.0**len(A)
def two_sample(A, B, limit = 10000, progress_bar = None):
""" Conducts a permutation test on the input data, transformed by fun. """
# apply transformation to input data (e.g. signed-rank for WMW)
data = np.concatenate((A, B))
stat_ref = np.sum(A)
# count permutation test statistics <=, >=, or ||>=|| than reference stat
counts = np.array([0,0,0]) # (lesser, greater)
total_perms = bincoef(len(data), len(A))
if not limit or total_perms < limit :
limit = None
comb_function = itertools.combinations
else:
comb_function = random_combination
if progress_bar:
progress_bar.max = limit or total_perms
progress_bar.label = "of %d permutations" % progress_bar.max
for binary_row in binary_combinations(len(data), len(A), comb_function=comb_function, limit=limit):
#print binary_row
stat_this = np.sum(np.array(data)*binary_row)
counts = counts + stat_compare(stat_ref,stat_this)
# if progress_bar:
# progress_bar.advance()
# return p-values for lower, upper, and two-tail tests (FP number)
n_comb = np.multiply.reduce(np.array(range(len(data)-len(A)+1,len(data)+1)))\
/ np.multiply.reduce(np.array(range(1,len(A)+1)))
n_comb = limit or total_perms
counts[2] = min(2*counts[0:2].min(),n_comb) # hack to define p.twotail as 2*smaller of 1 tail p's
return counts / float(n_comb)
def stat_compare(ref,test):
""" Tests for comparing permutation and observed test statistics"""
lesser = 1 * (test <= ref)
greater = 1 * (test >= ref)
more_extreme = 1 * (np.abs(test) >= np.abs(ref))
return np.array([lesser,greater,more_extreme])
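# --- Usage sketch (illustrative only; the data values are made up) ---
# Each test returns (p_lower, p_upper, p_twotail) as floating point numbers.
if __name__ == '__main__':
    paired_diffs = [1.2, -0.4, 0.8, 2.1, -0.3, 1.7]
    print one_sample(paired_diffs)        # exhaustive one-sample (Fisher/Pitman) test
    group_a = [12.1, 14.3, 11.8, 15.0]
    group_b = [10.2, 9.8, 11.1, 10.5]
    print two_sample(group_a, group_b)    # exhaustive two-sample test (70 combinations)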
| maebert/knyfe | knyfe/perm_test.py | Python | mit | 8,835 |
import time
import json
import tornado.httpclient
http_client = tornado.httpclient.HTTPClient()
class HTTPServiceProxy(object):
def __init__(self, host='localhost', port=6999, cache_timeout=5.0):
self._host = host
self._port = port
self._cache_timeout = cache_timeout
self._cache = {}
self._cache_time = {}
def get(self, *path):
print 'http://%s:%d/%s' % (self._host, self._port, '/'.join(path))
if path in self._cache and \
self._cache_time[path] + self._cache_timeout > time.time():
return self._cache[path]
try:
response = http_client.fetch('http://%s:%d/%s' % (self._host, self._port, '/'.join(path)))
self._cache[path] = response.body
self._cache_time[path] = time.time()
return response.body
except tornado.httpclient.HTTPError as e:
if path in self._cache:
del self._cache[path]
return None
def post(self, *path, **kwargs):
url = 'http://%s:%d/%s' % (self._host, self._port, '/'.join(path))
print url
try:
request = tornado.httpclient.HTTPRequest(url, method='POST', body=json.dumps(kwargs))
response = http_client.fetch(request)
return response.body
except tornado.httpclient.HTTPError as e:
return None
class MonitorProxy(HTTPServiceProxy):
"""
Proxy object for the challenge monitor service.
"""
def __init__(self):
super(MonitorProxy, self).__init__(host='localhost', port=6999, cache_timeout=0.0)
@property
def challenges(self):
return json.loads(self.get('list'))
@property
def visible_challenges(self):
return json.loads(self.get('list_visible'))
def status(self, challenge):
try:
return json.loads(self.get('status')).get(challenge, None)
except TypeError:
return None
def show(self, challenge):
self.post('show', challenge)
def hide(self, challenge):
self.post('hide', challenge)
def start(self, challenge):
self.post('start', challenge)
def stop(self, challenge):
self.post('stop', challenge)
def metadata(self, challenge):
try:
return json.loads(self.get('metadata', challenge))
except TypeError:
return None
def fetch_file(self, challenge, filename):
return self.get('static_files', challenge, filename)
monitor = MonitorProxy()
class AuthProxy(HTTPServiceProxy):
"""
Proxy object for the user authentication serivce.
"""
def __init__(self, host='127.0.0.1', port=6998, cache_timeout=1.0):
super(AuthProxy, self).__init__(host='localhost', port=6998, cache_timeout=1.0)
@property
def users(self):
return json.loads(self.get('list'))
def create_user(self, user):
self.post('create_user', user)
def is_admin(self, user):
try:
return json.loads(self.post('get_tag', user, key='is_admin', default='false'))
except (ValueError, TypeError):
return False
def is_playing(self, user):
try:
return json.loads(self.post('get_tag', user, key='is_playing', default='true'))
except (ValueError, TypeError):
return False
def set_password(self, user, password):
self.post('set_password', user, password=password)
def check_password(self, user, password):
try:
return json.loads(self.post('check_password', user, password=password))
except TypeError:
return False
def set_tag(self, user, key, value):
self.post('set_tag', user, key=key, value=json.dumps(value))
def get_tag(self, user, key, default=''):
return self.post('get_tag', user, key=key, default=default)
auth = AuthProxy()
class ScoreboardProxy(HTTPServiceProxy):
"""
Proxy object for the scoreboard service.
"""
def __init__(self, host='127.0.0.1', port=6997, cache_timeout=1.0):
super(ScoreboardProxy, self).__init__(host='localhost', port=6997, cache_timeout=1.0)
def capture(self, user, challenge):
self.post('capture', challenge, user=user)
def get_captures_by_user(self, user):
return json.loads(self.get('get_captures_by_user', user))
def get_captures_by_challenge(self, challenge):
return json.loads(self.get('get_captures_by_challenge', challenge))
scoreboard = ScoreboardProxy()
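# --- Usage sketch (illustrative; assumes the monitor, auth and scoreboard
# services are reachable on their default ports, and 'alice' is a made-up user) ---
if __name__ == '__main__':
    print auth.users                                    # users known to the auth service
    for challenge in monitor.visible_challenges:
        print challenge, monitor.status(challenge)
    print scoreboard.get_captures_by_user('alice')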
| nickbjohnson4224/greyhat-crypto-ctf-2014 | frontend/services.py | Python | mit | 4,564 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
import gclient_scm
from testing_support import trial_dir
def write(filename, content):
"""Writes the content of a file and create the directories as needed."""
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as f:
f.write(content)
class SCMMock(object):
def __init__(self, unit_test, name, url):
self.unit_test = unit_test
self.name = name
self.url = url
def RunCommand(self, command, options, args, file_list):
self.unit_test.assertEquals('None', command)
self.unit_test.processed.put((self.name, self.url))
def FullUrlForRelativeUrl(self, url):
return self.url + url
# pylint: disable=no-self-use
def DoesRemoteURLMatch(self, _):
return True
def GetActualRemoteURL(self, _):
return self.url
class GclientTest(trial_dir.TestCase):
def setUp(self):
super(GclientTest, self).setUp()
self.processed = Queue.Queue()
self.previous_dir = os.getcwd()
os.chdir(self.root_dir)
# Manual mocks.
self._old_createscm = gclient.Dependency.CreateSCM
gclient.Dependency.CreateSCM = self._createscm
self._old_sys_stdout = sys.stdout
sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
def tearDown(self):
self.assertEquals([], self._get_processed())
gclient.Dependency.CreateSCM = self._old_createscm
sys.stdout = self._old_sys_stdout
os.chdir(self.previous_dir)
super(GclientTest, self).tearDown()
def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
return SCMMock(self, name, parsed_url)
def testDependencies(self):
self._dependencies('1')
def testDependenciesJobs(self):
self._dependencies('1000')
def _dependencies(self, jobs):
"""Verifies that dependencies are processed in the right order.
e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
bar isn't fetched until 'src' is done.
Args:
|jobs| is the number of parallel jobs simulated.
"""
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', jobs])
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "bar", "url": "svn://example.com/bar" },\n'
' { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",\n'
# This one will depend on dir1/dir2 in bar.
' "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
' "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
# There are two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
' "foo/dir1/dir2": "/dir1/dir2",\n'
'}')
write(
os.path.join('bar/empty', 'DEPS'),
'deps = {\n'
'}')
obj = gclient.GClient.LoadCurrentConfig(options)
self._check_requirements(obj.dependencies[0], {})
self._check_requirements(obj.dependencies[1], {})
obj.RunOnDeps('None', args)
actual = self._get_processed()
first_3 = [
('bar', 'svn://example.com/bar'),
('bar/empty', 'svn://example.com/bar_empty'),
('foo', 'svn://example.com/foo'),
]
if jobs != 1:
# We don't care about the ordering of these items except that bar must be
# before bar/empty.
self.assertTrue(
actual.index(('bar', 'svn://example.com/bar')) <
actual.index(('bar/empty', 'svn://example.com/bar_empty')))
self.assertEquals(first_3, sorted(actual[0:3]))
else:
self.assertEquals(first_3, actual[0:3])
self.assertEquals(
[
('foo/dir1', 'svn://example.com/foo/dir1'),
('foo/dir1/dir2', 'svn://example.com/bar/dir1/dir2'),
('foo/dir1/dir2/dir3', 'svn://example.com/foo/dir1/dir2/dir3'),
('foo/dir1/dir2/dir3/dir4',
'svn://example.com/foo/dir1/dir2/dir3/dir4'),
],
actual[3:])
self.assertEquals(3, len(obj.dependencies))
self.assertEquals('foo', obj.dependencies[0].name)
self.assertEquals('bar', obj.dependencies[1].name)
self.assertEquals('bar/empty', obj.dependencies[2].name)
self._check_requirements(
obj.dependencies[0],
{
'foo/dir1': ['bar', 'bar/empty', 'foo'],
'foo/dir1/dir2/dir3':
['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
'foo/dir1/dir2/dir3/dir4':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3'],
})
self._check_requirements(
obj.dependencies[1],
{
'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
})
self._check_requirements(
obj.dependencies[2],
{})
self._check_requirements(
obj,
{
'foo': [],
'bar': [],
'bar/empty': ['bar'],
})
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
def testAutofix(self):
# Invalid urls causes pain when specifying requirements. Make sure it's
# auto-fixed.
url = 'proto://host/path/@revision'
d = gclient.Dependency(
None, 'name', url, url, None, None, None,
None, '', True, False, None, True)
self.assertEquals('proto://host/path@revision', d.url)
def testStr(self):
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
gclient.Dependency(
obj, 'bar', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.Dependency(
obj.dependencies[0], 'foo/dir1', 'raw_url', 'url', None, None, None,
None, 'DEPS', True, False, None, True),
],
[])
# Make sure __str__() works fine.
# pylint: disable=protected-access
obj.dependencies[0]._file_list.append('foo')
str_obj = str(obj)
self.assertEquals(263, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
def testHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
print >> fh, 'hooks = %s' % repr(hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options, patch_refs={})
self.assertEqual(
[h.action for h in client.GetHooks(options)],
[tuple(x['action']) for x in hooks])
def testCustomHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
'"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
skip_hooks = [
{'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
skip_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
fh.close()
# Make sure the custom hooks for that project don't affect the next one.
subdir_fn = os.path.join(topdir, 'bottom')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
sub_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
print >> fh, 'hooks = %s' % repr(sub_hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options, patch_refs={})
self.assertEqual(
[h.action for h in client.GetHooks(options)],
[tuple(x['action']) for x in hooks + extra_hooks + sub_hooks])
def testTargetOS(self):
"""Verifies that specifying a target_os pulls in all relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. The
value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
def testTargetOsWithTargetOsOnly(self):
"""Verifies that specifying a target_os and target_os_only pulls in only
the relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. With
target_os_only also set, the _enforced_os tuple will be set to only the
target_os value.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz'], sorted(obj.enforced_os))
def testTargetOsOnlyWithoutTargetOs(self):
"""Verifies that specifying a target_os_only without target_os_only raises
an exception.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
exception_raised = False
try:
gclient.GClient.LoadCurrentConfig(options)
except gclient_utils.Error:
exception_raised = True
self.assertTrue(exception_raised)
def testTargetOsInDepsFile(self):
"""Verifies that specifying a target_os value in a DEPS file pulls in all
relevant dependencies.
The target_os variable in a DEPS file allows specifying the name of an
additional OS which should be considered when selecting dependencies from a
DEPS' deps_os. The value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },\n'
' { "name": "bar",\n'
' "url": "svn://example.com/bar",\n'
' }]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix", },\n'
' "baz": { "foo/baz": "/baz", },\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps_os = {\n'
' "unix": { "bar/unix": "/unix", },\n'
' "baz": { "bar/baz": "/baz", },\n'
' "jaz": { "bar/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
('bar', 'svn://example.com/bar'),
('bar/unix', 'svn://example.com/bar/unix'),
('foo', 'svn://example.com/foo'),
('foo/baz', 'svn://example.com/foo/baz'),
('foo/unix', 'svn://example.com/foo/unix'),
],
sorted(self._get_processed()))
def testTargetOsForHooksInDepsFile(self):
"""Verifies that specifying a target_os value in a DEPS file runs the right
entries in hooks_os.
"""
write(
'DEPS',
'hooks = [\n'
' {\n'
' "name": "a",\n'
' "pattern": ".",\n'
' "action": [ "python", "do_a" ],\n'
' },\n'
']\n'
'\n'
'hooks_os = {\n'
' "blorp": ['
' {\n'
' "name": "b",\n'
' "pattern": ".",\n'
' "action": [ "python", "do_b" ],\n'
' },\n'
' ],\n'
'}\n')
write(
'.gclient',
'solutions = [\n'
' { "name": ".",\n'
' "url": "svn://example.com/",\n'
' }]\n')
# Test for an OS not in hooks_os.
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', '1'])
options.deps_os = 'zippy'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', args)
self.assertEqual(['zippy'], sorted(obj.enforced_os))
all_hooks = [h.action for h in obj.GetHooks(options)]
self.assertEquals(
[('.', 'svn://example.com/'),],
sorted(self._get_processed()))
self.assertEquals(all_hooks,
[('python', 'do_a')])
# Test for OS that has extra hooks in hooks_os.
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', '1'])
options.deps_os = 'blorp'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', args)
self.assertEqual(['blorp'], sorted(obj.enforced_os))
all_hooks = [h.action for h in obj.GetHooks(options)]
self.assertEquals(
[('.', 'svn://example.com/'),],
sorted(self._get_processed()))
self.assertEquals(all_hooks,
[('python', 'do_a'),
('python', 'do_b')])
def testUpdateWithOsDeps(self):
"""Verifies that complicated deps_os constructs result in the
    correct data also with multiple operating systems. Also see
testDepsOsOverrideDepsInDepsFile."""
test_data = [
# Tuples of deps, deps_os, os_list and expected_deps.
(
# OS with no overrides at all.
{'foo': 'default_foo'},
{'os1': { 'foo': None } },
['os2'],
{'foo': 'default_foo'}
),
(
# One OS wants to add a module.
{'foo': 'default_foo'},
{'os1': { 'bar': 'os1_bar' }},
['os1'],
{'foo': 'default_foo',
'bar': {'should_process': True, 'url': 'os1_bar'}}
),
(
# One OS wants to add a module. One doesn't care.
{'foo': 'default_foo'},
{'os1': { 'bar': 'os1_bar' }},
['os1', 'os2'],
{'foo': 'default_foo',
'bar': {'should_process': True, 'url': 'os1_bar'}}
),
(
# Two OSes want to add a module with the same definition.
{'foo': 'default_foo'},
{'os1': { 'bar': 'os12_bar' },
'os2': { 'bar': 'os12_bar' }},
['os1', 'os2'],
{'foo': 'default_foo',
'bar': {'should_process': True, 'url': 'os12_bar'}}
),
(
# One OS doesn't need module, one OS wants the default.
{'foo': 'default_foo'},
{'os1': { 'foo': None },
'os2': {}},
['os1', 'os2'],
{'foo': 'default_foo'}
),
(
# OS doesn't need module.
{'foo': 'default_foo'},
{'os1': { 'foo': None } },
['os1'],
{'foo': 'default_foo'}
),
(
# No-op override. Regression test for http://crbug.com/735418 .
{'foo': 'default_foo'},
{'os1': { 'foo': 'default_foo' } },
[],
{'foo': {'should_process': True, 'url': 'default_foo'}}
),
]
for deps, deps_os, target_os_list, expected_deps in test_data:
orig_deps = copy.deepcopy(deps)
result = gclient.Dependency.MergeWithOsDeps(
deps, deps_os, target_os_list, False)
self.assertEqual(result, expected_deps)
self.assertEqual(deps, orig_deps)
def testUpdateWithOsDepsInvalid(self):
test_data = [
# Tuples of deps, deps_os, os_list.
(
# OS wants a different version of module.
{'foo': 'default_foo'},
{'os1': { 'foo': 'os1_foo'} },
['os1'],
),
(
# One OS doesn't need module, another OS wants a special version.
{'foo': 'default_foo'},
{'os1': { 'foo': None },
'os2': { 'foo': 'os2_foo'}},
['os1', 'os2'],
),
]
for deps, deps_os, target_os_list in test_data:
with self.assertRaises(gclient_utils.Error):
gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list, False)
def testLateOverride(self):
"""Verifies expected behavior of LateOverride."""
url = "[email protected]:dart-lang/spark.git"
d = gclient.Dependency(None, 'name', 'raw_url', 'url',
None, None, None, None, '', True, False, None, True)
late_url = d.LateOverride(url)
self.assertEquals(url, late_url)
def testDepsOsOverrideDepsInDepsFile(self):
"""Verifies that a 'deps_os' path cannot override a 'deps' path. Also
see testUpdateWithOsDeps above.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps = {\n'
' "foo/src": "/src",\n' # This path is to be overridden by similar path
# in deps_os['unix'].
'}\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix",'
' "foo/src": "/src_unix"},\n'
' "baz": { "foo/baz": "/baz",\n'
' "foo/src": None},\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
with self.assertRaises(gclient_utils.Error):
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
],
sorted(self._get_processed()))
def testRecursionOverride(self):
"""Verifies gclient respects the |recursion| var syntax.
We check several things here:
- |recursion| = 3 sets recursion on the foo dep to exactly 3
(we pull /fizz, but not /fuzz)
    - pulling foo/bar at recursion level 1 (in .gclient) is overridden by
a later pull of foo/bar at recursion level 2 (in the dep tree)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/bar'),
('bar', 'svn://example.com/foo/bar'),
('baz', 'svn://example.com/foo/bar/baz'),
('fizz', 'svn://example.com/foo/bar/baz/fizz'),
],
self._get_processed())
def testRecursedepsOverride(self):
"""Verifies gclient respects the |recursedeps| var syntax.
This is what we mean to check here:
- |recursedeps| = [...] on 2 levels means we pull exactly 3 deps
(up to /fizz, but not /fuzz)
    - pulling foo/bar with no recursion (in .gclient) is overridden by
      a later pull of foo/bar with recursion (in the dep tree)
    - pulling foo/tar with no recursion (in .gclient) is not recursively
      pulled (taz is left out)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
' { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}\n'
'recursedeps = ["baz"]')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
write(
os.path.join('tar', 'DEPS'),
'deps = {\n'
' "taz": "/taz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('bar', 'svn://example.com/foo/bar'),
('baz', 'svn://example.com/foo/bar/baz'),
('fizz', 'svn://example.com/foo/bar/baz/fizz'),
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/bar'),
('foo/tar', 'svn://example.com/tar'),
],
sorted(self._get_processed()))
def testRecursedepsOverrideWithRelativePaths(self):
"""Verifies gclient respects |recursedeps| with relative paths."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('foo/bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/foo/bar'),
('foo/baz', 'svn://example.com/foo/bar/baz'),
],
self._get_processed())
def testRelativeRecursion(self):
"""Verifies that nested use_relative_paths is always respected."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('foo/bar', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/foo/bar'),
('foo/bar/baz', 'svn://example.com/foo/bar/baz'),
],
self._get_processed())
def testRecursionOverridesRecursedeps(self):
"""Verifies gclient respects |recursion| over |recursedeps|.
|recursion| is set in a top-level DEPS file. That value is meant
to affect how many subdeps are parsed via recursion.
|recursedeps| is set in each DEPS file to control whether or not
to recurse into the immediate next subdep.
This test verifies that if both syntaxes are mixed in a DEPS file,
we disable |recursedeps| support and only obey |recursion|.
Since this setting is evaluated per DEPS file, recursed DEPS
files will each be re-evaluated according to the per DEPS rules.
So a DEPS that only contains |recursedeps| could then override any
previous |recursion| setting. There is extra processing to ensure
this does not happen.
For this test to work correctly, we need to use a DEPS chain that
only contains recursion controls in the top DEPS file.
In foo, |recursion| and |recursedeps| are specified. When we see
|recursion|, we stop trying to use |recursedeps|.
There are 2 constructions of DEPS here that are key to this test:
(1) In foo, if we used |recursedeps| instead of |recursion|, we
would also pull in bar. Since bar's DEPS doesn't contain any
recursion statements, we would stop processing at bar.
(2) In fizz, if we used |recursedeps| at all, we should pull in
fuzz.
We expect to keep going past bar (satisfying 1) and we don't
expect to pull in fuzz (satisfying 2).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}\n'
'recursedeps = ["fuzz"]')
write(
os.path.join('fuzz', 'DEPS'),
'deps = {\n'
' "tar": "/tar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/bar'),
('bar', 'svn://example.com/foo/bar'),
# Deps after this would have been skipped if we were obeying
# |recursedeps|.
('baz', 'svn://example.com/foo/bar/baz'),
('fizz', 'svn://example.com/foo/bar/baz/fizz'),
# And this dep would have been picked up if we were obeying
# |recursedeps|.
# 'svn://example.com/foo/bar/baz/fuzz',
],
self._get_processed())
def testRecursedepsAltfile(self):
"""Verifies gclient respects the |recursedeps| var syntax with overridden
target DEPS file.
This is what we mean to check here:
- Naming an alternate DEPS file in recursedeps pulls from that one.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = [("bar", "DEPS.alt")]')
write(os.path.join('bar', 'DEPS'), 'ERROR ERROR ERROR')
write(
os.path.join('bar', 'DEPS.alt'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('bar', 'svn://example.com/foo/bar'),
('baz', 'svn://example.com/foo/bar/baz'),
],
self._get_processed())
def testGitDeps(self):
"""Verifies gclient respects a .DEPS.git deps file.
Along the way, we also test that if both DEPS and .DEPS.git are present,
that gclient does not read the DEPS file. This will reliably catch bugs
where gclient is always hitting the wrong file (DEPS).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('bar', 'svn://example.com/foo/bar'),
],
self._get_processed())
def testGitDepsFallback(self):
"""Verifies gclient respects fallback to DEPS upon missing deps file."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('bar', 'svn://example.com/foo/bar'),
],
self._get_processed())
def testDepsFromNotAllowedHostsUnspecified(self):
"""Verifies gclient works fine with DEPS without allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
dep = obj.dependencies[0]
self.assertEquals([], dep.findDepsFromNotAllowedHosts())
self.assertEquals(frozenset(), dep.allowed_hosts)
self._get_processed()
def testDepsFromNotAllowedHostsOK(self):
"""Verifies gclient works fine with DEPS with proper allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'allowed_hosts = ["example.com"]\n'
'deps = {\n'
' "bar": "svn://example.com/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
dep = obj.dependencies[0]
self.assertEquals([], dep.findDepsFromNotAllowedHosts())
self.assertEquals(frozenset(['example.com']), dep.allowed_hosts)
self._get_processed()
def testDepsFromNotAllowedHostsBad(self):
"""Verifies gclient works fine with DEPS with proper allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'allowed_hosts = ["other.com"]\n'
'deps = {\n'
' "bar": "svn://example.com/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
dep = obj.dependencies[0]
self.assertEquals(frozenset(['other.com']), dep.allowed_hosts)
self.assertEquals([dep.dependencies[0]], dep.findDepsFromNotAllowedHosts())
self._get_processed()
def testDepsParseFailureWithEmptyAllowedHosts(self):
"""Verifies gclient fails with defined but empty allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'allowed_hosts = []\n'
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
try:
obj.RunOnDeps('None', [])
self.fail()
except gclient_utils.Error, e:
self.assertIn('allowed_hosts must be', str(e))
finally:
self._get_processed()
def testDepsParseFailureWithNonIterableAllowedHosts(self):
"""Verifies gclient fails with defined but non-iterable allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'allowed_hosts = None\n'
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
try:
obj.RunOnDeps('None', [])
self.fail()
except gclient_utils.Error, e:
self.assertIn('allowed_hosts must be', str(e))
finally:
self._get_processed()
def testCreatesCipdDependencies(self):
"""Verifies something."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'vars = {\n'
' "lemur_version": "version:1234",\n'
'}\n'
'deps = {\n'
' "bar": {\n'
' "packages": [{\n'
' "package": "lemur",\n'
' "version": Var("lemur_version"),\n'
' }],\n'
' "dep_type": "cipd",\n'
' }\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
options.validate_syntax = True
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEquals(1, len(obj.dependencies))
sol = obj.dependencies[0]
sol._condition = 'some_condition'
sol.ParseDepsFile()
self.assertEquals(1, len(sol.dependencies))
dep = sol.dependencies[0]
self.assertIsInstance(dep, gclient.CipdDependency)
self.assertEquals(
'https://chrome-infra-packages.appspot.com/lemur@version:1234',
dep.url)
def testSameDirAllowMultipleCipdDeps(self):
"""Verifies gclient allow multiple cipd deps under same directory."""
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
cipd_root = gclient_scm.CipdRoot(
os.path.join(self.root_dir, 'dir1'), 'https://example.com')
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.CipdDependency(obj.dependencies[0], 'foo',
{'package': 'foo_package',
'version': 'foo_version'},
cipd_root, None, True, False,
'fake_condition', True),
gclient.CipdDependency(obj.dependencies[0], 'foo',
{'package': 'bar_package',
'version': 'bar_version'},
cipd_root, None, True, False,
'fake_condition', True),
],
[])
dep0 = obj.dependencies[0].dependencies[0]
dep1 = obj.dependencies[0].dependencies[1]
self.assertEquals('https://example.com/foo_package@foo_version', dep0.url)
self.assertEquals('https://example.com/bar_package@bar_version', dep1.url)
if __name__ == '__main__':
sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
logging.basicConfig(
level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
min(sys.argv.count('-v'), 3)],
format='%(relativeCreated)4d %(levelname)5s %(module)13s('
'%(lineno)d) %(message)s')
unittest.main()
| Shouqun/node-gn | tools/depot_tools/tests/gclient_test.py | Python | mit | 40,396 |
from zang.exceptions.zang_exception import ZangException
from zang.configuration.configuration import Configuration
from zang.connectors.connector_factory import ConnectorFactory
from zang.domain.enums.http_method import HttpMethod
from docs.examples.credetnials import sid, authToken
url = 'https://api.zang.io/v2'
configuration = Configuration(sid, authToken, url=url)
sipDomainsConnector = ConnectorFactory(configuration).sipDomainsConnector
# view domain
try:
domain = sipDomainsConnector.viewDomain('TestDomainSid')
print(domain)
except ZangException as ze:
print(ze)
# list domains
try:
domains = sipDomainsConnector.listDomains()
print(domains.total)
except ZangException as ze:
print(ze)
# create domain
try:
domain = sipDomainsConnector.createDomain(
domainName='mydomain.com',
friendlyName='MyDomain',
voiceUrl='VoiceUrl',
voiceMethod=HttpMethod.POST,
voiceFallbackUrl='VoiceFallbackUrl',
voiceFallbackMethod=HttpMethod.GET)
print(domain.sid)
except ZangException as ze:
print(ze)
# update domain
try:
domain = sipDomainsConnector.updateDomain(
'TestDomainSid',
friendlyName='MyDomain3',
voiceUrl='VoiceUrl2',
voiceMethod=HttpMethod.POST,)
print(domain.voiceUrl)
except ZangException as ze:
print(ze)
# delete domain
try:
domain = sipDomainsConnector.deleteDomain('TestDomainSid')
print(domain.sid)
except ZangException as ze:
print(ze)
# list mapped credentials lists
try:
credentialsLists = sipDomainsConnector.listMappedCredentialsLists(
'TestDomainSid')
print(credentialsLists.total)
except ZangException as ze:
print(ze)
# map credentials list
try:
credentialsList = sipDomainsConnector.mapCredentialsLists(
'TestDomainSid', 'TestCredentialsListSid')
print(credentialsList.credentialsCount)
except ZangException as ze:
print(ze)
# delete mapped credentials list
try:
credentialsList = sipDomainsConnector.deleteMappedCredentialsList(
'TestDomainSid', 'TestCredentialsListSid')
print(credentialsList.friendlyName)
except ZangException as ze:
print(ze)
# list mapped ip access control lists
try:
aclLists = sipDomainsConnector.listMappedIpAccessControlLists(
'TestDomainSid')
print(aclLists.total)
except ZangException as ze:
print(ze)
# map ip access control list
try:
aclList = sipDomainsConnector.mapIpAccessControlList(
'TestDomainSid', 'TestIpAccessControlListSid')
print(aclList.credentialsCount)
except ZangException as ze:
print(ze)
# delete mapped ip access control list
try:
aclList = sipDomainsConnector.deleteMappedIpAccessControlList(
'TestDomainSid', 'TestIpAccessControlListSid')
print(aclList.friendlyName)
except ZangException as ze:
print(ze)
| zang-cloud/zang-python | docs/examples/sip_domains_example.py | Python | mit | 2,859 |
# -*- coding: utf-8 -*-
import pytest
from mmb_perceptron.feature_extractor import FeatureExtractor
class TestFeatureExtractor(object):
"""Tests for feature extractors.
"""
def test_context_size(self):
f = FeatureExtractor()
assert f.context_size == (0, 0)
f.context_size = (1, 2)
assert f.context_size == (1, 2)
with pytest.raises(ValueError):
f.context_size = (-1, 1)
with pytest.raises(ValueError):
f.context_size = (1, -1)
assert f.context_size == (1, 2)
| mbollmann/perceptron | test/test_feature_extractor.py | Python | mit | 554 |
from unittest import TestCase
from unittest.mock import Mock, patch, call, MagicMock
from flowirc.protocol import IRCClientProtocol
__author__ = 'Olle Lundberg'
class TestIRCClientProtocol(TestCase):
def setUp(self):
self.proto = IRCClientProtocol()
self.transport = Mock()
self.proto.message_received = Mock()
def tearDown(self):
self.proto = None
self.transport = None
def test_connection_made(self):
self.proto.after_connection_made = Mock()
self.proto.connection_made(self.transport)
self.assertEqual(self.proto._transport, self.transport)
self.assertEqual(1, self.proto.after_connection_made.call_count)
self.assertEqual((), self.proto.after_connection_made.call_args)
def test_send(self):
self.proto._transport = Mock()
self.proto.send('foo')
self.proto._transport.write.assert_called_once_with(b'foo')
self.proto._transport.reset_mock()
calls = [call(b'foo'), call(b'bar'), call(b'baz')]
self.proto.send('foo', 'bar', 'baz')
self.assertEqual(3, self.proto._transport.write.call_count)
self.proto._transport.write.assert_has_calls(calls)
self.proto._transport.reset_mock()
data = Mock()
data.encode = Mock(side_effect=AttributeError(
"'NoneType' object has no attribute 'encode'"))
self.assertRaises(AttributeError, self.proto.send, data)
@patch('asyncio.Task')
@patch('flowirc.protocol.IRCMessage')
def test_data_received(self, ircmessage, task):
self.proto.message_received = Mock()
self.proto.data_received(b'')
self.proto.data_received(b'f')
self.assertEqual(0, task.call_count)
self.proto.data_received(b'foo')
self.assertEqual(1, ircmessage.from_str.call_count)
self.assertEqual(1, self.proto.message_received.call_count)
@patch('asyncio.Task')
@patch('flowirc.protocol.IRCMessage')
def test_data_received_2(self, ircmessage, task):
self.proto.message_received = Mock()
ping = "PING irc.example.net\r\n"
mock = MagicMock(return_value=ping)
ircmessage.from_str = mock
self.proto.data_received(b' \r\nPING :irc.example.net\r\n')
self.assertEqual(1, ircmessage.from_str.call_count)
self.proto.message_received.assert_called_once_with(ping)
@patch('asyncio.Task')
@patch('flowirc.protocol.IRCMessage')
def test_data_received_3(self, ircmessage, task):
self.proto.message_received = Mock()
mock = MagicMock(return_value=None)
ircmessage.from_str = mock
self.proto.data_received(b' \r\nNOT_A_CMD :irc.example.net\r\n')
self.assertEqual(1, ircmessage.from_str.call_count)
self.assertEqual(0, self.proto.message_received.call_count)
| lndbrg/flowirc | flowirc/tests/test_IRCClientProtocol.py | Python | mit | 2,853 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import, print_function, unicode_literals
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_jwt',
'django_extensions',
'project',
)
| StefanKjartansson/drf-react-skeleton | project/settings/apps.py | Python | mit | 426 |
#!/usr/bin/env python
"""Print out a report about whats in a vectortile
Usage:
tileinfo.py [options] [SOURCE]
Options:
--srcformat=SRC_FORMAT Source file format: (tile | json)
--indent=INT|None JSON indentation level. Defaults to 4. Use 'None' to disable.
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
"""
import json
import sys
from docopt import docopt
from vectortile import Tile
def info(data, cols):
"""
Compute min/max for all registered columns.
Parameters
----------
data : list
List of points from tile.
cols : list
List of columns from tile header.
Returns
-------
dict
{
column: {
min: value,
max: value
}
}
"""
stats = {c['name']: [] for c in cols}
for point in data:
for c, v in point.items():
stats[c].append(v)
return {n: {'min': min(v), 'max': max(v)} for n, v in stats.items()}
def main():
"""
Get an info report for a tile. Format is same as input tile but with
min/max values for values under 'data'.
"""
arguments = docopt(__doc__, version='tileinfo 0.1')
src_name = arguments['SOURCE']
src_format = arguments['--srcformat']
indent = arguments['--indent']
if isinstance(indent, str) and indent.lower() == 'none':
indent = None
elif isinstance(indent, str):
indent = int(indent)
else:
indent = 4
with sys.stdin if src_name in ('-', None) else open(src_name, 'rb') as f:
# Guess input format if not given
if src_format is None:
if '.json' == f.name[-5:]:
src_format = 'json'
else:
src_format = 'tile'
if src_format == 'tile':
header, data = Tile(f.read()).unpack()
else:
header = json.loads(f.read())
data = header.pop('data')
# Generate the info report
report = info(data, header['cols'])
# Merge report with other tile attributes
out = {k: v for k, v in header.items() if k != 'data'}
out['data'] = {}
for field, vals in report.items():
out['data'][field + '_min'] = vals['min']
out['data'][field + '_max'] = vals['max']
print(json.dumps(out, indent=indent, sort_keys=True))
if __name__ == '__main__':
sys.exit(main())
| SkyTruth/vectortile | utils/tileinfo.py | Python | mit | 2,468 |
#!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
# Author: Damien Farrell 2011
"""This script will create multiple projects from csv files and
add pdbs based on the csv names. It can also create peatsa jobs
and merge them back into the database"""
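# Typical invocations, using the command-line options defined in __main__ below
# (the current directory must contain a 'data' folder of csv files):
#   python multiProjects.py -i    # create local projects from data/*.csv
#   python multiProjects.py -j    # submit PEATSA jobs or merge finished results
#   python multiProjects.py -s    # build the summary project and statistics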
import pickle, sys, os, copy, time, types, math
import numpy
from PEATDB.Base import PDatabase
from PEATDB import Utils
from PEATDB.Actions import DBActions
from PEATDB.plugins.PEATSAplugin import PEATSAPlugin
from PEATDB.plugins.Correlation import CorrelationAnalyser
from PEATDB.PEATTables import PEATTableModel
import PEATDB.Utils
from PEATDB.Parsers import PDBParser
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats import stats
#plt.rc('text',usetex=True)
plt.rc('font',size=7)
plt.rc('legend',fontsize=6)
plt.rc('savefig',dpi=300)
plt.rc('axes',linewidth=.5)
settings={'server':'enzyme.ucd.ie','username':'guest',
'password':'123'}
#path = '/home/people/farrell/Desktop/SADBPaperData'
path = os.getcwd()
savepath = os.path.join(path,'projects')
cpath = os.path.join(path,'data')
if not os.path.exists(cpath):
print 'we need a folder called data in the current path'
csvfiles = os.listdir(cpath)
dbnames = [os.path.splitext(i)[0] for i in csvfiles]
def PEATSAJobs(prjs, resubmit=False):
"""Submit PEATSA runs for all projects or merge results if done"""
for name in prjs:
print name
DB = PDatabase(local=os.path.join(savepath,name))
pdb = DB['wt'].Structure
PS = PEATSAPlugin()
PS.main(DB=DB)
if hasattr(DB.meta,'peatsa_jobs') and resubmit == False:
if 'mycalc' in DB.meta.peatsa_jobs:
print 'job is present'
#try to merge results
S = PEATTableModel(DB)
job,n = PS.getJob('mycalc')
PS.mergeResults(job, 'prediction', S)
DB.commit()
print 'merged results'
else:
mutlist = []
for p in DB.getRecs():
mutlist.append(DB.get(p).Mutations)
#print mutlist
pdbfile = PS.writetempPDB()
#we add source project data so exp data can be read from summary
prjdata = {'server':'enzyme.ucd.ie','username':'guest',
'project':name,'password':'123','port':'8080'}
PS.submitJob(name='mycalc', pdbname=DB.meta.refprotein, pdbfile=pdbfile,
mutations=mutlist, calcs=['stability'],
meta={'protein':name,'expcol':'Exp','project':prjdata})
#required to end process
PS.jobManager.stopLogging()
DB.close()
return
def createProjects(files):
"""Create multiple projects at once from csv files"""
for filename in files:
print filename
name = os.path.splitext(filename)[0]
#create/open db
DB = PDatabase(local=os.path.join(savepath,name))
DB.add('wt')
#add wt pdb
stream = DBActions.fetchPDB(name)
DBActions.addPDBFile(DB, 'wt', pdbdata=stream, pdbname=name, gui=False)
DB.meta.refprotein = 'wt'
DB.meta.info['protein'] = name
#import data from csv
DB.importCSV(os.path.join(cpath,filename), namefield='Mutations')
print 'imported ok'
DB.deleteField('PDB')
DB.commit()
DB.close()
print 'done'
return
def summarise(projects):
summDB = PDatabase(local='summary.fs')
C = CorrelationAnalyser()
figs = []
for f in range(4):
figs.append(plt.figure())
gs = gridspec.GridSpec(5, 5, wspace=0.3, hspace=0.5)
i=0
data=[]
print 'processing %s projects' %len(projects)
for p in projects:
print 'structure:',p
DB = PDatabase(local=os.path.join(savepath,p))
S = PEATTableModel(DB)
try:
exp,pre = S.getColumns(['Exp','prediction'],allowempty=False)
errs = [j[0]-j[1] for j in zip(exp,pre)]
except:
print 'no results'
continue
#DB.close()
#add link to proj
summDB.add(p)
summDB.addField('project',fieldtype='Project')
summDB[p]['project'] = {'server':'enzyme.ucd.ie','username':'guest',
'project':p,'password':'123','port':'8080'}
print summDB.isChanged()
#stats
cc,rmse,meanerr = C.getStats(pre,exp)
#ttest for mean errs 0
ttp = round(stats.ttest_1samp(errs, 0)[1],2)
#normality of errs
w,swp = C.ShapiroWilk(errs)
x={'name':p,'mutants':len(pre),'rmse':rmse,'corrcoef':cc,'meanerr':meanerr,
'ttest':ttp,'shapirowilk':swp}
'''ax = figs[0].add_subplot(gs[0, i])
C.plotCorrelation(pre,exp,title=p,ms=2,axeslabels=False,ax=ax)
ax = figs[1].add_subplot(gs[0, i])
C.showHistogram([pre,exp],title=p,labels=['pre','exp'],ax=ax)
ax = figs[2].add_subplot(gs[0, i])
C.plotNorm(errs,title=p,lw=1,ax=ax)
#qqplot
ax = figs[3].add_subplot(gs[0, i])
C.QQplot(errs,title=p,ax=ax)'''
#get PDB info
parser = PDBParser()
descr = parser.getDescription(p)
x.update(descr)
data.append(x)
i+=1
summDB.importDict(data)
print summDB.isChanged()
summDB.commit()
#add all peatsa jobs to summary proj also
'''print 'adding peatsa job info'
PS = PEATSAPlugin()
PS.main(DB=summDB)
#summDB.meta.peatsa_jobs = None
#from ZODB.PersistentMapping import PersistentMapping
#summDB.meta.peatsa_jobs = PersistentMapping()
PS.checkJobsDict()
PS.jobManager.stopLogging()
for p in projects:
#print summDB.meta
DB = PDatabase(local=os.path.join(savepath,p))
job = DB.meta.peatsa_jobs['mycalc']
summDB.meta.peatsa_jobs[p] = job
print job
#DB.close()
print summDB.isChanged()
print summDB.meta.peatsa_jobs
summDB.commit()'''
#for i in range(len(figs)):
# figs[i].savefig('fig%s.png' %i)
#plt.show()
return
def info(projects):
"""Just return info in current projects"""
total=0
summDB = PDatabase(local='summary.fs')
for p in projects:
DB = PDatabase(local=os.path.join(savepath,p))
l = DB.length()
total += l
print '%s has %s records' %(p,l)
if p not in summDB.getRecs():
print 'not present in summary project'
print '-----------------------'
print 'info on %s projects' %len(projects)
print 'with total of %s records' %total
print '%s mutants' %(total-len(projects))
return
def findOutliers(data):
    """Outliers in all corr data"""
    C = CorrelationAnalyser()
    #original code returned an undefined 'ax'; return the analyser instead
    return C
def send2Server(projects):
"""Send all projects to remote versions"""
settings={'server':'enzyme.ucd.ie','username':'guest',
'password':'123','port':8080}
adminsettings={'host':'enzyme.ucd.ie','user':'peatadmin',
'passwd':'nielsen','port':8080}
'''for p in projects:
print p
DB = PDatabase(local=os.path.join(savepath,p))
Utils.createDBonServer(prj=p,settings=adminsettings,
access='guest')
Utils.copyDBtoServer(DB,p,settings)'''
DB = PDatabase(local='summary.fs')
Utils.copyDBtoServer(DB,'PotapovDataset',settings)
return
def summarystats(projects):
"""summary stats"""
for p in projects:
DB = PDatabase(local=os.path.join(savepath,p))
#c=len(DB.recs()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--importcsv", dest="importcsv", action='store_true',
help="create/import", default=False)
parser.add_option("-j", "--jobs", dest="jobs", action='store_true',
help="submit/merge jobs", default=False)
parser.add_option("-s", "--summary", dest="summary", action='store_true',
help="do summary/stats", default=False)
parser.add_option("-p", "--path", dest="path",
help="Path with csv files")
parser.add_option("-c", "--copy", dest="copy",action='store_true',
help="copy to server", default=False)
parser.add_option("-o", "--info", dest="info",action='store_true',
help="get info", default=False)
opts, remainder = parser.parse_args()
if opts.path != None:
print path
if opts.importcsv == True:
createProjects(csvfiles)
if opts.jobs == True:
PEATSAJobs(['1bvc'])
#PEATSAJobs(dbnames, resubmit=False)
if opts.summary == True:
summarise(dbnames)
#summarise(['1wq5'])
if opts.copy == True:
send2Server(dbnames)
if opts.info == True:
info(dbnames)
| dmnfarrell/peat | PEATDB/scripts/multiProjects.py | Python | mit | 10,004 |
from setuptools import setup, find_packages
with open("README.rst") as readme:
long_description = readme.read()
setup(
name='algos-py',
version='0.4.5',
license='MIT',
author='Aleksandr Lisianoi',
author_email='[email protected]',
url='https://github.com/all3fox/algos-py',
packages=find_packages(),
description="Classic computer science algorithms in Python",
long_description=long_description,
platforms=['linux', 'windows', 'macos'],
)
| all3fox/algos-py | setup.py | Python | mit | 485 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Michael Hogg
# This file is part of bonemapy - See LICENSE.txt for information on usage and redistribution
import bonemapy
from distutils.core import setup
setup(
name = 'bonemapy',
version = bonemapy.__version__,
description = 'An ABAQUS plug-in to map bone properties from CT scans to 3D finite element bone/implant models',
license = 'MIT license',
keywords = ["ABAQUS", "plug-in","CT","finite","element","bone","properties","python"],
author = 'Michael Hogg',
author_email = '[email protected]',
url = "https://github.com/mhogg/bonemapy",
download_url = "https://github.com/mhogg/bonemapy/releases",
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Environment :: Plugins",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Visualization",
],
long_description = """
bonemapy is an ABAQUS plug-in that is used to extract bone density, or Hounsfield Unit (HU) values, from CT scans. The bone density can then be used to setup heterogeneous
material properties for a 3D finite element bone/implant model.
The HU values are extracted at the element integration points. Tri-linear interpolation is used to calculate the HU values at the location of the integration points.
bonemapy produces a text file containing the HU values that is formatted so that it can easily be read using ABAQUS user subroutines that are required to apply the bone properties. An
ABAQUS odb file is also created containing a fieldoutput representing HU so that the user can quickly visualise the mapped HU values.
""",
)
| mhogg/bonemapy | setup.py | Python | mit | 2,237 |
#!python
import re
import sys
import logging
import boto.ec2
from texttable import Texttable
from pprint import PrettyPrinter
from optparse import OptionParser
PP = PrettyPrinter( indent=2 )
###################
### Arg parsing
###################
parser = OptionParser("usage: %prog [options]" )
parser.add_option( "-v", "--verbose", default=None, action="store_true",
help="enable debug output" )
parser.add_option( "-H", "--no-header", default=None, action="store_true",
help="suppress table header" )
parser.add_option( "-r", "--region", default='us-east-1',
help="ec2 region to connect to" )
parser.add_option( "-g", "--group", default=None,
help="Include instances from these groups only (regex)" )
parser.add_option( "-G", "--exclude-group",default=None,
help="Exclude instances from these groups (regex)" )
parser.add_option( "-n", "--name", default=None,
help="Include instances with these names only (regex)" )
parser.add_option( "-N", "--exclude-name", default=None,
help="Exclude instances with these names (regex)" )
parser.add_option( "-t", "--type", default=None,
help="Include instances with these types only (regex)" )
parser.add_option( "-T", "--exclude-type", default=None,
help="Exclude instances with these types (regex)" )
parser.add_option( "-z", "--zone", default=None,
help="Include instances with these zones only (regex)" )
parser.add_option( "-Z", "--exclude-zone", default=None,
help="Exclude instances with these zones (regex)" )
parser.add_option( "-s", "--state", default=None,
help="Include instances with these states only (regex)" )
parser.add_option( "-S", "--exclude-state",default=None,
help="Exclude instances with these states (regex)" )
(options, args) = parser.parse_args()
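# Example invocation (region, name pattern and state are illustrative only):
#   ./instances.py -r us-west-2 -n '^web-' -S terminated
# lists instances in us-west-2 whose Name tag starts with "web-", excluding
# any that are terminated.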
###################
### Logging
###################
if options.verbose: log_level = logging.DEBUG
else: log_level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level)
logging.basicConfig(stream=sys.stderr, level=(logging.ERROR,logging.CRITICAL))
###################
### Connection
###################
conn = boto.ec2.connect_to_region( options.region )
###################
### Regexes
###################
regexes = {}
for opt in [ 'group', 'exclude_group', 'name', 'exclude_name',
'type', 'exclude_type', 'zone', 'exclude_zone',
'state', 'exclude_state' ]:
### we have a regex we should build
if options.__dict__.get( opt, None ):
regexes[ opt ] = re.compile( options.__dict__.get( opt ), re.IGNORECASE )
#PP.pprint( regexes )
def get_instances():
instances = [ i for r in conn.get_all_instances()
for i in r.instances ]
rv = [];
for i in instances:
### we will assume this node is one of the nodes we want
### to operate on, and we will unset this flag if any of
### the criteria fail
wanted_node = True
for re_name, regex in regexes.iteritems():
### What's the value we will be testing against?
if re.search( 'group', re_name ):
value = i.groups[0].name
elif re.search( 'name', re_name ):
value = i.tags.get( 'Name', '' )
elif re.search( 'type', re_name ):
value = i.instance_type
elif re.search( 'state', re_name ):
value = i.state
elif re.search( 'zone', re_name ):
### i.region is an object. i._placement is a string.
value = str(i._placement)
else:
logging.error( "Don't know what to do with: %s" % re_name )
continue
#PP.pprint( "name = %s value = %s pattern = %s" % ( re_name, value, regex.pattern ) )
### Should the regex match or not match?
if re.search( 'exclude', re_name ):
rv_value = None
else:
rv_value = True
### if the match is not what we expect, then clearly we
### don't care about the node
result = regex.search( value )
### we expected to get no results, excellent
if result == None and rv_value == None:
pass
### we expected to get some match, excellent
elif result is not None and rv_value is not None:
pass
### we don't care about this node
else:
wanted_node = False
break
if wanted_node:
rv.append( i )
return rv
def list_instances():
table = Texttable( max_width=0 )
table.set_deco( Texttable.HEADER )
table.set_cols_dtype( [ 't', 't', 't', 't', 't', 't', 't', 't' ] )
table.set_cols_align( [ 'l', 'l', 'l', 'l', 'l', 'l', 'l', 't' ] )
if not options.no_header:
### using add_row, so the headers aren't being centered, for easier grepping
table.add_row(
[ '# id', 'Name', 'Type', 'Zone', 'Group', 'State', 'Root', 'Volumes' ] )
instances = get_instances()
for i in instances:
### XXX there's a bug where you can't get the size of the volumes, it's
### always reported as None :(
volumes = ", ".join( [ ebs.volume_id for ebs in i.block_device_mapping.values()
if ebs.delete_on_termination == False ] )
### you can use i.region instead of i._placement, but it pretty
### prints to RegionInfo:us-east-1. For now, use the private version
### XXX EVERY column in this output had better have a non-zero length
### or texttable blows up with 'width must be greater than 0' error
table.add_row( [ i.id, i.tags.get( 'Name', ' ' ), i.instance_type,
i._placement , i.groups[0].name, i.state,
i.root_device_type, volumes or '-' ] )
#PP.pprint( i.__dict__ )
### table.draw() blows up if there is nothing to print
if instances or not options.no_header:
print table.draw()
if __name__ == '__main__':
list_instances()
| jib/aws-analysis-tools | instances.py | Python | mit | 6,405 |
import pandas as pd
from sklearn import preprocessing, cross_validation
from sklearn.svm import SVR
def read_data(file_name):
return pd.read_csv(file_name)
def preprocess(data):
# Data Preprocessing
data['GDP_scaled']=preprocessing.scale(data['GDP'])
data['CLPRB_scaled']=preprocessing.scale(data['CLPRB'])
data['EMFDB_scaled']=preprocessing.scale(data['EMFDB'])
data['ENPRP_scaled']=preprocessing.scale(data['ENPRP'])
data['NGMPB_scaled']=preprocessing.scale(data['NGMPB'])
data['PAPRB_scaled']=preprocessing.scale(data['PAPRB'])
data['PCP_scaled']=preprocessing.scale(data['PCP'])
data['ZNDX_scaled']=preprocessing.scale(data['ZNDX'])
data['OP_scaled']=preprocessing.scale(data['Nominal Price'])
data['OP2_scaled']=preprocessing.scale(data['Inflation Adjusted Price'])
return data
def split_data(data):
# Split data for train and test
all_x = data[['GDP_scaled','CLPRB_scaled','EMFDB_scaled','ENPRP_scaled','NGMPB_scaled','PAPRB_scaled','PCP_scaled','ZNDX_scaled','OP_scaled', 'OP2_scaled']][:55]
all_y = data[['NUETP']][:55]
return cross_validation.train_test_split(all_x, all_y, test_size=0.2, random_state=0)
# SVR for nuclear
def SVR_predict(X_train, X_test, y_train, y_test, data, statelist, i, result):
    # data, statelist, i and result were used but never defined in the original
    # function; they are passed in as explicit arguments here instead.
    clf = SVR(kernel='sigmoid', C=90.0, epsilon=0.3).fit(X_train, y_train)
    print(clf.score(X_test, y_test))
    future_x = data[['GDP_scaled','CLPRB_scaled','EMFDB_scaled','ENPRP_scaled','NGMPB_scaled','PAPRB_scaled','PCP_scaled','ZNDX_scaled','OP_scaled','OP2_scaled']][-6:]
    pred = pd.DataFrame(clf.predict(future_x))
    pred.columns = [statelist[i]]
    result = pd.concat([result, pred], axis=1)
    return result
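# Minimal driver sketch, not part of the original script: the csv file name,
# the state list and the column layout of the input are assumptions, used only
# to show how the helpers above chain together.
if __name__ == '__main__':
    statelist = ['WA']       # hypothetical list of state codes
    result = pd.DataFrame()  # collects one prediction column per state
    for i, state in enumerate(statelist):
        data = preprocess(read_data('%s.csv' % state))  # hypothetical csv file
        X_train, X_test, y_train, y_test = split_data(data)
        result = SVR_predict(X_train, X_test, y_train, y_test,
                             data, statelist, i, result)
    print(result.head())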
| uwkejia/Clean-Energy-Outlook | examples/Extra/Codes/SVR_nuclear.py | Python | mit | 1,586 |
# Docker-specific local settings
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db',
}
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
TEMPLATE_DIRS = (
'/srv/webldap/templates',
)
EMAIL_FROM = 'root@localhost'
REQ_EXPIRE_HRS = 48
REQ_EXPIRE_STR = '48 heures'
LDAP_URI = 'ldap://{}:{}'.format(os.environ['LDAP_PORT_389_TCP_ADDR'],
os.environ['LDAP_PORT_389_TCP_PORT'])
LDAP_STARTTLS = False
LDAP_CACERT = ''
LDAP_BASE = 'dc=example,dc=net'
LDAP_WEBLDAP_USER = 'cn=webldap,ou=service-users,dc=example,dc=net'
LDAP_WEBLDAP_PASSWD = 'secret'
LDAP_DEFAULT_GROUPS = []
LDAP_DEFAULT_ROLES = ['member']
| FedeRez/webldap | app/webldap/local_settings.docker.py | Python | mit | 764 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Simplified sprite animation framework.
.. note:: This module is an evolving "work-in-progress" and should be treated
as such until such time as this notice disappears.
"""
from time import sleep, perf_counter
from PIL import Image
class dict_wrapper(object):
"""
Helper class to turn dictionaries into objects.
"""
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [dict_wrapper(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, dict_wrapper(b) if isinstance(b, dict) else b)
class spritesheet(object):
"""
A sprite sheet is a series of images (usually animation frames) combined
into a larger image. A dictionary is usually spread into the object
constructor parameters with the following top-level attributes:
:param image: A path to a sprite map image.
:type image: str
:param frames: A dictionary of settings that defines how to extract
individual frames from the supplied image, as follows
- ``width`` & ``height`` are required and specify the dimensions of
the frames
- ``regX`` & ``regY`` indicate the registration point or "origin" of
the frames
- ``count`` allows you to specify the total number of frames in the
spritesheet; if omitted, this will be calculated based on the
dimensions of the source images and the frames. Frames will be
assigned indexes based on their position in the source images
(left to right, top to bottom).
:type frames: dict
:param animations: A dictionary of key/value pairs where the key is the
name of of the animation sequence, and the value are settings that
defines an animation sequence as follows:
- ``frames`` is a list of frame to show in sequence. Usually this
comprises of frame numbers, but can refer to other animation
sequences (which are handled much like a subroutine call).
- ``speed`` determines how quickly the animation frames are cycled
through compared to the how often the animation sequence yields.
- ``next`` is optional, but if supplied, determines what happens when
the animation sequence is exhausted. Typically this can be used to
self-reference, so that it forms an infinite loop, but can hand off
to any other animation sequence.
:type animations: dict
Loosely based on https://www.createjs.com/docs/easeljs/classes/SpriteSheet.html
"""
def __init__(self, image, frames, animations):
with open(image, 'rb') as fp:
self.image = Image.open(fp)
self.image.load()
self.frames = dict_wrapper(frames)
self.animations = dict_wrapper(animations)
# Reframe the sprite map in terms of the registration point (if set)
regX = self.frames.regX if hasattr(self.frames, "regX") else 0
regY = self.frames.regY if hasattr(self.frames, "regY") else 0
self.image = self.image.crop((regX, regY, self.image.width - regX, self.image.height - regY))
self.width, self.height = self.image.size
assert(self.width % self.frames.width == 0)
assert(self.height % self.frames.height == 0)
self.frames.size = (self.frames.width, self.frames.height)
if not hasattr(self.frames, 'count'):
self.frames.count = (self.width * self.height) // (self.frames.width * self.frames.height)
self.cache = {}
def __getitem__(self, frame_index):
"""
Returns (and caches) the frame for the given index.
:param frame_index: The index of the frame.
:type frame_index: int
:returns: A Pillow image cropped from the main image corresponding to
the given frame index.
:raises TypeError: if the ``frame_index`` is not numeric
:raises IndexError: if the ``frame_index`` is less than zero or more
than the largest frame.
"""
if not isinstance(frame_index, int):
raise TypeError("frame index must be numeric")
if frame_index < 0 or frame_index > self.frames.count:
raise IndexError("frame index out of range")
cached_frame = self.cache.get(frame_index)
if cached_frame is None:
offset = frame_index * self.frames.width
left = offset % self.width
top = (offset // self.width) * self.frames.height
right = left + self.frames.width
bottom = top + self.frames.height
bounds = [left, top, right, bottom]
cached_frame = self.image.crop(bounds)
self.cache[frame_index] = cached_frame
return cached_frame
def __len__(self):
"""
The number of frames in the sprite sheet
"""
return self.frames.count
def animate(self, seq_name):
"""
Returns a generator which "executes" an animation sequence for the given
``seq_name``, inasmuch as the next frame for the given animation is
yielded when requested.
:param seq_name: The name of a previously defined animation sequence.
:type seq_name: str
:returns: A generator that yields all frames from the animation
sequence.
:raises AttributeError: If the ``seq_name`` is unknown.
"""
while True:
index = 0
anim = getattr(self.animations, seq_name)
speed = anim.speed if hasattr(anim, "speed") else 1
num_frames = len(anim.frames)
while index < num_frames:
frame = anim.frames[int(index)]
index += speed
if isinstance(frame, int):
yield self[frame]
else:
for subseq_frame in self.animate(frame):
yield subseq_frame
if not hasattr(anim, "next"):
break
seq_name = anim.next
class framerate_regulator(object):
"""
Implements a variable sleep mechanism to give the appearance of a consistent
frame rate. Using a fixed-time sleep will cause animations to be jittery
(looking like they are speeding up or slowing down, depending on what other
    work is occurring), whereas this class keeps track of the last time the
    ``sleep()`` method was called, and calculates a sleep period to smooth out
the jitter.
:param fps: The desired frame rate, expressed numerically in
frames-per-second. By default, this is set at 16.67, to give a frame
render time of approximately 60ms. This can be overridden as necessary,
and if no FPS limiting is required, the ``fps`` can be set to zero.
:type fps: float
"""
def __init__(self, fps=16.67):
if fps == 0:
fps = -1
self.max_sleep_time = 1.0 / fps
self.total_transit_time = 0
self.called = 0
self.start_time = None
self.last_time = None
def __enter__(self):
self.enter_time = perf_counter()
if not self.start_time:
self.start_time = self.enter_time
self.last_time = self.enter_time
return self
def __exit__(self, *args):
"""
Sleeps for a variable amount of time (dependent on when it was last
called), to give a consistent frame rate. If it cannot meet the desired
        frame rate (i.e. too much time has elapsed since the last call), then
it simply exits without blocking.
"""
self.called += 1
self.total_transit_time += perf_counter() - self.enter_time
if self.max_sleep_time >= 0:
elapsed = perf_counter() - self.last_time
sleep_for = self.max_sleep_time - elapsed
if sleep_for > 0:
sleep(sleep_for)
self.last_time = perf_counter()
def effective_FPS(self):
"""
Calculates the effective frames-per-second - this should largely
correlate to the desired FPS supplied in the constructor, but no
guarantees are given.
:returns: The effective frame rate.
:rtype: float
"""
if self.start_time is None:
self.start_time = 0
elapsed = perf_counter() - self.start_time
return self.called / elapsed
def average_transit_time(self):
"""
        Calculates the average transit time between the enter and exit methods,
        and returns the time in milliseconds.
:returns: The average transit in milliseconds.
:rtype: float
"""
return self.total_transit_time * 1000.0 / self.called
| rm-hull/luma.core | luma/core/sprite_system.py | Python | mit | 8,896 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_target_amount(apps, schema_editor):
Entry = apps.get_model("momentum", "Entry")
for entry in Entry.objects.all():
entry.target_amount = entry.goal.target_amount
entry.save()
class Migration(migrations.Migration):
dependencies = [
('momentum', '0009_entry_target_amount'),
]
operations = [
migrations.RunPython(populate_target_amount),
]
| mod2/momentum | momentum/migrations/0010_target_data_migration.py | Python | mit | 517 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from itertools import groupby
from time import time
from functools import partial
import re
import django
django.setup()
from django.db import transaction
from clldutils.dsv import reader
from clldutils.text import split_text
from clldutils.path import Path
from clldutils import jsonlib
import attr
from dplace_app.models import Source
from loader.util import configure_logging, load_regions
from loader.society import society_locations, load_societies, load_society_relations
from loader.phylogenies import load_phylogenies
from loader.variables import load_vars
from loader.values import load_data
from loader.sources import load_references
from loader.glottocode import load_languages
comma_split = partial(split_text, separators=',', strip=True, brackets={})
semicolon_split = partial(split_text, separators=';', strip=True, brackets={})
def valid_enum_member(choices, instance, attribute, value):
if value not in choices:
raise ValueError(value)
@attr.s
class Variable(object):
category = attr.ib(convert=lambda s: [c.capitalize() for c in comma_split(s)])
id = attr.ib()
title = attr.ib()
definition = attr.ib()
type = attr.ib(
validator=partial(valid_enum_member, ['Continuous', 'Categorical', 'Ordinal']))
units = attr.ib()
source = attr.ib()
changes = attr.ib()
notes = attr.ib()
codes = attr.ib(default=attr.Factory(list))
@attr.s
class Data(object):
soc_id = attr.ib()
sub_case = attr.ib()
year = attr.ib()
var_id = attr.ib()
code = attr.ib()
comment = attr.ib()
references = attr.ib(convert=semicolon_split)
source_coded_data = attr.ib()
admin_comment = attr.ib()
@attr.s
class ObjectWithSource(object):
id = attr.ib()
name = attr.ib()
year = attr.ib()
author = attr.ib()
reference = attr.ib()
base_dir = attr.ib()
@property
def dir(self):
return self.base_dir.joinpath(self.id)
def as_source(self):
return Source.objects.create(
**{k: getattr(self, k) for k in 'year author name reference'.split()})
@attr.s
class RelatedSociety(object):
dataset = attr.ib(convert=lambda s: s.strip())
name = attr.ib(convert=lambda s: s.strip())
id = attr.ib(convert=lambda s: s.strip())
@classmethod
def from_string(cls, s):
match = re.match('([A-Za-z]+):\s*([^\[]+)\[([^\]]+)\]$', s)
if not match:
raise ValueError(s)
return cls(*match.groups())
@attr.s
class RelatedSocieties(object):
id = attr.ib()
related = attr.ib(convert=lambda s: [
RelatedSociety.from_string(ss) for ss in semicolon_split(s)])
@attr.s
class Dataset(ObjectWithSource):
type = attr.ib(validator=partial(valid_enum_member, ['cultural', 'environmental']))
description = attr.ib()
url = attr.ib()
def _items(self, what, **kw):
fname = self.dir.joinpath('{0}.csv'.format(what))
return list(reader(fname, **kw)) if fname.exists() else []
@property
def data(self):
return [Data(**d) for d in self._items('data', dicts=True)]
@property
def references(self):
return self._items('references', namedtuples=True)
@property
def societies(self):
return self._items('societies', namedtuples=True)
@property
def society_relations(self):
return [
RelatedSocieties(**d) for d in self._items('societies_mapping', dicts=True)]
@property
def variables(self):
codes = {vid: list(c) for vid, c in groupby(
sorted(self._items('codes', namedtuples=True), key=lambda c: c.var_id),
lambda c: c.var_id)}
return [
Variable(codes=codes.get(v['id'], []), **v)
for v in self._items('variables', dicts=True)]
@attr.s
class Phylogeny(ObjectWithSource):
scaling = attr.ib()
url = attr.ib()
@property
def trees(self):
return self.dir.joinpath('summary.trees')
@property
def taxa(self):
return list(reader(self.dir.joinpath('taxa.csv'), dicts=True))
class Repos(object):
def __init__(self, dir_):
self.dir = dir_
self.datasets = [
Dataset(base_dir=self.dir.joinpath('datasets'), **r) for r in
reader(self.dir.joinpath('datasets', 'index.csv'), dicts=True)]
self.phylogenies = [
Phylogeny(base_dir=self.dir.joinpath('phylogenies'), **r) for r in
reader(self.dir.joinpath('phylogenies', 'index.csv'), dicts=True)]
def path(self, *comps):
return self.dir.joinpath(*comps)
def read_csv(self, *comps, **kw):
return list(reader(self.path(*comps), **kw))
def read_json(self, *comps):
return jsonlib.load(self.path(*comps))
def load(repos, test=True):
configure_logging(test=test)
repos = Repos(repos)
for func in [
load_societies,
load_society_relations,
load_regions,
society_locations,
load_vars,
load_languages,
load_references,
load_data,
load_phylogenies,
]:
with transaction.atomic():
if not test:
print("%s..." % func.__name__) # pragma: no cover
start = time()
res = func(repos)
if not test: # pragma: no cover
print("{0} loaded in {1:.2f} secs".format(res, time() - start))
if __name__ == '__main__': # pragma: no cover
load(Path(sys.argv[1]), test=False)
sys.exit(0)
| NESCent/dplace | dplace_app/load.py | Python | mit | 5,554 |
def count_keys_equal(A, n, m):
equal = [0] * (m + 1)
for i in range(0, n): # 0 1 2 3 4 5 6
key = A[i] # 1 3 0 1 1 3 1
equal[key] += 1
return equal
def count_keys_less(equal, m):
less = [0] * (m + 1)
less[0] = 0
for j in range(1, m+1): # 0 1 2 3 4 5 6
        less[j] = less[j - 1] + equal[j - 1]  # 0 0 0 0 0 0 0
return less
def rearrange(A, less, n, m):
next = [0] * (m + 1)
B = [0] * n
for j in range(0, m + 1): # 1 2 5 5 6 7 10
next[j] = less[j] + 1
for i in range(0, n):
key = A[i]
index = next[key] - 1
B[index] = A[i]
next[key] += 1
return B
def counting_sort(A, n, m):
equal = count_keys_equal(A, n, m)
less = count_keys_less(equal, m)
return rearrange(A, less, n, m)
A = [4, 1, 5, 0, 1, 6, 5, 1, 5, 3]
print(A)
m = max(A)
n = len(A)
B = counting_sort(A, n, m)
print(B)
| wastegas/Data-Structures-Algorithms | src/Sorting/counting-sort.py | Python | mit | 951 |
'''
Copyright 2011 Jean-Baptiste B'edrune, Jean Sigwald
Using New BSD License:
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#
# This code has since been edited to improve HFS parsing, add lzvn/lzfse support
# and is now a part of the mac_apt framework
#
import os
import mmap
import sys
import struct
import tempfile
import zlib
import pytsk3
import logging
from plugins.helpers.common import CommonFunctions
from plugins.helpers.btree import AttributesTree, CatalogTree, ExtentsOverflowTree
from plugins.helpers.structs import *
log = logging.getLogger('MAIN.HELPERS.HFS_ALT')
lzfse_capable = False
try:
import liblzfse
lzfse_capable = True
except ImportError:
print("liblzfse not found. Won't decompress lzfse/lzvn streams")
def write_file(filename,data):
f = open(filename, "wb")
f.write(data)
f.close()
def lzvn_decompress(compressed_stream, compressed_size, uncompressed_size): #TODO: Move to a class!
'''Adds Prefix and Postfix bytes as required by decompressor,
then decompresses and returns uncompressed bytes buffer
'''
header = b'bvxn' + struct.pack('<I', uncompressed_size) + struct.pack('<I', compressed_size)
footer = b'bvx$'
return liblzfse.decompress(header + compressed_stream + footer)
class HFSFile(object):
def __init__(self, volume, hfsplusfork, fileID, deleted=False):
self.volume = volume
self.blockSize = volume.blockSize
self.fileID = fileID
self.totalBlocks = hfsplusfork.totalBlocks
self.logicalSize = hfsplusfork.logicalSize
self.extents = []
self.deleted = deleted
b = 0
for extent in hfsplusfork.HFSPlusExtentDescriptor:
self.extents.append(extent)
b += extent.blockCount
while b != hfsplusfork.totalBlocks:
#log.debug("extents overflow {}".format(b))
k,v = volume.getExtentsOverflowForFile(fileID, b)
if not v:
log.debug("extents overflow missing, startblock={}".format(b))
break
for extent in v:
self.extents.append(extent)
b += extent.blockCount
def copyOutFile(self, outputfile, truncate=True):
f = open(outputfile, "wb")
for i in range(self.totalBlocks):
f.write(self.readBlock(i))
if truncate:
f.truncate(self.logicalSize)
f.close()
'''def readAllBuffer(self, truncate=True):
r = b""
for i in range(self.totalBlocks):
r += self.readBlock(i)
if truncate:
r = r[:self.logicalSize]
return r
'''
def readAllBuffer(self, truncate=True, output_file=None):
'''Write to output_file if valid, else return a buffer of data.
Warning: If file size > 200 MiB, b'' is returned, file data is only written to output_file.
'''
r = b""
bs = self.volume.blockSize
blocks_max = 52428800 // bs # 50MB
for extent in self.extents:
if extent.blockCount == 0: continue
#if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
            # log.debug("FAIL, block 0x{:x} not marked as used".format(n))
if extent.blockCount > blocks_max:
counter = blocks_max
remaining_blocks = extent.blockCount
start_address = extent.startBlock * bs
while remaining_blocks > 0:
num_blocks_to_read = min(blocks_max, remaining_blocks)
size = num_blocks_to_read * bs
data = self.volume.read(start_address, size)
if output_file:
output_file.write(data)
elif self.logicalSize < 209715200: # 200MiB
r += data
remaining_blocks -= num_blocks_to_read
start_address += size
else:
data = self.volume.read(extent.startBlock * bs, bs * extent.blockCount)
if output_file:
output_file.write(data)
elif self.logicalSize < 209715200: # 200MiB
r += data
if truncate:
if output_file:
output_file.truncate(self.logicalSize)
elif self.logicalSize < 209715200: # 200MiB
r = r[:self.logicalSize]
return r
def processBlock(self, block, lba):
return block
def readBlock(self, n):
bs = self.volume.blockSize
if n*bs > self.logicalSize:
raise ValueError("BLOCK OUT OF BOUNDS")
bc = 0
for extent in self.extents:
bc += extent.blockCount
if n < bc:
lba = extent.startBlock+(n-(bc-extent.blockCount))
if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
raise ValueError("FAIL, block %x not marked as used" % n)
return self.processBlock(self.volume.read(lba*bs, bs), lba)
return b""
class HFSCompressedResourceFork(HFSFile):
def __init__(self, volume, hfsplusfork, fileID, compression_type, uncompressed_size):
super(HFSCompressedResourceFork,self).__init__(volume, hfsplusfork, fileID)
block0 = self.readBlock(0)
self.compression_type = compression_type
self.uncompressed_size = uncompressed_size
if compression_type in [8, 12]: # 8 is lzvn, 12 is lzfse
#only tested for 8
self.header = HFSPlusCmpfLZVNRsrcHead.parse(block0)
#print(self.header)
else:
self.header = HFSPlusCmpfRsrcHead.parse(block0)
#print(self.header)
self.blocks = HFSPlusCmpfRsrcBlockHead.parse(block0[self.header.headerSize:])
log.debug("HFSCompressedResourceFork numBlocks:{}".format(self.blocks.numBlocks))
#HAX, readblock not implemented
def readAllBuffer(self, truncate=True, output_file=None):
'''Warning: If output size > 200 MiB, b'' is returned, file data is only written to output_file.'''
if self.compression_type in [7, 8, 11, 12] and not lzfse_capable:
raise ValueError('LZFSE/LZVN compression detected, no decompressor available!')
if self.logicalSize >= 209715200:
temp_file = tempfile.SpooledTemporaryFile(209715200)
super(HFSCompressedResourceFork, self).readAllBuffer(True, temp_file)
temp_file.seek(0)
buff = mmap.mmap(temp_file.fileno(), 0) # memory mapped file to access as buffer
else:
buff = super(HFSCompressedResourceFork, self).readAllBuffer()
r = b""
if self.compression_type in [7, 11]: # lzvn or lzfse # Does it ever go here????
raise ValueError("Did not expect type " + str(self.compression_type) + " in resource fork")
try:
# The following is only for lzvn, not encountered lzfse yet!
data_start = self.header.headerSize
compressed_stream = buff[data_start:self.header.totalSize]
decompressed = lzvn_decompress(compressed_stream, self.header.totalSize - self.header.headerSize, self.uncompressed_size)
if output_file: output_file.write(decompressed)
elif self.uncompressed_size < 209715200: r += decompressed
except liblzfse.error as ex:
raise ValueError("Exception from lzfse_lzvn decompressor")
elif self.compression_type in [8, 12]: # lzvn or lzfse in 64k chunks
try:
# The following is only for lzvn, not encountered lzfse yet!
full_uncomp = self.uncompressed_size
chunk_uncomp = 65536
i = 0
src_offset = self.header.headerSize
for offset in self.header.chunkOffsets:
compressed_size = offset - src_offset
data = buff[src_offset:offset] #input_file.read(compressed_size)
src_offset = offset
if full_uncomp <= 65536:
chunk_uncomp = full_uncomp
else:
chunk_uncomp = 65536
if len(self.header.chunkOffsets) == i + 1: # last chunk
chunk_uncomp = full_uncomp - (65536 * i)
if chunk_uncomp < compressed_size and data[0] == 0x06:
decompressed = data[1:]
else:
decompressed = lzvn_decompress(data, compressed_size, chunk_uncomp)
if output_file: output_file.write(decompressed)
elif self.uncompressed_size < 209715200: r += decompressed
i += 1
except liblzfse.error as ex:
raise ValueError("Exception from lzfse_lzvn decompressor")
else:
base = self.header.headerSize + 4
for b in self.blocks.HFSPlusCmpfRsrcBlockArray:
decompressed = zlib.decompress(buff[base+b.offset:base+b.offset+b.size])
if output_file: output_file.write(decompressed)
elif self.uncompressed_size < 209715200: r += decompressed
if self.logicalSize >= 209715200:
            buff.close()  # close the memory-mapped view; mmap (the module) has no close()
temp_file.close()
return r
class HFSVolume(object):
def __init__(self, pytsk_image, offset=0):
self.img = pytsk_image
self.offset = offset
try:
data = self.read(0, 0x1000)
self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
assert self.header.signature == 0x4858 or self.header.signature == 0x482B
except AssertionError:
raise ValueError("Not an HFS+ image")
#self.is_hfsx = self.header.signature == 0x4858
self.blockSize = self.header.blockSize
self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
self.allocationBitmap = self.allocationFile.readAllBuffer()
self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
self.extentsTree = ExtentsOverflowTree(self.extentsFile)
self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
self.catalogTree = CatalogTree(self.catalogFile)
self.xattrTree = AttributesTree(self.xattrFile)
self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)
def read(self, offset, size):
return self.img.read(self.offset + offset, size)
def volumeID(self):
return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])
def isBlockInUse(self, block):
thisByte = self.allocationBitmap[block // 8]
return (thisByte & (1 << (7 - (block % 8)))) != 0
def unallocatedBlocks(self):
for i in range(self.header.totalBlocks):
if not self.isBlockInUse(i):
yield i, self.read(i*self.blockSize, self.blockSize)
def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
return self.extentsTree.searchExtents(fileID, forkType, startBlock)
def getXattr(self, fileID, name):
return self.xattrTree.searchXattr(fileID, name)
def getFileByPath(self, path):
return self.catalogTree.getRecordFromPath(path)
def getFinderDateAdded(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType == kHFSPlusFileRecord:
return v.data.ExtendedFileInfo.finderDateAdded
elif k and v.recordType == kHFSPlusFolderRecord:
return v.data.ExtendedFolderInfo.finderDateAdded
return 0
def listFolderContents(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if not k or v.recordType != kHFSPlusFolderRecord:
return
for k,v in self.catalogTree.getFolderContents(v.data.folderID):
if v.recordType == kHFSPlusFolderRecord:
print(v.data.folderID, getString(k) + "/")
elif v.recordType == kHFSPlusFileRecord:
print(v.data.fileID, getString(k))
def listFinderData(self, path):
'''Returns finder data'''
finder_data = {}
k,v = self.catalogTree.getRecordFromPath(path)
date_added = 0
if k and v.recordType == kHFSPlusFileRecord:
date_added = v.data.ExtendedFileInfo.finderDateAdded
if v.data.FileInfo.fileType: finder_data['fileType'] = v.data.FileInfo.fileType
if v.data.FileInfo.fileCreator: finder_data['fileCreator'] = v.data.FileInfo.fileCreator
if v.data.FileInfo.finderFlags: finder_data['finderFlags'] = v.data.FileInfo.finderFlags
if v.data.ExtendedFileInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFileInfo.extendedFinderFlags
elif k and v.recordType == kHFSPlusFolderRecord:
date_added = v.data.ExtendedFolderInfo.finderDateAdded
if v.data.FolderInfo.finderFlags: finder_data['FinderFlags'] = v.data.FolderInfo.finderFlags
if v.data.ExtendedFolderInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFolderInfo.extendedFinderFlags
if date_added: finder_data['DateAdded'] = date_added
return finder_data
def getCnidForPath(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
raise ValueError("Path not found")
if k and v.recordType == kHFSPlusFileRecord:
return v.data.fileID
elif k and v.recordType == kHFSPlusFolderThreadRecord:
return v.data.folderID
def getXattrsByPath(self, path):
file_id = self.getCnidForPath(path)
return self.xattrTree.getAllXattrs(file_id)
def getXattrByPath(self, path, name):
file_id = self.getCnidForPath(path)
return self.getXattr(file_id, name)
''' Compression type in Xattr as per apple:
Source: https://opensource.apple.com/source/copyfile/copyfile-138/copyfile.c.auto.html
case 3: /* zlib-compressed data in xattr */
case 4: /* 64k chunked zlib-compressed data in resource fork */
case 7: /* LZVN-compressed data in xattr */
case 8: /* 64k chunked LZVN-compressed data in resource fork */
case 9: /* uncompressed data in xattr (similar to but not identical to CMP_Type1) */
case 10: /* 64k chunked uncompressed data in resource fork */
case 11: /* LZFSE-compressed data in xattr */
case 12: /* 64k chunked LZFSE-compressed data in resource fork */
/* valid compression type, we want to copy. */
break;
case 5: /* specifies de-dup within the generation store. Don't copy decmpfs xattr. */
copyfile_debug(3, "compression_type <5> on attribute com.apple.decmpfs for src file %s is not copied.",
s->src ? s->src : "(null string)");
continue;
case 6: /* unused */
'''
def readFile(self, path, output_file=None):
'''Reads file specified by 'path' and copies it out into output_file if valid, else returns as string.
Warning: If file is too large, over 200 MiB, then it will return b'', and only write to output_file.
'''
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
raise ValueError("File not found")
data = b''
assert v.recordType == kHFSPlusFileRecord
xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
if xattr:
decmpfs = HFSPlusDecmpfs.parse(xattr)
log.debug("decmpfs.compression_type={}".format(str(decmpfs.compression_type)))
if decmpfs.compression_type == 1:
data = xattr[16:]
if output_file: output_file.write(data)
elif decmpfs.compression_type == 3:
if decmpfs.uncompressed_size == len(xattr) - 16:
data = xattr[16:]
else:
data = zlib.decompress(xattr[16:])
if output_file: output_file.write(data)
elif decmpfs.compression_type == 4:
f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID, decmpfs.compression_type, decmpfs.uncompressed_size)
data = f.readAllBuffer(True, output_file)
elif decmpfs.compression_type in [7, 11]:
if xattr[16] == 0x06: # perhaps even 0xF?
data = xattr[17:] #tested OK
else: #tested OK
uncompressed_size = struct.unpack('<I', xattr[8:12])[0]
compressed_size = len(xattr) - 16
compressed_stream = xattr[16:]
data = lzvn_decompress(compressed_stream, compressed_size, uncompressed_size)
if output_file: output_file.write(data)
elif decmpfs.compression_type in [8, 12]:
# tested for type 8 , OK
f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID, decmpfs.compression_type, decmpfs.uncompressed_size)
data = f.readAllBuffer(True, output_file)
if output_file: output_file.write(data)
else:
f = HFSFile(self, v.data.dataFork, v.data.fileID)
data = f.readAllBuffer(True, output_file)
return data
def readJournal(self):
jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
jib = JournalInfoBlock.parse(jb)
return self.read(jib.offset,jib.size)
def GetFileMACTimesFromFileRecord(self, v):
times = { 'c_time':None, 'm_time':None, 'cr_time':None, 'a_time':None }
catalog_file = v.data
times['c_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.attributeModDate)
times['m_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.contentModDate)
times['cr_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.createDate)
times['a_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.accessDate)
return times
def GetFileMACTimes(self, file_path):
'''
Returns dictionary {c_time, m_time, cr_time, a_time}
where cr_time = created time and c_time = Last time inode/mft modified
'''
k,v = self.catalogTree.getRecordFromPath(file_path)
if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
return self.GetFileMACTimesFromFileRecord(v)
raise ValueError("Path not found or not file/folder!")
def IsValidFilePath(self, path):
'''Check if a file path is valid, does not check for folders!'''
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
return False
return v.recordType == kHFSPlusFileRecord #TODO: Check for hard links , sym links?
def IsValidFolderPath(self, path):
'''Check if a folder path is valid'''
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
return False
return v.recordType == kHFSPlusFolderRecord #TODO: Check for hard links , sym links?
def IsSymbolicLink(self, path):
'''Check if a path points to a file/folder or symbolic link'''
mode = self.GetFileMode(path)
if mode:
return (mode & S_IFLNK) == S_IFLNK
return False
def GetFileSizeFromFileRecord(self, v):
xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
if xattr:
decmpfs = HFSPlusDecmpfs.parse(xattr)
return decmpfs.uncompressed_size #TODO verify for all cases!
else:
return v.data.dataFork.logicalSize
def GetFileSize(self, path):
'''For a given file path, gets logical file size'''
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType == kHFSPlusFileRecord:
return self.GetFileSizeFromFileRecord(v)
else:
raise ValueError("Path not found")
def GetUserAndGroupID(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
return (v.data.HFSPlusBSDInfo.ownerID, v.data.HFSPlusBSDInfo.groupID)
else:
raise ValueError("Path not found")
def GetFileMode(self, path):
'''Returns the file or folder's fileMode '''
k,v = self.catalogTree.getRecordFromPath(path)
if k and v and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
return v.data.HFSPlusBSDInfo.fileMode
else:
raise ValueError("Path not found or not a file/folder") | ydkhatri/mac_apt | plugins/helpers/hfs_alt.py | Python | mit | 22,333 |
# -*- encoding: utf-8 -*-
from mock import Mock, patch
from psutil import AccessDenied, TimeoutExpired
from thefuck.output_readers import rerun
class TestRerun(object):
def setup_method(self, test_method):
self.patcher = patch('thefuck.output_readers.rerun.Process')
process_mock = self.patcher.start()
self.proc_mock = process_mock.return_value = Mock()
def teardown_method(self, test_method):
self.patcher.stop()
@patch('thefuck.output_readers.rerun._wait_output', return_value=False)
@patch('thefuck.output_readers.rerun.Popen')
def test_get_output(self, popen_mock, wait_output_mock):
popen_mock.return_value.stdout.read.return_value = b'output'
assert rerun.get_output('', '') is None
wait_output_mock.assert_called_once()
@patch('thefuck.output_readers.rerun.Popen')
def test_get_output_invalid_continuation_byte(self, popen_mock):
output = b'ls: illegal option -- \xc3\nusage: ls [-@ABC...] [file ...]\n'
expected = u'ls: illegal option -- \ufffd\nusage: ls [-@ABC...] [file ...]\n'
popen_mock.return_value.stdout.read.return_value = output
actual = rerun.get_output('', '')
assert actual == expected
@patch('thefuck.output_readers.rerun._wait_output')
def test_get_output_unicode_misspell(self, wait_output_mock):
rerun.get_output(u'pácman', u'pácman')
wait_output_mock.assert_called_once()
def test_wait_output_is_slow(self, settings):
assert rerun._wait_output(Mock(), True)
self.proc_mock.wait.assert_called_once_with(settings.wait_slow_command)
def test_wait_output_is_not_slow(self, settings):
assert rerun._wait_output(Mock(), False)
self.proc_mock.wait.assert_called_once_with(settings.wait_command)
@patch('thefuck.output_readers.rerun._kill_process')
def test_wait_output_timeout(self, kill_process_mock):
self.proc_mock.wait.side_effect = TimeoutExpired(3)
self.proc_mock.children.return_value = []
assert not rerun._wait_output(Mock(), False)
kill_process_mock.assert_called_once_with(self.proc_mock)
@patch('thefuck.output_readers.rerun._kill_process')
def test_wait_output_timeout_children(self, kill_process_mock):
self.proc_mock.wait.side_effect = TimeoutExpired(3)
self.proc_mock.children.return_value = [Mock()] * 2
assert not rerun._wait_output(Mock(), False)
assert kill_process_mock.call_count == 3
def test_kill_process(self):
proc = Mock()
rerun._kill_process(proc)
proc.kill.assert_called_once_with()
@patch('thefuck.output_readers.rerun.logs')
def test_kill_process_access_denied(self, logs_mock):
proc = Mock()
proc.kill.side_effect = AccessDenied()
rerun._kill_process(proc)
proc.kill.assert_called_once_with()
logs_mock.debug.assert_called_once()
| nvbn/thefuck | tests/output_readers/test_rerun.py | Python | mit | 2,942 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Action'
db.create_table('gratitude_action', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('gratitude', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gratitude.Gratitude'], null=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=500)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal('gratitude', ['Action'])
def backwards(self, orm):
# Deleting model 'Action'
db.delete_table('gratitude_action')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gratitude.action': {
'Meta': {'object_name': 'Action'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'gratitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gratitude.Gratitude']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'gratitude.gratitude': {
'Meta': {'object_name': 'Gratitude'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'stash_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'stashed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '5000'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gratitude.setting': {
'Meta': {'object_name': 'Setting'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'gratitude.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_messages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
    complete_apps = ['gratitude']
| adamfeuer/ArtOfGratitude_app | gratitude/migrations/0005_auto__add_action.py | Python | mit | 7,091 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015 Axel Mendoza <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
| aek/solt_sftp | src/solt_sftp/__init__.py | Python | mit | 1,132 |
from snowball.utils import SnowMachine
from snowball.climate import WeatherProbe
# Note: multiline import limits line length
from snowball.water.phases import (
WaterVapor, IceCrystal, SnowFlake
)
def let_it_snow():
"""
Makes it snow, using a SnowMachine when weather doesn't allow it.
Returns a list of SnowFlakes.
Example::
>>> let_it_snow()
The snow machine is broken. No snow today. :/
[]
>>> let_it_snow()
[<snowball.water.phases.SnowFlake object at 0x101dbc210>,
<snowball.water.phases.SnowFlake object at 0x101dbc350>,
<snowball.water.phases.SnowFlake object at 0x101dbc1d0>,
<snowball.water.phases.SnowFlake object at 0x101dbc190>,
<snowball.water.phases.SnowFlake object at 0x101dbc3d0>,
<snowball.water.phases.SnowFlake object at 0x101dbc410>,
<snowball.water.phases.SnowFlake object at 0x101dbc450>,
<snowball.water.phases.SnowFlake object at 0x101dbc390>,
<snowball.water.phases.SnowFlake object at 0x101dbc310>]
"""
# Create a WeatherProbe
weather_probe = WeatherProbe()
if weather_probe.temperature < 0 and weather_probe.clouds:
# There's clouds and it's cold enough
# Create necessary components
vapor = WaterVapor()
ice = IceCrystal()
# Start with empty list of flakes
snow_flakes = []
# Now create 10 snowflakes
for counter in xrange(1, 10):
flake = SnowFlake(vapor, ice)
# Add flake to list
snow_flakes.append(flake)
return snow_flakes
else:
# The weather's not right, use the SnowMachine
snow_machine = SnowMachine()
snow_flakes = snow_machine.let_it_snow()
return snow_flakes
| dokterbob/slf-programming-workshops | examples/snowball/main.py | Python | mit | 1,795 |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from inspect import isgenerator
class Element(object):
tag = ''
self_closing = False
def __init__(self, *children, **attrs):
if children and isinstance(children[0], dict):
self.attrs = children[0]
children = children[1:]
else:
self.attrs = attrs
# Some helpers for the `class` attribute
if 'classes' in attrs:
attrs['class'] = ' '.join(c for c in attrs.pop('classes') if c)
elif 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
self.children = []
self.add_children(children)
def __call__(self, *children):
self.add_children(children)
return self
def __repr__(self):
attr_string = ''.join(' {}="{}"'.format(key, val) for key, val in self.attrs.items() if val)
return '<{}{}>'.format(self.tag, attr_string)
def add_children(self, children):
if self.self_closing and children:
raise ValueError("Self-closing tags can't have children.")
if children and isgenerator(children[0]):
children = children[0]
for child in children:
if child is not None:
if isinstance(child, list):
self.add_children(child)
else:
self.children.append(child)
| russiancow/stag | stag/base.py | Python | mit | 1,436 |
# Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
# pylint: disable=W0212,W0613
from twisted.internet.defer import Deferred, DeferredList
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
import smartanthill.litemq.exchange as ex
from smartanthill.exception import LiteMQResendFailed
class LiteMQCase(TestCase):
g_resent_nums = 0
def test_declare_exchange(self):
for type_, class_ in {"direct": ex.ExchangeDirect,
"fanout": ex.ExchangeFanout}.items():
self.assertIsInstance(
ex.ExchangeFactory().newExchange("exchange_name", type_),
class_
)
self.assertRaises(
AttributeError,
lambda: ex.ExchangeFactory().newExchange("exchange_name",
"unknown-type")
)
def test_queue_ack_success(self):
message, properties = "Test message", {"foo": "bar"}
def _callback(m, p):
self.assertEqual(m, message)
self.assertEqual(p, properties)
return True
def _resback(result):
self.assertIsInstance(result, bool)
self.assertEqual(result, True)
q = ex.Queue("queue_name", "routing_key", _callback, ack=True)
d = q.put(message, properties)
self.assertIsInstance(d, Deferred)
d.addCallbacks(_resback)
return d
def test_queue_ack_fails(self):
self.g_resent_nums, resend_max = 0, 3
def _callback(m, p):
self.g_resent_nums += 1
# test exception
if self.g_resent_nums == 1:
return 1/0
# test "ack-invalid" that is equl to False
else:
return False
def _errback(result):
self.assertIsInstance(result, Failure)
self.assertTrue(result.check(LiteMQResendFailed))
self.assertEqual(resend_max, self.g_resent_nums)
q = ex.Queue("queue_name", "routing_key", _callback, ack=True)
q.RESEND_MAX = resend_max
q.RESEND_DELAY = 0
d = q.put("Test message", {"foo": "bar"})
self.assertIsInstance(d, Deferred)
d.addBoth(_errback)
return d
def test_queue_nonack(self):
self.g_resent_nums, resend_max = 0, 3
def _callback(m, p):
self.g_resent_nums += 1
return 1/0
def _errback(result):
self.assertNotIsInstance(result, Failure)
self.assertIsInstance(result, bool)
self.assertEqual(result, False)
self.assertEqual(self.g_resent_nums, 1)
q = ex.Queue("queue_name", "routing_key", _callback, ack=False)
q.RESEND_MAX = resend_max
q.RESEND_DELAY = 0
d = q.put("Test message", {"foo": "bar"})
self.assertIsInstance(d, Deferred)
d.addBoth(_errback)
return d
def test_exchange_direct(self):
message, properties = "Test message", {"foo": "bar"}
def _callback(m, p):
self.assertEqual(m, message)
self.assertEqual(p, properties)
myex = ex.ExchangeFactory().newExchange("exchange_name", "direct")
myex.bind_queue("queue_name", "routing_key", _callback)
empty_result = myex.publish("invalid_routing_key", message, properties)
self.assertEqual(empty_result, [])
result = myex.publish("routing_key", message, properties)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
d = result[0]
def _resback(result):
self.assertEqual(result, None)
myex.unbind_queue("queue_name")
self.assertEqual(len(myex._queues), 0)
self.assertIsInstance(d, Deferred)
d.addCallbacks(_resback)
return d
def test_exchange_fanout(self):
self.g_resent_nums = 0
message, properties = "Test message", {"foo": "bar"}
def _callback(m, p):
self.g_resent_nums += 1
self.assertEqual(m, message)
self.assertEqual(p, properties)
myex = ex.ExchangeFactory().newExchange("exchange_name", "fanout")
myex.bind_queue("queue_name", "routing_key", _callback)
result = myex.publish("invalid_routing_key", message, properties)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
d1 = result[0]
result = myex.publish("routing_key", message, properties)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
d2 = result[0]
self.assertIsInstance(d1, Deferred)
self.assertIsInstance(d2, Deferred)
dl = DeferredList([d1, d2])
def _resback(result):
self.assertEqual(result, [(True, None), (True, None)])
dl.addCallbacks(_resback)
return dl
| smartanthill/smartanthill1_0 | smartanthill/test/test_litemq.py | Python | mit | 4,944 |
#Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import numpy.numarray as na
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_a1r1 import *
import m3.actuator_ec_pb2 as aec
import m3qa.config_arm_a1r1 as a1r1
# ###################################### a1 J0 ##############################################################
config_default_a1_j0={
'calib':a1r1.config_arm_a1r1_actuator_j0['calib'],
'param':a1r1.config_arm_a1r1_actuator_j0['param'],
'param_internal':
{
'joint_limits': {'both_arms':[-47.0,197.0],'note':'Positive is reaching upward'}
}
}
# ######################################## a1 J1 ############################################################
config_default_a1_j1={
'calib':a1r1.config_arm_a1r1_actuator_j1['calib'],
'param':a1r1.config_arm_a1r1_actuator_j1['param'],
'param_internal':
{
'joint_limits': {'right_arm':[-19,121],'left_arm':[-121,19],'note':'positive is elbow to its right'}
}
}
# ########################################## a1 J2 ##########################################################
config_default_a1_j2={
'calib':a1r1.config_arm_a1r1_actuator_j2['calib'],
'param':a1r1.config_arm_a1r1_actuator_j2['param'],
'param_internal':
{
'joint_limits': {'both_arms':[-76.0,76.0],'note':'positive is reaching to its right'}
}
}
# ############################################# a1 J3 #######################################################
config_default_a1_j3={
'calib':a1r1.config_arm_a1r1_actuator_j3['calib'],
'param':a1r1.config_arm_a1r1_actuator_j3['param'],
'param_internal':
{
'joint_limits': {'both_arms':[0,140.0],'note':'positive is wrist towards chest'}
}
}
# ############################################# a1 J4 #######################################################
config_default_a1_j4={
'calib':a1r1.config_arm_a1r1_actuator_j4['calib'],
'param':a1r1.config_arm_a1r1_actuator_j4['param'],
'param_internal':
{
'joint_limits': {'right_arm':[-78,123],'left_arm':[-123,78],'note':'positive is top of forearm rotating to its right'}
}
}
# ############################################# a1 J5 #######################################################
config_default_a1_j5={
'calib':a1r1.config_arm_a1r1_actuator_j5['calib'],
'param':a1r1.config_arm_a1r1_actuator_j5['param'],
'param_internal':
{
'joint_limits': {'both_arms':[-45,45],'note': 'positive is hand rotating up'}
}
}
# ############################################# a1 J6 #######################################################
config_default_a1_j6={
'calib':a1r1.config_arm_a1r1_actuator_j6['calib'],
'param':a1r1.config_arm_a1r1_actuator_j6['param'],
'param_internal':
{
'joint_limits': {'both_arms':[-45,45],'note':'positive is fingers rotating to its right'}
}
}
# ###########################################################################
class M3Calibrate_Arm_A1R1(M3CalibrateActuatorEcA1R1):
def __init__(self):
M3CalibrateActuatorEcA1R1.__init__(self)
self.joint_names=['Shoulder J0',
'Shoulder J1',
'Shoulder J2',
'Elbow J3',
'Wrist J4',
'Wrist J5',
'Wrist J6']
self.config_default=[
config_default_a1_j0,
config_default_a1_j1,
config_default_a1_j2,
config_default_a1_j3,
config_default_a1_j4,
config_default_a1_j5,
config_default_a1_j6]
def start(self,ctype):
if not M3CalibrateActuatorEcA1R1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.param_internal=self.config_default[self.jid]['param_internal']
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
print 'Calibrating joint',self.joint_names[self.jid]
return True
| ahoarau/m3meka | python/scripts/m3qa/calibrate_arm_a1r1.py | Python | mit | 4,744 |
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.dnsimple import DnsimpleClientNotFound, DnsimpleProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
class TestDnsimpleProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
# Our test suite differs a bit, add our NS and remove the simple one
expected.add_record(Record.new(expected, 'under', {
'ttl': 3600,
'type': 'NS',
'values': [
'ns1.unit.tests.',
'ns2.unit.tests.',
]
}))
for record in list(expected.records):
if record.name == 'sub' and record._type == 'NS':
expected._remove_record(record)
break
def test_populate(self):
provider = DnsimpleProvider('test', 'token', 42)
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=401,
text='{"message": "Authentication failed"}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Unauthorized', ctx.exception.message)
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
        # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='{"message": "Domain `foo.bar` not found"}')
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# No diffs == no changes
with requests_mock() as mock:
base = 'https://api.dnsimple.com/v2/42/zones/unit.tests/' \
'records?page='
with open('tests/fixtures/dnsimple-page-1.json') as fh:
mock.get('{}{}'.format(base, 1), text=fh.read())
with open('tests/fixtures/dnsimple-page-2.json') as fh:
mock.get('{}{}'.format(base, 2), text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(14, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(14, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
# test handling of invalid content
with requests_mock() as mock:
with open('tests/fixtures/dnsimple-invalid-content.json') as fh:
mock.get(ANY, text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set([
Record.new(zone, '', {
'ttl': 3600,
'type': 'SSHFP',
'values': []
}),
Record.new(zone, '_srv._tcp', {
'ttl': 600,
'type': 'SRV',
'values': []
}),
Record.new(zone, 'naptr', {
'ttl': 600,
'type': 'NAPTR',
'values': []
}),
]), zone.records)
def test_apply(self):
provider = DnsimpleProvider('test', 'token', 42)
resp = Mock()
resp.json = Mock()
provider._client._request = Mock(return_value=resp)
        # non-existent domain, create everything
resp.json.side_effect = [
DnsimpleClientNotFound, # no zone in populate
DnsimpleClientNotFound, # no domain during apply
]
plan = provider.plan(self.expected)
# No root NS, no ignored
n = len(self.expected.records) - 2
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
provider._client._request.assert_has_calls([
# created the domain
call('POST', '/domains', data={'name': 'unit.tests'}),
# created at least one of the record with expected data
call('POST', '/zones/unit.tests/records', data={
'content': '20 30 foo-1.unit.tests.',
'priority': 10,
'type': 'SRV',
'name': '_srv._tcp',
'ttl': 600
}),
])
# expected number of total calls
self.assertEquals(26, provider._client._request.call_count)
provider._client._request.reset_mock()
# delete 1 and update 1
provider._client.records = Mock(return_value=[
{
'id': 11189897,
'name': 'www',
'content': '1.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189898,
'name': 'www',
'content': '2.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189899,
'name': 'ttl',
'content': '3.2.3.4',
'ttl': 600,
'type': 'A',
}
])
# Domain exists, we don't care about return
resp.json.side_effect = ['{}']
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'ttl', {
'ttl': 300,
'type': 'A',
'value': '3.2.3.4'
}))
plan = provider.plan(wanted)
self.assertEquals(2, len(plan.changes))
self.assertEquals(2, provider.apply(plan))
# recreate for update, and deletes for the 2 parts of the other
provider._client._request.assert_has_calls([
call('POST', '/zones/unit.tests/records', data={
'content': '3.2.3.4',
'type': 'A',
'name': 'ttl',
'ttl': 300
}),
call('DELETE', '/zones/unit.tests/records/11189899'),
call('DELETE', '/zones/unit.tests/records/11189897'),
call('DELETE', '/zones/unit.tests/records/11189898')
], any_order=True)
| h-hwang/octodns | tests/test_octodns_provider_dnsimple.py | Python | mit | 6,915 |
from components.base.automotive_component import AutomotiveComponent
from config import project_registration as proj
from tools.ecu_logging import ECULogger as L
import random
class AbstractECU(AutomotiveComponent):
'''
This abstract class defines the interface of
an ECU as it is found in an automotive network
'''
def __init__(self, sim_env, ecu_id, data_rate):
''' Constructor
Input: sim_env simpy.Environment environment of this component
ecu_id string id of the corresponding AbstractECU
data_rate float datarate of the ecu
Output: -
'''
AutomotiveComponent.__init__(self, sim_env)
self._ABSTRACT_ECU = True
self._ecu_id = ecu_id # ID of the ECU
self.ecuSW = None # what is done
self.ecuHW = None # what is used to make it happen
self.MessageClass = proj.BUS_MSG_CLASS # what kind of messages are exchanged
self.connected_bus = None # Bus that is connected to the ECU
self.data_rate = proj.BUS_ECU_DATARATE # Datarate with which bits are put on the bus
self._effective_datarate = 0 # Bit per second
self._effective_bittime = 0 # seconds
self._jitter = 1
self.startup_delay = False
def set_startup_delay(self, start_time):
''' this method sets the startup delay. When this delay is set
this ECU is activated after the defined start time
Input: start_time float time when the ECU starts running
Output: -
'''
self.startup_delay = start_time
if start_time:
self.ecuHW.transceiver.ecu_is_active = False
def set_jitter(self, jitter_range):
''' sets the jitter which will be multiplied onto each
timeout value. It will be within jitter_range
e.g. jitter_range of 0.1 means that any random value
between 1.0 and 1.1 will be used
Input: jitter_range: float dispersion from 1.0
Output: -
'''
# determine jitter
self._jitter = 1 + (random.random() * jitter_range)
# apply jitter on layers
try: self.ecuSW.comm_mod.physical_lay.transceiver._jitter = self._jitter
except: pass
try: self.ecuSW.comm_mod._jitter = self._jitter
except: pass
try: self.ecuSW.comm_mod.transp_lay._jitter = self._jitter
except: pass
try: self.ecuSW.comm_mod.datalink_lay._jitter = self._jitter
except: pass
try: self.ecuSW.comm_mod.physical_lay.transceiver._jitter = self._jitter
except: pass
try: self.ecuSW.app_lay._jitter = self._jitter
except: pass
def _connect_hw_sw(self):
''' connect all hardware components with their
associated software connections
Input: -
Output: -
'''
# application Layer
self.ecuSW.app_lay.microcontroller = self.ecuHW.mic_controller
# physical and data link layer '''
self.ecuSW.comm_mod.datalink_lay.controller = self.ecuHW.controller
self.ecuSW.comm_mod.physical_lay.transceiver = self.ecuHW.transceiver
self.ecuSW.comm_mod.datalink_lay.effective_bittime = self._effective_bittime
def connect_to(self, bus):
''' connects the bus to the ECU
Input: bus CANBus Bus that will be connected
Output: -
'''
self.ecuHW.transceiver.connect_bus(bus)
self.connected_bus = bus
def get_type_id(self):
''' returns the id of this ECU type
Input: -
Output: ecu_type string type of this ECU; e.g.'TLSECU'
'''
raise NotImplementedError(" get_type_id() was not implemented by class %s" % self.__class__)
def get_rec_buffer_items(self):
''' returns the current content of the receiving buffer
Input: -
Output: rec_buffer list list of items in the receiving buffer
'''
return self.ecuHW.controller.receive_buffer.items
def get_trans_buffer_items(self):
''' returns the current content of the transmit buffer
Input: -
Output: trans_buffer list list of items in the transmit buffer
'''
return self.ecuHW.controller.transmit_buffer.items
def install_hw_filter(self, allowed_items_list):
''' installs a hardware filter that filters all
message ids that are not defined in the passed
list. This filter is applied on the transceiver
Input: allowed_items_list list list of message_ids that are let pass by the transceiver
Output: -
'''
try:
self.ecuHW.transceiver.install_filter(allowed_items_list)
except:
L().log_err(300)
def _GET_ABSTRACT_ECU(self):
        ''' marker that this is an AbstractECU '''
return self._ABSTRACT_ECU
@property
def ecu_id(self):
return self._ecu_id
@ecu_id.setter
def ecu_id(self, value):
self._ecu_id = value
def set_monitor(self, monitor):
self.monitor = monitor
| PhilippMundhenk/IVNS | ECUSimulation/components/base/ecu/types/abst_ecu.py | Python | mit | 5,802 |
import boto3
import logging
import argparse
import os
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
import json
import decimal
import time
import datetime
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import contracts
from rest import IGParams, IGClient
import asyncio
import uuid
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
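# Illustrative example (added for clarity): DynamoDB returns numbers as
# decimal.Decimal, which json.dumps cannot serialize by default. With this
# encoder:
#   json.dumps({'price': decimal.Decimal('9.5')}, cls=DecimalEncoder)  # -> '{"price": 9.5}'
#   json.dumps({'qty': decimal.Decimal('3')}, cls=DecimalEncoder)      # -> '{"qty": 3}'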
class Utils(object):
def __init__(self):
pass
@staticmethod
def reliable(func):
def _decorator(self, *args, **kwargs):
tries = 0
result = func(self, *args, **kwargs)
if result is None:
while result is None and tries < 10:
tries += 1
time.sleep(2 ** tries)
result = func(self, *args, **kwargs)
return result
return _decorator
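    # Note (added for clarity): a method wrapped with @Utils.reliable is retried
    # while it returns None, up to 10 extra attempts, sleeping 2 ** tries seconds
    # between attempts (2s, 4s, ... up to ~17 minutes before the final retry).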
class CapsuleParams(object):
def __init__(self):
self.Region = ''
self.Instance = ''
self.Email = ''
self.Iam = ''
self.User = ''
self.Password = ''
self.Smtp = ''
class CapsuleController(object):
def __init__(self, params):
self.secDef = contracts.SecurityDefinition()
self.Email = params.Email
self.Iam = params.Iam
self.User = params.User
self.Password = params.Password
self.Smtp = params.Smtp
self.Logger = logging.getLogger()
self.Logger.setLevel(logging.INFO)
ec2 = boto3.resource('ec2', region_name=params.Region)
self.__Instance = ec2.Instance(params.Instance)
db = boto3.resource('dynamodb', region_name=params.Region)
self.__QuotesEod = db.Table('Quotes.EOD')
self.__Securities = db.Table('Securities')
self.__Orders = db.Table('Orders')
s3 = boto3.resource('s3')
debug = os.environ['DEBUG_FOLDER']
self.__debug = s3.Object(debug, 'vix_roll.txt')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(threadName)s - %(message)s')
self.Logger.info('InstanceController Created. Region: %s Instance: %s' % (params.Region, params.Instance))
def AttemptsCount(self):
timestamp = int((datetime.datetime.now() - datetime.timedelta(hours=2)).timestamp()) * 1000
logs = boto3.client('logs')
log_group = '/aws/docker/Capsule'
data = logs.describe_log_streams(logGroupName=log_group, orderBy='LastEventTime', descending=True)
streams = filter(lambda x: x['creationTime'] > timestamp, data['logStreams'])
count = 0
for stream in streams:
lines = logs.get_log_events(logGroupName=log_group,
logStreamName=stream['logStreamName'])
for line in lines['events']:
if 'LogStream Created:' in line['message']:
count += 1
self.Logger.info('Capsule ran %s times in the last 2 hours' % count)
return count
def SendEmail(self, text):
msg = MIMEMultipart('alternative')
msg['Subject'] = 'NIGHTWATCH ALERT'
msg['From'] = self.Email
msg['To'] = self.Email
mime_text = MIMEText(text, 'html')
msg.attach(mime_text)
server = smtplib.SMTP(self.Smtp, 587, timeout=10)
server.set_debuglevel(10)
server.starttls()
server.ehlo()
server.login(self.User, self.Password)
server.sendmail(self.Email, self.Email, msg.as_string())
res = server.quit()
self.Logger.info(res)
def ValidateStrategy(self):
today = datetime.date.today().strftime("%Y%m%d")
fileObj = self.__debug.get()['Body']
ch = fileObj.read(1)
line = ''
while ch:
if ch.decode("utf-8") == '\n':
if today in line:
return True
line = ''
else:
line += ch.decode("utf-8")
ch = fileObj.read(1)
return False
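    # Note (added for clarity): ValidateStrategy streams the S3 trace file one
    # byte at a time and returns True as soon as a completed line contains
    # today's YYYYMMDD date; if no such line is found it returns False.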
@Utils.reliable
def SuspendTrading(self, symbol, broker):
try:
response = self.__Securities.update_item(
Key={
'Symbol': symbol,
'Broker': broker,
},
UpdateExpression="set #te = :te",
ExpressionAttributeNames={
'#te': 'TradingEnabled'
},
ExpressionAttributeValues={
':te': False
},
ReturnValues="UPDATED_NEW")
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
except Exception as e:
self.Logger.error(e)
else:
self.Logger.info('Security Updated')
self.Logger.info(json.dumps(response, indent=4, cls=DecimalEncoder))
return response
@Utils.reliable
def SendOrder(self, symbol, maturity, side, size, price, orderType, fillTime, dealId, broker, productType):
try:
order = {
"Side": side,
"Size": decimal.Decimal(str(size)),
"OrdType": orderType}
trade = {
"FillTime": fillTime,
"Side": side,
"FilledSize": decimal.Decimal(str(size)),
"Price": decimal.Decimal(str(price)),
"Broker": {"Name": broker, "RefType": "dealId", "Ref": dealId},
}
strategy = {
"Name": "SYSTEM",
"Reason": "STOP_TRIGGERED"
}
response = self.__Orders.update_item(
Key={
'OrderId': str(uuid.uuid4().hex),
'TransactionTime': str(time.time()),
},
UpdateExpression="set #st = :st, #s = :s, #m = :m, #p = :p, #b = :b, #o = :o, #t = :t, #str = :str",
ExpressionAttributeNames={
'#st': 'Status',
'#s': 'Symbol',
'#m': 'Maturity',
'#p': 'ProductType',
'#b': 'Broker',
'#o': 'Order',
'#t': 'Trade',
'#str': 'Strategy'
},
ExpressionAttributeValues={
':st': 'FILLED',
':s': symbol,
':m': maturity,
':p': productType,
':b': broker,
':o': order,
':t': trade,
':str': strategy
},
ReturnValues="UPDATED_NEW")
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
except Exception as e:
self.Logger.error(e)
else:
self.Logger.info('Order Created')
self.Logger.info(json.dumps(response, indent=4, cls=DecimalEncoder))
return response
def FindSystemStopOrders(self):
"""
Update Orders table if the stop order was executed by the broker
:return:
None
"""
params = IGParams()
params.Url = os.environ['IG_URL']
params.Key = os.environ['X_IG_API_KEY']
params.Identifier = os.environ['IDENTIFIER']
params.Password = os.environ['PASSWORD']
self.Logger.info('Checking if any stop order was triggered')
async def read():
async with IGClient(params, self.Logger) as client:
auth = await client.Login()
self.Logger.info('Auth: %s' % auth)
lastMonth = datetime.date.today() - datetime.timedelta(days=30)
activities = await client.GetActivities(lastMonth.strftime('%Y-%m-%d'), True)
self.Logger.info('activities: %s' % activities)
await client.Logout()
if activities is not None and 'activities' in activities and len(activities['activities']) > 0:
stopTriggered = [tran for tran in activities['activities']
if tran['channel'] == 'SYSTEM' and 'details' in tran]
if len(stopTriggered) == 0:
self.Logger.info('No stops were triggered')
return
filled = self.GetOrders('Status', 'FILLED')
self.Logger.info('All filled %s' % filled)
for tran in stopTriggered:
for action in tran['details']['actions']:
if action['actionType'] == 'POSITION_CLOSED':
self.Logger.info('affectedDealId: %s' % action['affectedDealId'])
already_done = [o for o in filled if 'Broker'in o['Trade'] and 'Ref'
in o['Trade']['Broker'] and o['Trade']['Broker']['Ref'] == tran['dealId']
and o['Strategy']['Name'] == 'SYSTEM']
if len(already_done) == 1:
self.Logger.info('Already filled this unaccounted stop %s' % tran['dealId'])
continue
found = [o for o in filled if 'Broker'in o['Trade'] and 'Ref' in o['Trade']['Broker']
and o['Trade']['Broker']['Ref'] == action['affectedDealId']]
if len(found) == 1:
f = found[0]
self.Logger.info('Unaccounted stop found %s' % found)
self.SuspendTrading(f['Symbol'], 'IG')
self.SendOrder(f['Symbol'], f['Maturity'], tran['details']['direction'],
tran['details']['size'], tran['details']['level'],
'STOP', tran['date'], tran['dealId'], 'IG', f['ProductType'])
self.SendEmail('STOP Order was triggered by IG. Trading in %s is suspended'
% f['Symbol'])
app_loop = asyncio.get_event_loop()
app_loop.run_until_complete(read())
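        # Note (added for clarity): read() logs into IG, pulls the last 30 days of
        # account activity, and for every SYSTEM-channel position close that is not
        # already booked it suspends trading for the symbol, records a STOP order in
        # the Orders table and sends an alert email.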
def ValidateExecutor(self):
pending = self.GetOrders('Status', 'PENDING')
if pending is not None and len(pending) > 0:
self.SendEmail('There are %s PENDING Orders in Chaos' % len(pending))
failed = self.GetOrders('Status', 'FAILED')
if failed is not None and len(failed) > 0:
self.SendEmail('There are %s FAILED Orders in Chaos' % len(failed))
def EndOfDay(self):
allFound = True
for security in filter(lambda x: x['SubscriptionEnabled'], self.GetSecurities()):
today = datetime.date.today().strftime("%Y%m%d")
symbols = []
if security['ProductType'] == 'IND':
symbols = [security['Symbol']]
if security['ProductType'] == 'FUT':
symbols = self.secDef.get_futures(security['Symbol'], 2) # get two front months
for symbol in symbols:
found = self.GetQuotes(symbol, today)
if len(found) > 0:
self.Logger.info('Found Symbols: %s' % found)
else:
self.Logger.error('Failed to find data for %s' % symbol)
allFound &= len(found) > 0
if allFound:
self.Logger.info('All Found. Stopping EC2 Instance')
if self.IsInstanceRunning():
self.StopInstance()
if not self.ValidateStrategy():
self.SendEmail('The VIX Roll strategy left no TRACE file today')
else:
self.Logger.info('Not All Found. Will try again. Restarting EC2 Instance')
if self.IsInstanceRunning():
self.StopInstance()
if self.AttemptsCount() >= 3:
self.SendEmail('Capsule could not retrieve market data after %s attempts' % str(3))
return
self.StartInstance()
def IsInstanceRunning(self):
self.Logger.info('Instance Id: %s, State: %s' % (self.__Instance.instance_id, self.__Instance.state))
return self.__Instance.state['Name'] == 'running'
def StartInstance(self):
self.__Instance.start()
self.__Instance.wait_until_running()
self.Logger.info('Started instance: %s' % self.__Instance.instance_id)
def StopInstance(self):
self.__Instance.stop()
self.__Instance.wait_until_stopped()
self.Logger.info('Stopped instance: %s' % self.__Instance.instance_id)
@Utils.reliable
def GetOrders(self, key, value):
try:
self.Logger.info('Calling orders scan attr: %s, %s' % (key, value))
response = self.__Orders.scan(FilterExpression=Attr(key).eq(value))
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
return None
except Exception as e:
self.Logger.error(e)
return None
else:
if 'Items' in response:
return response['Items']
@Utils.reliable
def GetSecurities(self):
try:
self.Logger.info('Calling securities scan ...')
response = self.__Securities.scan(FilterExpression=Attr('SubscriptionEnabled').eq(True))
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
return None
except Exception as e:
self.Logger.error(e)
return None
else:
if 'Items' in response:
return response['Items']
@Utils.reliable
def GetQuotes(self, symbol, date):
try:
self.Logger.info('Calling quotes query Date key: %s' % date)
response = self.__QuotesEod.query(
KeyConditionExpression=Key('Symbol').eq(symbol) & Key('Date').eq(date)
)
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
return None
except Exception as e:
self.Logger.error(e)
return None
else:
self.Logger.info(json.dumps(response, indent=4, cls=DecimalEncoder))
if 'Items' in response:
return response['Items']
def lambda_handler(event, context):
params = CapsuleParams()
params.Region = os.environ["NIGHT_WATCH_REGION"]
params.Instance = os.environ["NIGHT_WATCH_INSTANCE"]
params.Email = os.environ["NIGHT_WATCH_EMAIL"]
params.Iam = os.environ["NIGHT_WATCH_IAM"]
params.User = os.environ["NIGHT_WATCH_USER"]
params.Password = os.environ["NIGHT_WATCH_PASSWORD"]
params.Smtp = os.environ["NIGHT_WATCH_SMTP"]
controller = CapsuleController(params)
try:
controller.FindSystemStopOrders()
except Exception as e:
controller.Logger.error('FindSystemStopOrders: %s' % e)
controller.ValidateExecutor()
controller.EndOfDay()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--region', help='AWS region', required=True)
parser.add_argument('--instance', help='EC2 instance', required=True)
parser.add_argument('--email', help='Email address', required=True)
parser.add_argument('--iam', help='IAM Role', required=True)
parser.add_argument('--user', help='SMTP User', required=True)
parser.add_argument('--password', help='SMTP Password', required=True)
parser.add_argument('--smtp', help='SMTP Address', required=True)
parser.add_argument('--debug', help='Debug Folder', required=True)
args = parser.parse_args()
os.environ["NIGHT_WATCH_REGION"] = args.region
os.environ["NIGHT_WATCH_INSTANCE"] = args.instance
os.environ["NIGHT_WATCH_EMAIL"] = args.email
os.environ["NIGHT_WATCH_IAM"] = args.iam
os.environ["NIGHT_WATCH_USER"] = args.user
os.environ["NIGHT_WATCH_PASSWORD"] = args.password
os.environ["NIGHT_WATCH_SMTP"] = args.smtp
os.environ["DEBUG_FOLDER"] = args.debug
event = ''
context = ''
lambda_handler(event, context)
if __name__ == "__main__":
main()
| th3sys/capsule | nightwatch.py | Python | mit | 16,596 |
from django import template
from django.conf import settings
register = template.Library()
# settings value
@register.assignment_tag
def get_google_maps_key():
return getattr(settings, 'GOOGLE_MAPS_KEY', "")
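# Illustrative template usage (assuming the library loads as "home_tags"):
#   {% load home_tags %}
#   {% get_google_maps_key as google_maps_key %}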
| linuxsoftware/mamc-wagtail-site | home/templatetags/home_tags.py | Python | mit | 217 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
import math
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
# region proposal network (RPN) or not
__C.IS_RPN = False
__C.FLIP_X = False
__C.INPUT = 'COLOR'
# multiscale training and testing
__C.IS_MULTISCALE = True
__C.IS_EXTRAPOLATING = True
#
__C.REGION_PROPOSAL = 'RPN'
__C.NET_NAME = 'CaffeNet'
__C.SUBCLS_NAME = 'voxel_exemplars'
#
# Training options
#
__C.TRAIN = edict()
__C.TRAIN.VISUALIZE = False
__C.TRAIN.VERTEX_REG = False
__C.TRAIN.GRID_SIZE = 256
__C.TRAIN.CHROMATIC = False
# Scales to compute real features
__C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# The number of scales per octave in the image pyramid
# An octave is the set of scales up to half of the initial scale
__C.TRAIN.NUM_PER_OCTAVE = 4
# parameters for ROI generating
__C.TRAIN.SPATIAL_SCALE = 0.0625
__C.TRAIN.KERNEL_SIZE = 5
# Aspect ratio to use during training
__C.TRAIN.ASPECTS = (1, 0.75, 0.5, 0.25)
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = (0.5,)
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = (0.5,)
__C.TRAIN.BG_THRESH_LO = (0.1,)
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = (0.5,)
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Train using subclasses
__C.TRAIN.SUBCLS = True
# Train using viewpoint
__C.TRAIN.VIEWPOINT = False
# Threshold of ROIs in training RCNN
__C.TRAIN.ROI_THRESHOLD = 0.1
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor is satisfied by both positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
__C.TRAIN.RPN_BASE_SIZE = 16
__C.TRAIN.RPN_ASPECTS = [0.25, 0.5, 0.75, 1, 1.5, 2, 3] # 7 aspects
__C.TRAIN.RPN_SCALES = [2, 2.82842712, 4, 5.65685425, 8, 11.3137085, 16, 22.627417, 32, 45.254834] # 2**np.arange(1, 6, 0.5), 10 scales
#
# Testing options
#
__C.TEST = edict()
__C.TEST.IS_PATCH = False
__C.TEST.VERTEX_REG = False
__C.TEST.VISUALIZE = False
# Scales to compute real features
__C.TEST.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# The number of scales per octave in the image pyramid
# An octave is the set of scales up to half of the initial scale
__C.TEST.NUM_PER_OCTAVE = 4
# Aspect ratio to use during testing
__C.TEST.ASPECTS = (1, 0.75, 0.5, 0.25)
# parameters for ROI generating
__C.TEST.SPATIAL_SCALE = 0.0625
__C.TEST.KERNEL_SIZE = 5
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.5
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, e.g., with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Test using subclass
__C.TEST.SUBCLS = True
# Train using viewpoint
__C.TEST.VIEWPOINT = False
# Threshold of ROIs in testing
__C.TEST.ROI_THRESHOLD = 0.1
__C.TEST.ROI_THRESHOLD_NUM = 80000
__C.TEST.ROI_NUM = 2000
__C.TEST.DET_THRESHOLD = 0.0001
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net):
"""Return the directory where experimental artifacts are placed.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
path = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is None:
return path
else:
return osp.join(path, net.name)
def _add_more_info(is_train):
# compute all the scales
if is_train:
scales_base = __C.TRAIN.SCALES_BASE
num_per_octave = __C.TRAIN.NUM_PER_OCTAVE
else:
scales_base = __C.TEST.SCALES_BASE
num_per_octave = __C.TEST.NUM_PER_OCTAVE
num_scale_base = len(scales_base)
num = (num_scale_base - 1) * num_per_octave + 1
scales = []
for i in xrange(num):
index_scale_base = i / num_per_octave
sbase = scales_base[index_scale_base]
j = i % num_per_octave
if j == 0:
scales.append(sbase)
else:
sbase_next = scales_base[index_scale_base+1]
step = (sbase_next - sbase) / num_per_octave
scales.append(sbase + j * step)
if is_train:
__C.TRAIN.SCALES = scales
else:
__C.TEST.SCALES = scales
print scales
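    # Worked example (added for clarity): with the defaults above,
    # SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0) and NUM_PER_OCTAVE = 4,
    # this yields 17 scales:
    # [0.25, 0.3125, 0.375, 0.4375, 0.5, 0.625, 0.75, 0.875, 1.0,
    #  1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]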
# map the scales to scales for RoI pooling of classification
if is_train:
kernel_size = __C.TRAIN.KERNEL_SIZE / __C.TRAIN.SPATIAL_SCALE
else:
kernel_size = __C.TEST.KERNEL_SIZE / __C.TEST.SPATIAL_SCALE
area = kernel_size * kernel_size
scales = np.array(scales)
areas = np.repeat(area, num) / (scales ** 2)
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)
if is_train:
__C.TRAIN.SCALE_MAPPING = levels
else:
__C.TEST.SCALE_MAPPING = levels
# compute width and height of grid box
if is_train:
area = __C.TRAIN.KERNEL_SIZE * __C.TRAIN.KERNEL_SIZE
aspect = __C.TRAIN.ASPECTS # height / width
else:
area = __C.TEST.KERNEL_SIZE * __C.TEST.KERNEL_SIZE
aspect = __C.TEST.ASPECTS # height / width
num_aspect = len(aspect)
widths = np.zeros((num_aspect), dtype=np.float32)
heights = np.zeros((num_aspect), dtype=np.float32)
for i in xrange(num_aspect):
widths[i] = math.sqrt(area / aspect[i])
heights[i] = widths[i] * aspect[i]
if is_train:
__C.TRAIN.ASPECT_WIDTHS = widths
__C.TRAIN.ASPECT_HEIGHTS = heights
__C.TRAIN.RPN_SCALES = np.array(__C.TRAIN.RPN_SCALES)
else:
__C.TEST.ASPECT_WIDTHS = widths
__C.TEST.ASPECT_HEIGHTS = heights
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.iteritems():
# a must specify keys that are in b
if not b.has_key(k):
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
if type(b[k]) is not type(v):
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
_add_more_info(1)
_add_more_info(0)
| yuxng/Deep_ISM | ISM/lib/ism/config.py | Python | mit | 10,824 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-14 02:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Building',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('size', models.PositiveSmallIntegerField()),
],
),
migrations.CreateModel(
name='Floor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('size', models.PositiveSmallIntegerField()),
('number', models.PositiveSmallIntegerField()),
('building', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='floors', to='buildings.Building')),
],
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('size', models.PositiveSmallIntegerField()),
('name', models.CharField(max_length=255)),
('floor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rooms', to='buildings.Floor')),
],
),
]
| foohooboo/graphql-cookiecutter | graphql_cookiecutter/buildings/migrations/0001_initial.py | Python | mit | 1,612 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Paths(object):
"""Paths operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
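    # Note (added for clarity, not generated by AutoRest): every operation below
    # follows the same pattern -- format the URL from path_format_arguments, send a
    # GET request, raise models.ErrorException on an unexpected status code, and
    # return a ClientRawResponse only when raw=True (otherwise return None).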
def get_boolean_true(
self, bool_path=False, custom_headers=None, raw=False, **operation_config):
"""
Get true Boolean value on path
:param bool_path: true boolean value
:type bool_path: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/bool/true/{boolPath}'
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_boolean_false(
self, bool_path=False, custom_headers=None, raw=False, **operation_config):
"""
Get false Boolean value on path
:param bool_path: false boolean value
:type bool_path: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/bool/false/{boolPath}'
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_one_million(
self, int_path=1000000, custom_headers=None, raw=False, **operation_config):
"""
Get '1000000' integer value
:param int_path: '1000000' integer value
:type int_path: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/int/1000000/{intPath}'
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_negative_one_million(
self, int_path=-1000000, custom_headers=None, raw=False, **operation_config):
"""
Get '-1000000' integer value
:param int_path: '-1000000' integer value
:type int_path: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/int/-1000000/{intPath}'
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_ten_billion(
self, long_path=10000000000, custom_headers=None, raw=False, **operation_config):
"""
Get '10000000000' 64 bit integer value
:param long_path: '10000000000' 64 bit integer value
:type long_path: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/long/10000000000/{longPath}'
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_negative_ten_billion(
self, long_path=-10000000000, custom_headers=None, raw=False, **operation_config):
"""
Get '-10000000000' 64 bit integer value
:param long_path: '-10000000000' 64 bit integer value
:type long_path: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/long/-10000000000/{longPath}'
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_positive(
self, float_path=1.034E+20, custom_headers=None, raw=False, **operation_config):
"""
Get '1.034E+20' numeric value
        :param float_path: '1.034E+20' numeric value
:type float_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/float/1.034E+20/{floatPath}'
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_negative(
self, float_path=-1.034E-20, custom_headers=None, raw=False, **operation_config):
"""
Get '-1.034E-20' numeric value
        :param float_path: '-1.034E-20' numeric value
:type float_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/float/-1.034E-20/{floatPath}'
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_positive(
self, double_path=9999999.999, custom_headers=None, raw=False, **operation_config):
"""
Get '9999999.999' numeric value
        :param double_path: '9999999.999' numeric value
:type double_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/double/9999999.999/{doublePath}'
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_negative(
self, double_path=-9999999.999, custom_headers=None, raw=False, **operation_config):
"""
Get '-9999999.999' numeric value
        :param double_path: '-9999999.999' numeric value
:type double_path: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/double/-9999999.999/{doublePath}'
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_unicode(
self, string_path="啊齄丂狛狜隣郎隣兀﨩", custom_headers=None, raw=False, **operation_config):
"""
Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value
        :param string_path: '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/unicode/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_url_encoded(
self, string_path="begin!*'();:@ &=+$,/?#[]end", custom_headers=None, raw=False, **operation_config):
"""
        Get 'begin!*'();:@ &=+$,/?#[]end'
:param string_path: 'begin!*'();:@ &=+$,/?#[]end' url encoded string
value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_empty(
self, string_path="", custom_headers=None, raw=False, **operation_config):
"""
Get ''
:param string_path: '' string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/empty/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_null(
self, string_path, custom_headers=None, raw=False, **operation_config):
"""
Get null (should throw)
:param string_path: null string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/null/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_valid(
self, enum_path, custom_headers=None, raw=False, **operation_config):
"""
Get using uri with 'green color' in path parameter
:param enum_path: send the value green. Possible values include: 'red
color', 'green color', 'blue color'
:type enum_path: str or :class:`UriColor
<fixtures.acceptancetestsurl.models.UriColor>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/enum/green%20color/{enumPath}'
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'UriColor')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_null(
self, enum_path, custom_headers=None, raw=False, **operation_config):
"""
Get null (should throw on the client before the request is sent on
        the wire)
:param enum_path: send null should throw. Possible values include:
'red color', 'green color', 'blue color'
:type enum_path: str or :class:`UriColor
<fixtures.acceptancetestsurl.models.UriColor>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/null/{enumPath}'
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'UriColor')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_multi_byte(
self, byte_path, custom_headers=None, raw=False, **operation_config):
"""
Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array
:param byte_path: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte
array
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/byte/multibyte/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_empty(
self, byte_path=bytearray("", encoding="utf-8"), custom_headers=None, raw=False, **operation_config):
"""
Get '' as byte array
:param byte_path: '' as byte array
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/byte/empty/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_null(
self, byte_path, custom_headers=None, raw=False, **operation_config):
"""
Get null as byte array (should throw)
:param byte_path: null as byte array (should throw)
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/byte/null/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_valid(
self, date_path, custom_headers=None, raw=False, **operation_config):
"""
Get '2012-01-01' as date
:param date_path: '2012-01-01' as date
:type date_path: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/date/2012-01-01/{datePath}'
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_null(
self, date_path, custom_headers=None, raw=False, **operation_config):
"""
Get null as date - this should throw or be unusable on the client
side, depending on date representation
:param date_path: null as date (should throw)
:type date_path: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/date/null/{datePath}'
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_valid(
self, date_time_path, custom_headers=None, raw=False, **operation_config):
"""
Get '2012-01-01T01:01:01Z' as date-time
:param date_time_path: '2012-01-01T01:01:01Z' as date-time
:type date_time_path: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}'
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_null(
self, date_time_path, custom_headers=None, raw=False, **operation_config):
"""
Get null as date-time, should be disallowed or throw depending on
representation of date-time
:param date_time_path: null as date-time
:type date_time_path: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/datetime/null/{dateTimePath}'
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def base64_url(
self, base64_url_path, custom_headers=None, raw=False, **operation_config):
"""
Get 'lorem' encoded value as 'bG9yZW0' (base64url)
:param base64_url_path: base64url encoded value
:type base64_url_path: bytes
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/string/bG9yZW0/{base64UrlPath}'
path_format_arguments = {
'base64UrlPath': self._serialize.url("base64_url_path", base64_url_path, 'base64')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_csv_in_path(
self, array_path, custom_headers=None, raw=False, **operation_config):
"""
Get an array of string ['ArrayPath1', 'begin!*'();:@ &=+$,/?#[]end' ,
null, ''] using the csv-array format
:param array_path: an array of string ['ArrayPath1', 'begin!*'();:@
&=+$,/?#[]end' , null, ''] using the csv-array format
:type array_path: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/array/ArrayPath1%2cbegin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend%2c%2c/{arrayPath}'
path_format_arguments = {
'arrayPath': self._serialize.url("array_path", array_path, '[str]', div=',')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def unix_time_url(
self, unix_time_url_path, custom_headers=None, raw=False, **operation_config):
"""
Get the date 2016-04-13 encoded value as '1460505600' (Unix time)
:param unix_time_url_path: Unix time encoded value
:type unix_time_url_path: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/paths/int/1460505600/{unixTimeUrlPath}'
path_format_arguments = {
'unixTimeUrlPath': self._serialize.url("unix_time_url_path", unix_time_url_path, 'unix-time')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
| sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Url/autoresturltestservice/operations/paths.py | Python | mit | 44,870 |
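# Hedged usage sketch for the generated path operations above. The client class
# name (AutoRestUrlTestService), its constructor, and the `paths` operations
# attribute are assumptions inferred from the package path; a local Swagger test
# server is also assumed.
#
#     from datetime import datetime
#     from autoresturltestservice import AutoRestUrlTestService
#
#     client = AutoRestUrlTestService(base_url='http://localhost:3000')
#     client.paths.date_time_valid(datetime(2012, 1, 1, 1, 1, 1))  # expects HTTP 200
#     client.paths.base64_url(b'lorem')                            # sent as 'bG9yZW0'
#     client.paths.unix_time_url(datetime(2016, 4, 13))            # sent as 1460505600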
from pseudoregion import *
class Edge(PseudoRegion):
"""EDGE Fringe field and other kicks for hard-edged field models
1) edge type (A4) {SOL, DIP, HDIP, DIP3, QUAD, SQUA, SEX, BSOL, FACE}
2.1) model # (I) {1}
2.2-5) p1, p2, p3,p4 (R) model-dependent parameters
Edge type = SOL
p1: BS [T]
If the main solenoid field is B, use p1=-B for the entrance edge and p1=+B for the exit edge.
Edge type = DIP
p1: BY [T]
Edge type = HDIP
p1: BX [T]
Edge type = DIP3
p1: rotation angle [deg]
p2: BY0 [T]
p3: flag 1:in 2:out
Edge type = QUAD
p1: gradient [T/m]
Edge type = SQUA
p1: gradient [T/m]
Edge type = SEX
p1: b2 [T/m2] (cf. C. Wang & L. Teng, MC 207)
Edge type = BSOL
p1: BS [T]
p2: BY [T]
p3: 0 for entrance face, 1 for exit face
Edge type = FACE
This gives vertical focusing from rotated pole faces.
p1: pole face angle [deg]
p2: radius of curvature of reference particle [m]
p3: if not 0 => correct kick by factor 1/(1+delta)
p4: if not 0 ==> apply horizontal focus with strength = (-vertical strength)
If a FACE command is used before and after a sector dipole (DIP), you can approximate a rectangular dipole field.
The DIP, HDIP, QUAD, SQUA, SEX and BSOL edge types use Scott Berg's HRDEND routine to find the change in transverse
position and transverse momentum due to the fringe field.
"""
def __init__(
self,
edge_type,
model,
model_parameters_list,
name=None,
metadata=None):
PseudoRegion.__init__(self, name, metadata)
self.edge_type = edge_type
self.model = model
        self.model_parameters = model_parameters_list
class Edge(Field):
"""
EDGE
1) edge type (A4) {SOL, DIP, HDIP,DIP3,QUAD,SQUA,SEX, BSOL,FACE}
2.1) model # (I) {1}
2.2-5) p1, p2, p3,p4 (R) model-dependent parameters
Edge type = SOL
p1: BS [T]
If the main solenoid field is B, use p1=-B for the entrance edge and p1=+B for the exit edge.
Edge type = DIP
p1: BY [T]
Edge type = HDIP
p1: BX [T]
Edge type = DIP3
p1: rotation angle [deg]
p2: BY0 [T]
p3: flag 1:in 2:out
Edge type = QUAD
p1: gradient [T/m]
Edge type = SQUA
p1: gradient [T/m]
Edge type = SEX
p1: b2 [T/m2] (cf. C. Wang & L. Teng, MC 207)
Edge type = BSOL
p1: BS [T]
p2: BY [T]
p3: 0 for entrance face, 1 for exit face
Edge type = FACE
This gives vertical focusing from rotated pole faces.
p1: pole face angle [deg]
p2: radius of curvature of reference particle [m]
p3: if not 0 => correct kick by the factor 1 / (1+δ)
p4: if not 0 => apply horizontal focus with strength = (-vertical strength)
If a FACE command is used before and after a sector dipole ( DIP ), you can approximate a rectangular dipole field.
The DIP, HDIP, QUAD, SQUA, SEX and BSOL edge types use Scott Berg’s HRDEND routine to find the change in
transverse position and transverse momentum due to the fringe field.
"""
begtag = 'EDGE'
endtag = ''
models = {
'model_descriptor': {
'desc': 'Name of model parameter descriptor',
'name': 'model',
'num_parms': 6,
'for001_format': {
'line_splits': [
1,
5]}},
'sol': {
'desc': 'Solenoid',
'doc': '',
'icool_model_name': 'SOL',
'parms': {
'model': {
'pos': 1,
'type': 'String',
'doc': ''},
'bs': {
'pos': 3,
'type': 'Real',
'doc': 'p1: BS [T] '
'If the main solenoid field is B, use p1=-B for the entrance edge and p1=+B for the '
'exit edge. (You can use this to get a tapered field profile)'}}},
}
def __init__(self, **kwargs):
Field.__init__(self, 'EDGE', kwargs)
def __call__(self, **kwargs):
Field.__call__(self, kwargs)
def __setattr__(self, name, value):
if name == 'ftag':
if value == 'EDGE':
object.__setattr__(self, name, value)
else:
# Should raise exception here
print '\n Illegal attempt to set incorrect ftag.\n'
else:
Field.__setattr__(self, name, value)
def __str__(self):
return Field.__str__(self)
def gen_fparm(self):
        Field.gen_fparm(self)
| jon2718/ipycool_2.0 | edge.py | Python | mit | 4,743 |
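# Schematic use (hedged) of the documented Edge constructor above, edge type SOL
# with model-dependent parameters p1..p4; the -2.0 T value is an arbitrary
# illustration. Note that, as written, the second Field-based class definition
# shadows the PseudoRegion-based one when the module is imported.
#
#     entrance = Edge(edge_type='SOL', model=1,
#                     model_parameters_list=[-2.0, 0.0, 0.0, 0.0],
#                     name='sol_entrance')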
"""code_for_good URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^scraper/', include('scraper.urls')),
url(r'^admin/', admin.site.urls),
]
| Spferical/cure-alzheimers-fund-tracker | code_for_good/urls.py | Python | mit | 826 |
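# Hedged illustration of the URLconf recipe described in the module docstring
# above; 'views.home' is an assumed view in the scraper app, not part of the
# original project.
#
#     from scraper import views
#     urlpatterns += [
#         url(r'^$', views.home, name='home'),
#     ]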
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^cards/', include('cards.urls')),
url(r'^tournaments/', include('tournaments.urls')),
url(r'^stats/', include('stats.urls'))
]
| thomasperrot/MTGTrader | mtg/config/urls.py | Python | mit | 915 |
from OpenGLCffi.GL import params
@params(api='gl', prms=['pname', 'index', 'val'])
def glGetMultisamplefvNV(pname, index, val):
pass
@params(api='gl', prms=['index', 'mask'])
def glSampleMaskIndexedNV(index, mask):
pass
@params(api='gl', prms=['target', 'renderbuffer'])
def glTexRenderbufferNV(target, renderbuffer):
pass
| cydenix/OpenGLCffi | OpenGLCffi/GL/EXT/NV/explicit_multisample.py | Python | mit | 332 |
import os, pickle, re, sys, rsa
from common.safeprint import safeprint
from common.call import parse
from multiprocessing import Lock
from hashlib import sha256
global bountyList
global bountyLock
global bounty_path
global masterKey
bountyList = []
bountyLock = Lock()
bounty_path = "data" + os.sep + "bounties.pickle"
masterKey = rsa.PublicKey(*pickle.load(open("master_public_key.pickle", "rb")))
def getUTC():
from calendar import timegm
from time import gmtime
return timegm(gmtime())
class Bounty(object):
"""An object representation of a Bounty
Parts:
ip -- The ip address of the requesting node
btc -- The Bitcoin address of the requesting party
reward -- The reward amount in satoshis to be given over 24 hours
(x == 0 or 1440 <= x <= 100000000) (1440 is 1 satoshi/min)
ident -- A value set by the issuer to help manage its related files
timeout -- Unix time at which the bounty expires (defaults to 24 hours)
data -- A dictionary containing optional, additional information
author -- String which represents the group providing the Bounty
reqs -- Dict containing requirements keyed by the related python call
("sys.platform":"win32")
perms -- Dict containing the minimum required security policies
(if empty, most restrictive assumed)
key -- A tuple which contains the RSA n and e values for this Bounty
(required only when reward is 0)
sig -- A Bytes object of str(Bounty) signed by the above key
(required only when reward is 0)
TDL -- More to be defined in later versions
"""
def __repr__(self):
"""Gives a string representation of the bounty"""
output = "<Bounty: ip=" + str(self.ip) + ", btc=" + str(self.btc) + ", reward=" + str(self.reward)
output = output + ", id=" + str(self.ident) + ", timeout=" + str(self.timeout)
output = output + ", author=" + str(self.data.get('author'))
if self.data.get('reqs') != {} and isinstance(self.data.get('reqs'), dict):
output = output + ", reqs=" + str(sorted(self.data.get('reqs').items(), key=lambda x: x[0]))
if self.data.get('perms') != {} and isinstance(self.data.get('perms'), dict):
output = output + ", perms=" + str(sorted(self.data.get('perms').items(), key=lambda x: x[0]))
return output + ">"
def __eq__(self, other):
"""Determines whether the bounties are equal"""
try:
return (self.reward == other.reward) and (self.ident == other.ident) and (self.data == other.data)
except:
return other is not None
def __ne__(self, other):
"""Determines whether the bounties are unequal"""
try:
return not self.__eq__(other)
except:
return other is None
def __lt__(self, other):
"""Determines whether this bounty has a lower priority"""
try:
if self.reward < other.reward:
return True
elif self.timeout < other.timeout:
return True
else:
return False
except:
return other is not None
def __gt__(self, other):
"""Determines whether this bounty has a higher priority"""
try:
if self.reward > other.reward:
return True
elif self.timeout > other.timeout:
return True
else:
return False
except:
return other is None
def __le__(self, other):
"""Determines whether this bounty has a lower priority or is equal"""
boolean = self.__lt__(other)
if boolean:
return boolean
else:
return self.__eq__(other)
def __ge__(self, other):
"""Determines whether this bounty has a higher or is equal"""
boolean = self.__gt__(other)
if boolean:
return boolean
else:
return self.__eq__(other)
def __hash__(self):
return hash((self.__repr__(), str(self.data)))
def __init__(self, ipAddress, btcAddress, rewardAmount, **kargs):
"""Initialize a Bounty; constructor"""
self.ip = ipAddress
self.btc = btcAddress
self.reward = rewardAmount
self.ident = ''
if kargs.get('timeout') is not None:
self.timeout = kargs.get('timeout')
else:
self.timeout = getUTC() + 86400
self.data = {'author': '',
'reqs': {},
'perms': {}}
if kargs.get('ident') is not None:
self.ident = kargs.get('ident')
if kargs.get('dataDict') is not None:
self.data.update(kargs.get('dataDict'))
if kargs.get('keypair') is not None:
self.sign(kargs.get('keypair'))
def isValid(self):
"""Internal method which checks the Bounty as valid in the most minimal version
ip -- Must be in valid range
btc -- Must be in valid namespace
reward -- Must be in valid range
timeout -- Must be greater than the current time
"""
try:
safeprint("Testing IP address", verbosity=1)
if not checkIPAddressValid(self.ip):
return False
safeprint("Testing Bitcoin address", verbosity=1)
# The following is a soft check
# A deeper check will be needed in order to assure this is correct
if not checkBTCAddressValid(self.btc):
return False
safeprint("Testing reward and/or signiture validity", verbosity=1)
if self.reward not in range(1440, 100000001) or (not self.reward and self.checkSign()):
return False
safeprint("Testing timeout", verbosity=1)
if self.timeout < getUTC(): # check against current UTC
return False
safeprint("Testing bounty requirements", verbosity=1)
if parse(self.data.get('reqs')):
return 1
return -1
except:
return False
def isPayable(self, factor):
"""check if address has enough"""
return True # later make this a wrapper for pywallet.balance()
def checkSign(self):
"""check if the signature attatched to the Bounty is valid"""
try:
from rsa import verify, PublicKey
            safeprint(self.data.get('key'))
if self.data.get('cert'): # where key = (PublicKey.n, PublicKey.e)
expected = str(self).encode('utf-8')
data = self.data
n = data.get('key')[0]
e = data.get('key')[1]
if rsa.verify(str((n, e)).encode('utf-8'), data.get('cert'), masterKey):
return verify(expected, data.get('sig'), PublicKey(n, e))
return False
except:
return False
def sign(self, privateKey): # where privateKey is a private key generated by rsa.PrivateKey()
"""Signa bounty and attach the key value"""
try:
from rsa import sign
expected = str(self).encode('utf-8')
self.data.update({'key': (privateKey.n, privateKey.e),
'sig': sign(expected, privateKey, 'SHA-256')})
except:
return False
def checkBTCAddressValid(address):
"""Check to see if a Bitcoin address is within the valid namespace. Will potentially give false positives based on leading 1s"""
if not re.match(re.compile("^[a-km-zA-HJ-Z1-9]{26,35}$"), address):
return False
decimal = 0
for char in address:
decimal = decimal * 58 + '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'.index(char)
bcbytes = ""
if sys.version_info[0] < 3:
"""long does not have a to_bytes() in versions less than 3. This is an equivalent function"""
bcbytes = (('%%0%dx' % (25 << 1) % decimal).decode('hex')[-25:])
else:
bcbytes = decimal.to_bytes(25, 'big')
return bcbytes[-4:] == sha256(sha256(bcbytes[:-4]).digest()).digest()[:4]
def checkIPAddressValid(address):
try:
import socket
socket.getaddrinfo(*address)
a = len(address[0].split(":")) == 1 # Make sure it's not ipv6
b = len(address[0].split(".")) == 4 # Make sure it's not shortened
return a and b and address[1] in range(49152)
except:
return False
def verify(string):
"""External method which checks the Bounty as valid under implementation-specific requirements. This can be defined per user.
ip -- Must be in valid range
btc -- Must be in valid namespace
reward -- Must be in valid range
timeout -- Must be greater than the current time
"""
test = depickle(string)
try:
safeprint("Testing IP address", verbosity=1)
if not checkIPAddressValid(test.ip):
return False
safeprint("Testing Bitcoin address", verbosity=1)
# The following is a soft check
# A deeper check will be needed in order to assure this is correct
if not checkBTCAddressValid(test.btc):
return False
safeprint("Testing reward and/or signiture validity", verbosity=1)
if test.reward not in range(1440, 100000001) or (not test.reward and test.checkSign()):
return False
safeprint("Testing timeout", verbosity=1)
if test.timeout < getUTC(): # check against current UTC
return False
safeprint("Testing bounty requirements", verbosity=1)
if parse(test.data.get('reqs')):
return 1
return -1
except:
return False
def getBountyList():
"""Retrieves the bounty list. Temporary method"""
temp = []
with bountyLock:
temp = bountyList[:]
return temp
def saveToFile():
"""Save the current bounty list to a file"""
if not os.path.exists(bounty_path.split(os.sep)[0]):
os.mkdir(bounty_path.split(os.sep)[0])
pickle.dump(getBountyList(), open(bounty_path, "wb"), 0)
return True
def loadFromFile():
"""Load a previous bounty list from a file"""
if os.path.exists(bounty_path):
with bountyLock:
try:
safeprint("Loading bounty list from file", verbosity=2)
templist = pickle.load(open(bounty_path, "rb"))
safeprint(addBounties(templist), verbosity=3)
safeprint("Bounty list loaded and added", verbosity=2)
except:
return False
return True
return False
def depickle(string):
"""Handles the potential errors in unpickling a bounty"""
if isinstance(string, Bounty):
return string
safeprint([sys.version_info[0], sys.version_info[1], sys.version_info[2]])
if sys.version_info[0] == 2 and sys.version_info[1] == 6 and (isinstance(string, str) or isinstance(string, unicode)):
safeprint("Fed as string in 2.6; encoding ascii and ignoring errors")
try:
string = string.encode('ascii', 'ignore')
except:
string = str(string)
elif isinstance(string, str) and sys.version_info[0] >= 3:
safeprint("Fed as string; encoding utf-8")
string = string.encode('utf-8')
try:
return pickle.loads(string)
except:
return None
def addBounty(bounty):
"""Verify a bounty, and add it to the list if it is valid"""
bounty = depickle(bounty)
safeprint("External verify", verbosity=1)
first = verify(bounty)
safeprint("Internal verify")
second = bounty.isValid()
if not second:
rval = -3
elif not first:
rval = -2
elif bounty in getBountyList():
rval = -1
elif second == -1:
rval = 0
else:
rval = 1
addValidBounty(bounty)
return rval
def addValidBounty(bounty):
"""This adds a bounty to the list under the assumption that it's already been validated. Must be of type common.bounty.Bounty"""
with bountyLock:
global bountyList
bountyList.append(bounty)
temp = list(set(bountyList)) # trim it in the simplest way possible. Doesn't protect against malleability
del bountyList[:]
bountyList.extend(temp)
def internalVerify(bounty): # pragma: no cover
"""Proxy for the Bounty.isValid() method, for multiprocessing.Pool"""
return bounty.isValid()
def addBounties(bounties):
"""Add a list of bounties in parallel using multiprocessing.Pool for verification"""
from multiprocessing.pool import ThreadPool
pool = ThreadPool()
safeprint("Mapping verifications", verbosity=3)
    async_result = pool.map_async(verify, bounties) # defer this for possible efficiency boost
internal = pool.map(internalVerify, bounties)
safeprint("Waiting for verifications", verbosity=3)
    external = async_result.get()
safeprint("Received verifications", verbosity=3)
rvals = []
safeprint(internal)
safeprint(external)
for i in range(len(bounties)):
safeprint("Finishing the processing of bounty " + str(i+1) + "/" + str(len(bounties)), verbosity=2)
if not internal[i]:
rvals.append(-3)
elif not external[i]:
rvals.append(-2)
elif bounties[i] in bountyList:
rvals.append(-1)
elif internal[i] == -1:
rvals.append(0)
else:
rvals.append(1)
addValidBounty(bounties[i])
safeprint("Passed first if", verbosity=3)
safeprint("Verifications parsed", verbosity=3)
return rvals
def getBounty(charity, factor):
"""Retrieve the next best bounty from the list"""
global bountyList, bountyLock
best = None
bountyLock.__enter__()
temp = bountyList[:]
safeprint("bountyList = " + str(temp), verbosity=3)
for bounty in temp:
        if bounty.timeout < getUTC():
bountyList.remove(bounty)
continue
elif charity:
best = bounty
break
elif bounty > best:
best = bounty
bountyLock.__exit__()
return best
| gappleto97/Senior-Project | common/bounty.py | Python | mit | 14,300 |
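# Hedged usage sketch for the Bounty helpers above. Importing the module needs
# the package's master_public_key.pickle to be present; the IP, Bitcoin address
# and ident below are illustrative placeholders.
#
#     from common.bounty import Bounty, addBounty, getBounty
#
#     b = Bounty(('203.0.113.5', 8000), '1BoatSLRHtKNngkdXEeobR76b53LETtpyT', 2000,
#                ident='demo', dataDict={'author': 'example'})
#     print(addBounty(b))         # 1 on success, negative codes on failure
#     print(getBounty(False, 1))  # highest-priority bounty still in the list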
from collections import Counter
import sys
import numpy as np
import scipy as sp
from lexical_structure import WordEmbeddingDict
import dense_feature_functions as df
def _get_word2vec_ff(embedding_path, projection):
word2vec = df.EmbeddingFeaturizer(embedding_path)
if projection == 'mean_pool':
return word2vec.mean_args
elif projection == 'sum_pool':
return word2vec.additive_args
elif projection == 'max_pool':
return word2vec.max_args
elif projection == 'top':
return word2vec.top_args
else:
raise ValueError('projection must be one of {mean_pool, sum_pool, max_pool, top}. Got %s ' % projection)
def _get_zh_word2vec_ff(num_units, vec_type, projection, cdtb):
prefix = 'zh_gigaword3'
if cdtb:
file_name = '/data/word_embeddings/%s-%s%s-cdtb_vocab.txt' \
% (prefix, vec_type, num_units)
else:
file_name = '/data/word_embeddings/%s-%s%s.txt' \
% (prefix, vec_type, num_units)
word2vec = df.EmbeddingFeaturizer(file_name)
if projection == 'mean_pool':
return word2vec.mean_args
elif projection == 'sum_pool':
return word2vec.additive_args
elif projection == 'max_pool':
return word2vec.max_args
elif projection == 'top':
return word2vec.top_args
else:
raise ValueError('projection must be one of {mean_pool, sum_pool, max_pool, top}. Got %s ' % projection)
def _sparse_featurize_relation_list(relation_list, ff_list, alphabet=None):
if alphabet is None:
alphabet = {}
grow_alphabet = True
else:
grow_alphabet = False
feature_vectors = []
print 'Applying feature functions...'
for relation in relation_list:
feature_vector_indices = []
for ff in ff_list:
feature_vector = ff(relation)
for f in feature_vector:
if grow_alphabet and f not in alphabet:
alphabet[f] = len(alphabet)
if f in alphabet:
feature_vector_indices.append(alphabet[f])
feature_vectors.append(feature_vector_indices)
print 'Creating feature sparse matrix...'
feature_matrix = sp.sparse.lil_matrix((len(relation_list), len(alphabet)))
for i, fv in enumerate(feature_vectors):
feature_matrix[i, fv] = 1
return feature_matrix.tocsr(), alphabet
def sparse_featurize(relation_list_list, ff_list):
print 'Featurizing...'
data_list = []
alphabet = None
for relation_list in relation_list_list:
data, alphabet = _sparse_featurize_relation_list(relation_list, ff_list, alphabet)
data_list.append(data)
return (data_list, alphabet)
def convert_seconds_to_hours(num_seconds):
m, s = divmod(num_seconds, 60)
h, m = divmod(m, 60)
return (h, m, s)
def compute_mi(feature_matrix, label_vector):
"""Compute mutual information of each feature
"""
num_labels = np.max(label_vector) + 1
num_features = feature_matrix.shape[1]
num_rows = feature_matrix.shape[0]
total = num_rows + num_labels
c_y = np.zeros(num_labels)
for l in label_vector:
c_y[l] += 1.0
c_y += 1.0
c_x_y = np.zeros((num_features, num_labels))
c_x = np.zeros(num_features)
for i in range(num_rows):
c_x_y[:, label_vector[i]] += feature_matrix[i, :]
c_x += feature_matrix[i, :]
c_x_y += 1.0
c_x += 1.0
c_x_c_y = np.outer(c_x, c_y)
c_not_x_c_y = np.outer((total - c_x), c_y)
c_not_x_y = c_y - c_x_y
inner = c_x_y / total * np.log(c_x_y * total / c_x_c_y) + \
c_not_x_y / total * np.log(c_not_x_y * total / c_not_x_c_y)
mi_x = inner.sum(1)
return mi_x
def prune_feature_matrices(feature_matrices, mi, num_features):
sorted_indices = mi.argsort()[-num_features:]
return [x[:, sorted_indices] for x in feature_matrices]
class BrownDictionary(object):
def __init__(self):
self.word_to_brown_mapping = {}
self.num_clusters = 0
brown_cluster_file_name = 'brown-rcv1.clean.tokenized-CoNLL03.txt-c3200-freq1.txt'
#brown_cluster_file_name = 'brown-rcv1.clean.tokenized-CoNLL03.txt-c320-freq1.txt'
#brown_cluster_file_name = 'brown-rcv1.clean.tokenized-CoNLL03.txt-c100-freq1.txt'
self._load_brown_clusters('resources/%s' % brown_cluster_file_name)
def _load_brown_clusters(self, path):
try:
lexicon_file = open(path)
except:
            print 'failed to load brown cluster data'
cluster_set = set()
for line in lexicon_file:
cluster_assn, word, _ = line.split('\t')
if cluster_assn not in cluster_set:
cluster_set.add(cluster_assn)
self.word_to_brown_mapping[word] = len(cluster_set) - 1
self.num_clusters = len(cluster_set)
def _get_brown_cluster_bag(self, tokens):
bag = set()
for token in tokens:
if token in self.word_to_brown_mapping:
cluster_assn = self.word_to_brown_mapping[token]
if cluster_assn not in bag:
bag.add(cluster_assn)
return bag
def get_brown_sparse_matrices_relations(self, relations):
X1 = sp.sparse.csr_matrix((len(relations), self.num_clusters),dtype=float)
X2 = sp.sparse.csr_matrix((len(relations), self.num_clusters),dtype=float)
for i, relation in enumerate(relations):
bag1 = self._get_brown_cluster_bag(relation.arg_tokens(1))
for cluster in bag1:
X1[i, cluster] = 1.0
bag2 = self._get_brown_cluster_bag(relation.arg_tokens(2))
for cluster in bag2:
X2[i, cluster] = 1.0
return (X1, X2)
def get_brown_matrices_data(self, relation_list_list, use_sparse):
"""Extract sparse
For each directory, returns
(X1, X2, Y)
X1 and X2 are sparse matrices from arg1 and arg2 respectively.
Y is an integer vector of type int32
"""
data = []
alphabet = None
# load the data
for relation_list in relation_list_list:
# turn them into a data matrix
print 'Making matrices'
X1, X2 = self.get_brown_sparse_matrices_relations(relation_list)
if not use_sparse:
X1 = X1.toarray()
X2 = X2.toarray()
Y, alphabet = level2_labels(relation_list, alphabet)
data.append((X1, X2, Y))
return (data, alphabet)
def label_vectorize(relation_list_list, lf):
alphabet = {}
for i, valid_label in enumerate(lf.valid_labels()):
alphabet[valid_label] = i
label_vectors = []
for relation_list in relation_list_list:
label_vector = [alphabet[lf.label(x)] for x in relation_list]
label_vectors.append(np.array(label_vector, np.int64))
return label_vectors, alphabet
def compute_baseline_acc(label_vector):
label_counter = Counter()
for label in label_vector:
label_counter[label] += 1.0
_, freq = label_counter.most_common(1)[0]
return round(freq / len(label_vector), 4)
def convert_level2_labels(relations):
    # TODO: this is not enough because we have to exclude some tiny classes
new_relation_list = []
for relation in relations:
split_sense = relation.senses[0].split('.')
if len(split_sense) >= 2:
relation.relation_dict['Sense']= ['.'.join(split_sense[0:2])]
new_relation_list.append(relation)
return new_relation_list
def level2_labels(relations, alphabet=None):
if alphabet is None:
alphabet = {}
label_set = set()
for relation in relations:
label_set.add(relation.senses[0])
print label_set
sorted_label = sorted(list(label_set))
for i, label in enumerate(sorted_label):
alphabet[label] = i
label_vector = []
for relation in relations:
if relation.senses[0] not in alphabet:
alphabet[relation.senses[0]] = len(alphabet)
label_vector.append(alphabet[relation.senses[0]])
return np.array(label_vector, np.int64), alphabet
def get_wbm(num_units):
if num_units == 50:
dict_file = '/data/word_embeddings/wsj-skipgram50.npy'
vocab_file = '/data/word_embeddings/wsj-skipgram50_vocab.txt'
elif num_units == 100:
dict_file = '/data/word_embeddings/wsj-skipgram100.npy'
vocab_file = '/data/word_embeddings/wsj-skipgram100_vocab.txt'
elif num_units == 300:
#dict_file = '/home/j/llc/tet/nlp/lib/lexicon/google_word_vector/GoogleNews-vectors-negative300.npy'
dict_file = \
'/home/j/llc/tet/nlp/lib/lexicon/google_word_vector/GoogleNews-vectors-negative300-wsj_vocab.npy'
vocab_file = \
'/home/j/llc/tet/nlp/lib/lexicon/google_word_vector/GoogleNews-vectors.negative300-wsj_vocab-vocab.txt'
else:
# this will crash the next step and te's too lazy to make it throw an exception.
dict_file = None
vocab_file = None
wbm = WordEmbeddingDict(dict_file, vocab_file)
return wbm
def get_zh_wbm(num_units):
dict_file = '/data/word_embeddings/zh_gigaword3-skipgram%s-cdtb_vocab.npy' % num_units
vocab_file = '/data/word_embeddings/zh_gigaword3-skipgram%s-cdtb_vocab-vocab.txt' % num_units
return WordEmbeddingDict(dict_file, vocab_file)
def set_logger(file_name, dry_mode=False):
if not dry_mode:
sys.stdout = open('%s.log' % file_name, 'w', 1)
json_file = open('%s.json' % file_name, 'w', 1)
return json_file
import base_label_functions as l
from nets.learning import DataTriplet
from data_reader import extract_implicit_relations
from lstm import prep_serrated_matrix_relations
def get_data_srm(dir_list, wbm, max_length=75):
sense_lf = l.SecondLevelLabel()
relation_list_list = [extract_implicit_relations(dir, sense_lf)
for dir in dir_list]
data_list = []
for relation_list in relation_list_list:
data = prep_serrated_matrix_relations(relation_list, wbm, max_length)
data_list.append(data)
label_vectors, label_alphabet = \
label_vectorize(relation_list_list, sense_lf)
data_triplet = DataTriplet(
data_list, [[x] for x in label_vectors], [label_alphabet])
return data_triplet
def make_givens_srm(givens, input_vec, T_training_data,
output_vec, T_training_data_label, start_idx, end_idx):
"""Make the 'given' dict for SGD training for discourse
the input vecs should be [X1 mask1 X2 mask2]
X1 and X2 are TxNxd serrated matrices.
"""
# first arg embedding and mask
givens[input_vec[0]] = T_training_data[0][:,start_idx:end_idx, :]
givens[input_vec[1]] = T_training_data[1][:,start_idx:end_idx]
# second arg embedding and mask
givens[input_vec[2]] = T_training_data[2][:,start_idx:end_idx, :]
givens[input_vec[3]] = T_training_data[3][:,start_idx:end_idx]
for i, output_var in enumerate(output_vec):
givens[output_var] = T_training_data_label[i][start_idx:end_idx]
def make_givens(givens, input_vec, T_training_data,
output_vec, T_training_data_label, start_idx, end_idx):
"""Make the 'given' dict for SGD training for discourse
the input vecs should be [X1 X2]
X1 and X2 are Nxd matrices.
"""
# first arg embedding and mask
givens[input_vec[0]] = T_training_data[0][start_idx:end_idx]
givens[input_vec[1]] = T_training_data[1][start_idx:end_idx]
for i, output_var in enumerate(output_vec):
givens[output_var] = T_training_data_label[i][start_idx:end_idx]
if __name__ == '__main__':
fm = np.array([ [1, 0, 1],
[1, 0, 0],
[0, 0, 0],
[0, 1, 1],
[0, 1, 0],
[0, 0, 0]])
lv = np.array([0,0,0,1,1,1])
compute_mi(fm, lv)
| jimmycallin/master-thesis | architectures/nn_discourse_parser/nets/util.py | Python | mit | 11,972 |
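# Worked example (hedged) for compute_baseline_acc above: it returns the
# majority-class frequency, so for labels [0, 0, 0, 1, 1, 1, 1] the baseline
# accuracy is round(4 / 7, 4) == 0.5714.
#
#     print(compute_baseline_acc([0, 0, 0, 1, 1, 1, 1]))  # 0.5714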
import numpy as np
import numpy.linalg as la
import collections
import IPython
import tensorflow as tf
from utils import *
import time
from collections import defaultdict
class PolicyGradient(Utils):
"""
Calculates policy gradient
for given input state/actions.
Users should primarily be calling main
PolicyGradient class methods.
"""
def __init__(self, net_dims, filepath=None, q_net_dims=None, output_function=None, seed=0, seed_state=None):
"""
Initializes PolicyGradient class.
Parameters:
net_dims: array-like
1D list corresponding to dimensions
of each layer in the net.
output_function: string
Non-linearity function applied to output of
neural network.
Options are: 'tanh', 'sigmoid', 'relu', 'softmax'.
"""
self.q_dict = defaultdict(lambda: defaultdict(float))
self.prev_weight_grad = self.prev_bias_grad = self.prev_weight_update_vals = \
self.prev_bias_update_vals = self.prev_weight_inverse_hess = self.prev_bias_inverse_hess = \
self.total_weight_grad = self.total_bias_grad = None
self.init_action_neural_net(net_dims, output_function, filepath)
if seed_state is not None:
np.random.set_state(seed_state)
tf.set_random_seed(seed)
def train_agent(self, dynamics_func, reward_func, update_method, initial_state, num_iters, batch_size, traj_len, step_size=0.1, momentum=0.5, normalize=True):
"""
Trains agent using input dynamics and rewards functions.
Parameters:
dynamics_func: function
User-provided function that takes in
a state and action, and returns the next state.
reward_func: function
User-provided function that takes in
a state and action, and returns the associated reward.
initial_state: array-like
Initial state that each trajectory starts at.
Must be 1-dimensional NumPy array.
num_iters: int
Number of iterations to run gradient updates.
batch_size: int
Number of trajectories to run in a single iteration.
traj_len: int
Number of state-action pairs in a trajectory.
Output:
mean_rewards: array-like
Mean ending rewards of all iterations.
"""
mean_rewards = []
ending_states = []
for i in range(num_iters):
traj_states = []
traj_actions = []
rewards = []
for j in range(batch_size):
states = []
actions = []
curr_rewards = []
curr_state = initial_state
# Rolls out single trajectory
for k in range(traj_len):
# Get action from learner
curr_action = self.get_action(curr_state)
# Update values
states.append(curr_state)
curr_rewards.append(reward_func(curr_state, curr_action))
actions.append(curr_action)
# Update state
curr_state = dynamics_func(curr_state, curr_action)
# Append trajectory/rewards
traj_states.append(states)
traj_actions.append(actions)
rewards.append(curr_rewards)
# Apply policy gradient iteration
self.gradient_update(np.array(traj_states), np.array(traj_actions), np.array(rewards), \
update_method, step_size, momentum, normalize)
mean_rewards.append(np.mean([np.sum(reward_list) for reward_list in rewards]))
ending_states.append([traj[-1] for traj in traj_states])
return np.array(mean_rewards), ending_states
def gradient_update(self, traj_states, traj_actions, rewards, update_method='sgd', step_size=1.0, momentum=0.5, normalize=True):
"""
Estimates and applies gradient update according to a policy.
States, actions, rewards must be lists of lists; first dimension indexes
the ith trajectory, second dimension indexes the jth state-action-reward of that
trajectory.
Parameters:
traj_states: array-like
List of list of states.
traj_actions: array-like
List of list of actions.
rewards: array-like
List of list of rewards.
step_size: float
Step size.
momentum: float
Momentum value.
normalize: boolean
Determines whether to normalize gradient update.
Recommended if running into NaN/infinite value errors.
"""
assert update_method in ['sgd', 'momentum', 'lbfgs', 'adagrad', 'rmsprop', 'adam']
# Calculate updates and create update pairs
curr_weight_grad = 0
curr_bias_grad = 0
curr_weight_update_vals = []
curr_bias_update_vals = []
curr_weight_inverse_hess = []
curr_bias_inverse_hess = []
iters = traj_states.shape[0]
q_vals = self.estimate_q(traj_states, traj_actions, rewards)
assert traj_states.shape[0] == traj_actions.shape[0] == rewards.shape[0]
assert q_vals.shape[0] == iters
# Update for each example
for i in range(iters):
# Estimate q-values and extract gradients
curr_traj_states = traj_states[i]
curr_traj_actions = traj_actions[i]
curr_q_val_list = q_vals[i]
curr_traj_states = curr_traj_states.reshape(curr_traj_states.shape[0], curr_traj_states.shape[1] * curr_traj_states.shape[2])
curr_traj_actions = curr_traj_actions.reshape(curr_traj_actions.shape[0], curr_traj_actions.shape[1] * curr_traj_actions.shape[2])
curr_q_val_list = curr_q_val_list.reshape(curr_q_val_list.shape[0], 1)
curr_weight_grad_vals = self.sess.run(self.weight_grads, \
feed_dict={self.input_state: curr_traj_states, self.observed_action: curr_traj_actions, self.q_val: curr_q_val_list})
curr_bias_grad_vals = self.sess.run(self.bias_grads, \
feed_dict={self.input_state: curr_traj_states, self.observed_action: curr_traj_actions, self.q_val: curr_q_val_list})
curr_weight_grad += np.array(curr_weight_grad_vals) / np.float(iters)
curr_bias_grad += np.array(curr_bias_grad_vals) / np.float(iters)
# Update weights
for j in range(len(self.weights)):
if update_method == 'sgd':
update_val = step_size * curr_weight_grad[j]
elif update_method == 'momentum':
if self.prev_weight_grad is None:
update_val = step_size * curr_weight_grad[j]
else:
update_val = momentum * self.prev_weight_grad[j] + step_size * curr_weight_grad[j]
elif update_method == 'lbfgs':
if self.prev_weight_inverse_hess is None:
curr_inverse_hess = np.eye(curr_weight_grad[j].shape[0])
update_val = curr_weight_grad[j]
else:
update_val, curr_inverse_hess = \
                            self.bfgs_update(self.prev_weight_inverse_hess[j], self.prev_weight_update_vals[j], self.prev_weight_grad[j], curr_weight_grad[j])
update_val = update_val * step_size
curr_weight_inverse_hess.append(curr_inverse_hess)
elif update_method == 'adagrad':
if self.total_weight_grad is None:
self.total_weight_grad = curr_weight_grad
else:
self.total_weight_grad[j] += np.square(curr_weight_grad[j])
update_val = step_size * curr_weight_grad[j] / (np.sqrt(np.abs(self.total_weight_grad[j])) + 1e-8)
elif update_method == 'rmsprop':
decay = 0.99
if self.total_weight_grad is None:
self.total_weight_grad = curr_weight_grad
else:
self.total_weight_grad[j] = decay * self.total_weight_grad[j] + (1 - decay) * np.square(curr_weight_grad[j])
update_val = step_size * curr_weight_grad[j] / (np.sqrt(np.abs(self.total_weight_grad[j])) + 1e-8)
elif update_method == 'adam':
beta1, beta2 = 0.9, 0.999
if self.total_weight_grad is None:
self.total_weight_grad = curr_weight_grad
self.total_sq_weight_grad = np.square(curr_weight_grad)
else:
self.total_weight_grad[j] = beta1 * self.total_weight_grad[j] + (1 - beta1) * curr_weight_grad[j]
self.total_sq_weight_grad[j] = beta2 * self.total_sq_weight_grad[j] + (1 - beta2) * np.sqrt(np.abs(self.total_weight_grad[j]))
update_val = np.divide(step_size * self.total_weight_grad[j], (np.sqrt(np.abs(self.total_sq_weight_grad[j])) + 1e-8))
if normalize:
norm = la.norm(update_val)
if norm != 0:
update_val = update_val / norm
curr_weight_update_vals.append(update_val)
update = tf.assign(self.weights[j], self.weights[j] + update_val)
self.sess.run(update)
# Update biases
for j in range(len(self.biases)):
if update_method == 'sgd':
update_val = step_size * curr_bias_grad[j]
elif update_method == 'momentum':
if self.prev_bias_grad is None:
update_val = step_size * curr_bias_grad[j]
else:
update_val = momentum * self.prev_bias_grad[j] + step_size * curr_bias_grad[j]
elif update_method == 'lbfgs':
if self.prev_bias_inverse_hess is None:
curr_inverse_hess = np.eye(curr_bias_grad[j].shape[0])
update_val = curr_bias_grad[j]
else:
update_val, curr_inverse_hess = \
                            self.bfgs_update(self.prev_bias_inverse_hess[j], self.prev_bias_update_vals[j], self.prev_bias_grad[j], curr_bias_grad[j])
update_val = update_val * step_size
curr_bias_inverse_hess.append(curr_inverse_hess)
elif update_method == 'adagrad':
if self.total_bias_grad is None:
self.total_bias_grad = curr_bias_grad
else:
self.total_bias_grad[j] += np.square(curr_bias_grad[j])
update_val = step_size * curr_bias_grad[j] / (np.sqrt(np.abs(self.total_bias_grad[j])) + 1e-8)
elif update_method == 'rmsprop':
decay = 0.99
if self.total_bias_grad is None:
self.total_bias_grad = curr_bias_grad
else:
self.total_bias_grad[j] = decay * self.total_bias_grad[j] + (1 - decay) * np.square(curr_bias_grad[j])
update_val = step_size * curr_bias_grad[j] / (np.sqrt(np.abs(self.total_bias_grad[j])) + 1e-8)
elif update_method == 'adam':
beta1, beta2 = 0.9, 0.999
if self.total_bias_grad is None:
self.total_bias_grad = curr_bias_grad
self.total_sq_bias_grad = np.square(curr_bias_grad)
else:
self.total_bias_grad[j] = beta1 * self.total_bias_grad[j] + (1 - beta1) * curr_bias_grad[j]
self.total_sq_bias_grad[j] = beta2 * self.total_sq_bias_grad[j] + (1 - beta2) * np.sqrt(np.abs(self.total_bias_grad[j]))
update_val = np.divide(step_size * self.total_bias_grad[j], (np.sqrt(np.abs(self.total_sq_bias_grad[j])) + 1e-8))
if normalize:
norm = la.norm(update_val)
if norm != 0:
update_val = update_val / norm
curr_bias_update_vals.append(update_val)
update = tf.assign(self.biases[j], self.biases[j] + update_val)
self.sess.run(update)
self.prev_weight_grad = curr_weight_grad
self.prev_bias_grad = curr_bias_grad
self.prev_weight_update_vals = curr_weight_update_vals
        self.prev_bias_update_vals = curr_bias_update_vals
        self.prev_weight_inverse_hess = curr_weight_inverse_hess
        self.prev_bias_inverse_hess = curr_bias_inverse_hess
def get_action(self, state):
"""
Returns action based on input state.
Input:
state: array-like
Input state.
Output:
action: array-like
Predicted action.
"""
state = state.T
curr_output_mean = self.sess.run(self.output_mean, feed_dict={self.input_state: state})
action = self.meanstd_sample(curr_output_mean)
return action
| WesleyHsieh/policy_gradient | policy_gradient/policy_gradient.py | Python | mit | 10,569 |
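# Hedged usage sketch for PolicyGradient.train_agent above. The network layout,
# the toy dynamics/reward functions and the state shape are assumptions; the
# exact shapes expected by get_action depend on the unshown Utils base class.
#
#     pg = PolicyGradient(net_dims=[4, 16, 2], output_function='tanh')
#     mean_rewards, ending_states = pg.train_agent(
#         dynamics_func=my_dynamics,    # assumed: (state, action) -> next state
#         reward_func=my_reward,        # assumed: (state, action) -> scalar
#         update_method='adam',
#         initial_state=np.zeros((4, 1)),
#         num_iters=50, batch_size=10, traj_len=20,
#         step_size=0.05, normalize=True)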
# -*- coding: utf8 -*-
"""
The ``queue`` utils
===================
Some operations will require a queue; this utils file is reserved for the related helpers.
"""
__author__ = 'Salas'
__copyright__ = 'Copyright 2014 LTL'
__credits__ = ['Salas']
__license__ = 'MIT'
__version__ = '0.2.0'
__maintainer__ = 'Salas'
__email__ = '[email protected]'
__status__ = 'Pre-Alpha'
| salas106/irc-ltl-framework | utils/queue.py | Python | mit | 352
'''
evaluate result
'''
from keras.models import load_model
from keras.utils import np_utils
import numpy as np
import os
import sys
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
# input sentence dimensions
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
# gazetteer_length = conf.gazetteer_length
IOB = conf.ner_BIOES_decode
data = sys.argv[1]
best_epoch = sys.argv[2]
if data=="dev":
test_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
elif data == "test":
test_data = load_data.load_ner(dataset='eng.testb', form='BIOES')
tokens = [len(x[0]) for x in test_data]
print(sum(tokens))
print('%s shape:'%data, len(test_data))
model_name = os.path.basename(__file__)[9:-3]
folder_path = './model/%s'%model_name
model_path = '%s/model_epoch_%s.h5'%(folder_path, best_epoch)
result = open('%s/predict.txt'%folder_path, 'w')
def convert(chunktags):
# convert BIOES to BIO
for p, q in enumerate(chunktags):
if q.startswith("E-"):
chunktags[p] = "I-" + q[2:]
elif q.startswith("S-"):
if p==0:
chunktags[p] = "I-" + q[2:]
elif q[2:]==chunktags[p-1][2:]:
chunktags[p] = "B-" + q[2:]
elif q[2:]!=chunktags[p-1][2:]:
chunktags[p] = "I-" + q[2:]
elif q.startswith("B-"):
if p==0:
chunktags[p] = "I-" + q[2:]
else:
if q[2:]!=chunktags[p-1][2:]:
chunktags[p] = "I-" + q[2:]
return chunktags
print('loading model...')
model = load_model(model_path)
print('loading model finished.')
for each in test_data:
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=[each], gram='bi', form='BIOES')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
prob = model.predict_on_batch([embed_index, hash_index, pos, chunk])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
chunktags = [IOB[j] for j in predict_label][:l]
word_pos_chunk = list(zip(*each))
# convert
word_pos_chunk = list(zip(*word_pos_chunk))
word_pos_chunk = [list(x) for x in word_pos_chunk]
# if data == "test":
# word_pos_chunk[3] = convert(word_pos_chunk[3])
word_pos_chunk = list(zip(*word_pos_chunk))
#convert
# if data == "test":
# chunktags = convert(chunktags)
# chunktags = prepare.gazetteer_lookup(each[0], chunktags, data)
for ind, chunktag in enumerate(chunktags):
result.write(' '.join(word_pos_chunk[ind])+' '+chunktag+'\n')
result.write('\n')
result.close()
print('epoch %s predict over !'%best_epoch)
os.system('../tools/conlleval < %s/predict.txt'%folder_path)
| danche354/Sequence-Labeling | ner_BIOES/evaluate-senna-hash-2-pos-chunk-128-64-rmsprop5.py | Python | mit | 3,163 |
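# Hedged illustration of the BIOES -> BIO mapping implemented by convert()
# above: E- tags always become I-; an S- tag becomes B- only when it continues
# the same entity type as the previous token, otherwise I-; a B- tag at the
# start of a sentence, or following a different type, becomes I-.
#
#     convert(['B-PER', 'E-PER', 'S-LOC'])  # -> ['I-PER', 'I-PER', 'I-LOC']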
import numpy as np
import cvxopt as co
def load_mnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.MNIST(root='../data/mnist', train=True, download=True, transform=None)
mnist_test = datasets.MNIST(root='../data/mnist', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_fashion_mnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.FashionMNIST(root='../data/fashion-mnist', train=True, download=True, transform=None)
mnist_test = datasets.FashionMNIST(root='../data/fashion-mnist', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_emnist_dataset():
import torchvision.datasets as datasets
mnist_train = datasets.EMNIST(root='../data/emnist', split='balanced', train=True, download=True, transform=None)
mnist_test = datasets.EMNIST(root='../data/emnist', split='balanced', train=False, download=True, transform=None)
test_labels = np.array([mnist_test[i][1].numpy() for i in range(len(mnist_test))], dtype=np.int)
train_labels = np.array([mnist_train[i][1].numpy() for i in range(len(mnist_train))], dtype=np.int)
test = np.array([np.asarray(mnist_test[i][0]).reshape(28*28) for i in range(len(mnist_test))], dtype=np.float)
train = np.array([np.asarray(mnist_train[i][0]).reshape(28*28) for i in range(len(mnist_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [28, 28]
def load_cifar10_dataset():
import torchvision.datasets as datasets
cifar_train = datasets.CIFAR10(root='../data/cifar10', train=True, download=True, transform=None)
cifar_test = datasets.CIFAR10(root='../data/cifar10', train=False, download=True, transform=None)
test_labels = np.array([cifar_test[i][1] for i in range(len(cifar_test))], dtype=np.int)
train_labels = np.array([cifar_train[i][1] for i in range(len(cifar_train))], dtype=np.int)
test = np.array([np.asarray(cifar_test[i][0].convert('F')).reshape(32*32) for i in range(len(cifar_test))], dtype=np.float)
train = np.array([np.asarray(cifar_train[i][0].convert('F')).reshape(32*32) for i in range(len(cifar_train))], dtype=np.float)
train /= 255. # normalize data to be in range [0,1]
test /= 255.
return train, train_labels, test, test_labels, [32, 32]
def get_gaussian(num, dims=2, means=[0,0], vars=[1,1]):
data = np.random.multivariate_normal(means, np.eye(dims), num)
return data
def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
seqs = np.zeros((dims, lens))
lbls = np.zeros((1, lens), dtype=np.int8)
marker = 0
# generate first state sequence
for d in range(dims):
seqs[d, :] = np.random.randn(lens)*vars1[d] + means1[d]
prob = np.random.uniform()
if prob < anom_prob:
# add second state blocks
while True:
max_block_len = 0.6*lens
min_block_len = 0.1*lens
block_len = np.int(max_block_len*np.random.uniform()+3)
block_start = np.int(lens*np.random.uniform())
if block_len - (block_start+block_len-lens)-3 > min_block_len:
break
block_len = min( [block_len, block_len - (block_start+block_len-lens)-3] )
        lbls[0, block_start:block_start+block_len-1] = 1
marker = 1
for d in range(dims):
seqs[d,block_start:block_start+block_len-1] = np.random.randn(1,block_len-1)*vars2[d] + means2[d]
return seqs, lbls, marker
def get_2state_anom_seq(lens, comb_block_len, anom_prob=1.0, num_blocks=1):
    marker = 0
    # generate first state sequence, gaussian noise 0=mean, 1=variance
    seqs = np.zeros((1, lens))
    lbls = np.zeros((1, lens))
bak = seqs.copy()
prob = np.random.uniform()
if prob < anom_prob:
# add second state blocks
block_len = np.int(np.floor(comb_block_len / float(num_blocks)))
marker = 1
# add a single block
blen = 0
for b in range(np.int(num_blocks)):
if (b==num_blocks-1 and b>1):
block_len = np.round(comb_block_len-blen)
isDone = False
while isDone == False:
start = np.int(np.random.uniform()*float(lens-block_len+1))
if np.sum(lbls[0,start:start+block_len]) == 0:
lbls[0, start:start+block_len] = 1
seqs[0, start:start+block_len] = bak[0, start:start+block_len]+4.0
isDone = True
break
blen += block_len
return seqs, lbls, marker
| nicococo/tilitools | tilitools/utils_data.py | Python | mit | 5,881 |
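# Hedged usage sketch for the synthetic sequence generators above; the lengths
# and block sizes are arbitrary illustrations.
#
#     seqs, lbls, marker = get_2state_gaussian_seq(lens=100, dims=2, anom_prob=0.5)
#     a_seq, a_lbl, a_marker = get_2state_anom_seq(lens=100, comb_block_len=20,
#                                                  anom_prob=1.0, num_blocks=2)
#     # marker is 1 when an anomalous block was injected; lbls marks its positions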
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 10:43:53 2019
@author: Heathro
Description: Reduces a vcf file to the meta section and
up to 1000 lines for each chromosome number, for testing
and debugging purposes.
"""
# Open files to read from and write to
vcfpath = open("D:/MG_GAP/Ali_w_767.vcf", "rU")
testvcf = open("REDUCED_ali.vcf", "w")
# Keep track of chromosome number so we can get one of each
temp_chrom = 0
counter = 0
for line_index, line in enumerate(vcfpath):
# Found a chromosome line
if line[0:8] == "sNNffold":
column = line.split('\t')
first_col = column[0].split('_')
current_chrom = first_col[1]
# Write up to 1000 lines of each chromosome
if current_chrom == temp_chrom:
counter = counter + 1
if counter < 1000:
testvcf.write(line)
# If a new chromosome, write a line, start counter at 0
elif current_chrom != temp_chrom:
counter = 0
temp_chrom = current_chrom
testvcf.write(line)
# Include the meta lines and header line
else:
testvcf.write(line)
testvcf.close()
vcfpath.close()
| davidfarr/mg-gap | mg-gap/mg-gap-py/mg-gap/test_files/reduceVCF.py | Python | mit | 1,210 |
import random
rand = random.SystemRandom()
def rabinMiller(num):
if num % 2 == 0:
return False
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for trials in range(64):
a = rand.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v ** 2) % num
return True
| Fitzgibbons/Cryptograpy | rabinmiller.py | Python | mit | 526 |
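# Hedged sanity check for rabinMiller above. The helper assumes an odd candidate
# greater than 3: even inputs (including 2) return False, and an input of 3
# makes rand.randrange(2, num - 1) fail on an empty range.
#
#     print(rabinMiller(7919))         # True, 7919 is prime
#     print(rabinMiller(7917))         # False, 7917 = 3 * 7 * 13 * 29
#     print(rabinMiller(2 ** 61 - 1))  # True, a Mersenne prime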
"""Tests exceptions and DB-API exception wrapping."""
from sqlalchemy import exc as sa_exceptions
from sqlalchemy.test import TestBase
# Py3K
#StandardError = BaseException
# Py2K
from exceptions import StandardError, KeyboardInterrupt, SystemExit
# end Py2K
class Error(StandardError):
"""This class will be old-style on <= 2.4 and new-style on >= 2.5."""
class DatabaseError(Error):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
def __str__(self):
return "<%s>" % self.bogus
class OutOfSpec(DatabaseError):
pass
class WrapTest(TestBase):
def test_db_error_normal(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], OperationalError())
except sa_exceptions.DBAPIError:
self.assert_(True)
def test_tostring(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', None, OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' None"
def test_tostring_large_dict(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', {'a':1, 'b':2, 'c':3, 'd':4, 'e':5, 'f':6, 'g':7, 'h':8, 'i':9, 'j':10, 'k':11}, OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc).startswith("(OperationalError) 'this is a message' {")
def test_tostring_large_list(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc).startswith("(OperationalError) 'this is a message' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]")
def test_tostring_large_executemany(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}]", str(exc)
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [{1: 1}, {1: 1}] ... and a total of 11 bound parameter sets"
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)]"
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), ], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [(1,), (1,)] ... and a total of 11 bound parameter sets"
def test_db_error_busted_dbapi(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], ProgrammingError())
except sa_exceptions.DBAPIError, e:
self.assert_(True)
self.assert_('Error in str() of DB-API' in e.args[0])
def test_db_error_noncompliant_dbapi(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], OutOfSpec())
except sa_exceptions.DBAPIError, e:
self.assert_(e.__class__ is sa_exceptions.DBAPIError)
except OutOfSpec:
self.assert_(False)
# Make sure the DatabaseError recognition logic is limited to
# subclasses of sqlalchemy.exceptions.DBAPIError
try:
raise sa_exceptions.DBAPIError.instance(
'', [], sa_exceptions.ArgumentError())
except sa_exceptions.DBAPIError, e:
self.assert_(e.__class__ is sa_exceptions.DBAPIError)
except sa_exceptions.ArgumentError:
self.assert_(False)
def test_db_error_keyboard_interrupt(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], KeyboardInterrupt())
except sa_exceptions.DBAPIError:
self.assert_(False)
except KeyboardInterrupt:
self.assert_(True)
def test_db_error_system_exit(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], SystemExit())
except sa_exceptions.DBAPIError:
self.assert_(False)
except SystemExit:
self.assert_(True)
| obeattie/sqlalchemy | test/base/test_except.py | Python | mit | 5,046 |
import re
from collections import OrderedDict
import compiler.lang as lang
doc_next = None
doc_prev_component = None
doc_root_component = None
class CustomParser(object):
def match(self, next):
raise Exception("Expression should implement match method")
escape_re = re.compile(r"[\0\n\r\v\t\b\f]")
escape_map = {
'\0': '\\0',
'\n': '\\n',
'\r': '\\r',
'\v': '\\v',
'\t': '\\t',
'\b': '\\b',
'\f': '\\f'
}
def escape(str):
return escape_re.sub(lambda m: escape_map[m.group(0)], str)
class StringParser(CustomParser):
def match(self, next):
n = len(next)
if n < 2:
return
quote = next[0]
if quote != "'" and quote != "\"":
return
pos = 1
while next[pos] != quote:
if next[pos] == "\\":
pos += 2
else:
pos += 1
if pos >= n:
raise Exception("Unexpected EOF while parsing string")
return next[:pos + 1]
skip_re = re.compile(r'(?:\s+|/\*.*?\*/|//[^\n]*(?:$|\n))', re.DOTALL)
COMPONENT_NAME = r'(?:[a-z][a-zA-Z0-9._]*\.)?[A-Z][A-Za-z0-9]*'
component_name = re.compile(COMPONENT_NAME)
component_name_lookahead = re.compile(COMPONENT_NAME + r'\s*{')
identifier_re = re.compile(r'[a-z_][A-Za-z0-9_]*')
property_type_re = re.compile(r'[a-z][a-z0-9]*', re.IGNORECASE)
nested_identifier_re = re.compile(r'[a-z_][A-Za-z0-9_\.]*')
function_name_re = re.compile(r'[a-z_][a-z0-9_\.]*', re.IGNORECASE)
string_re = StringParser()
kw_re = re.compile(r'(?:true|false|null)')
NUMBER_RE = r"(?:\d+\.\d+(e[+-]?\d+)?|(?:0x)?[0-9]+)"
number_re = re.compile(NUMBER_RE, re.IGNORECASE)
percent_number_re = re.compile(NUMBER_RE + r'%', re.IGNORECASE)
scale_number_re = re.compile(NUMBER_RE + r's', re.IGNORECASE)
rest_of_the_line_re = re.compile(r".*$", re.MULTILINE)
json_object_value_delimiter_re = re.compile(r"[,;]")
dep_var = re.compile(r"\${(.*?)}")
class Expression(object):
__slots__ = ('op', 'args')
def __init__(self, op, *args):
self.op, self.args = op, args
def __repr__(self):
return "Expression %s { %s }" %(self.op, ", ".join(map(repr, self.args)))
def __str__(self):
args = self.args
n = len(args)
if n == 1:
return "(%s %s)" %(self.op, args[0])
elif n == 2:
return "(%s %s %s)" %(args[0], self.op, args[1])
elif n == 3:
op = self.op
return "(%s %s %s %s %s)" %(args[0], op[0], args[1], op[1], args[2])
else:
raise Exception("invalid argument counter")
class Call(object):
__slots__ = ('func', 'args')
def __init__(self, func, args):
self.func = func
self.args = args
def __repr__(self):
return "Call %s { %s }" %(self.func, self.args)
def __str__(self):
if isinstance(self.func, Literal):
name = self.func.term
if name[0].islower():
if '.' in name:
name = '${%s}' %name
else:
name = '$this._context.%s' %name
else:
name = str(self.func)
		#if lhs is not a literal, then we can't process deps, so remove the ${xxx} wrapper
name = dep_var.sub(lambda m: m.group(1), name)
return "%s(%s)" %(name, ",".join(map(str, self.args)))
class Dereference(object):
__slots__ = ('array', 'index')
def __init__(self, array, index):
self.array = array
self.index = index
def __str__(self):
return "(%s[%s])" %(self.array, self.index)
class Literal(object):
__slots__ = ('lbp', 'term', 'identifier')
def __init__(self, term, string = False, identifier = False):
self.term = escape(term) if string else term
self.lbp = 0
self.identifier = identifier
def nud(self, state):
return self
def __repr__(self):
return "Literal { %s }" %self.term
def __str__(self):
return "${%s}" %self.term if self.identifier and self.term[0].islower() else self.term
class PrattParserState(object):
def __init__(self, parent, parser, token):
self.parent, self.parser, self.token = parent, parser, token
class PrattParser(object):
def __init__(self, ops):
symbols = [(x.term, x) for x in ops]
symbols.sort(key=lambda x: len(x[0]), reverse=True)
self.symbols = symbols
def next(self, parser):
parser._skip()
next = parser.next
next_n = len(next)
for term, sym in self.symbols:
n = len(term)
if n > next_n:
continue
keyword = term[-1].isalnum()
if next.startswith(term):
if keyword and n < next_n and next[n].isalnum():
continue
parser.advance(len(term))
return sym
next = parser.maybe(kw_re)
if next:
return Literal(next)
next = parser.maybe(percent_number_re)
if next:
next = next[:-1]
return Literal("((%s) / 100 * ${parent.<property-name>})" %next) if next != 100 else "(${parent.<property-name>})"
next = parser.maybe(scale_number_re)
if next:
next = next[:-1]
return Literal("((%s) * ${context.<scale-property-name>})" %next)
next = parser.maybe(number_re)
if next:
return Literal(next)
next = parser.maybe(function_name_re)
if next:
return Literal(next, identifier=True)
next = parser.maybe(string_re)
if next:
return Literal(next, string=True)
return None
def advance(self, state, expect = None):
if expect is not None:
state.parser.read(expect, "Expected %s in expression" %expect)
state.token = self.next(state.parser)
def expression(self, state, rbp = 0):
parser = state.parser
t = state.token
state.token = self.next(parser)
if state.token is None:
return t
left = t.nud(state)
while state.token is not None and rbp < state.token.lbp:
t = state.token
self.advance(state)
left = t.led(state, left)
return left
def parse(self, parser):
token = self.next(parser)
if token is None:
parser.error("Unexpected expression")
state = PrattParserState(self, parser, token)
return self.expression(state)
class UnsupportedOperator(object):
__slots__ = ('term', 'lbp', 'rbp')
def __init__(self, term, lbp = 0, rbp = 0):
self.term, self.lbp, self.rbp = term, lbp, rbp
def nud(self, state):
state.parser.error("Unsupported prefix operator %s" %self.term)
def led(self, state, left):
state.parser.error("Unsupported postfix operator %s" %self.term)
def __repr__(self):
return "UnsupportedOperator { %s %s }" %(self.term, self.lbp)
class Operator(object):
__slots__ = ('term', 'lbp', 'rbp')
def __init__(self, term, lbp = 0, rbp = None):
self.term, self.lbp, self.rbp = term, lbp, rbp
def nud(self, state):
if self.rbp is not None:
return Expression(self.term, state.parent.expression(state, self.rbp))
state.parser.error("Unexpected token in infix expression: '%s'" %self.term)
def led(self, state, left):
if self.lbp is not None:
return Expression(self.term, left, state.parent.expression(state, self.lbp))
else:
state.parser.error("No left-associative operator defined")
def __repr__(self):
return "Operator { %s %s %s }" %(self.term, self.lbp, self.rbp)
class Conditional(object):
__slots__ = ('term', 'lbp')
def __init__(self, lbp):
self.term = '?'
self.lbp = lbp
def nud(self, state):
state.parser.error("Conditional operator can't be used as unary")
def led(self, state, left):
true = state.parent.expression(state)
state.parent.advance(state, ':')
false = state.parent.expression(state)
return Expression(('?', ':'), left, true, false)
def __repr__(self):
return "Conditional { }"
class LeftParenthesis(object):
__slots__ = ('term', 'lbp')
def __init__(self, lbp):
self.term = '('
self.lbp = lbp
def nud(self, state):
expr = state.parent.expression(state)
state.parent.advance(state, ')')
return expr
def led(self, state, left):
args = []
next = state.token
if next.term != ')':
while True:
args.append(state.parent.expression(state))
if state.token is not None:
state.parser.error("Unexpected token %s" %state.token)
if not state.parser.maybe(','):
break
state.parent.advance(state)
state.parent.advance(state, ')')
return Call(left, args)
def __repr__(self):
return "LeftParenthesis { %d }" %self.lbp
class LeftSquareBracket(object):
__slots__ = ('term', 'lbp')
def __init__(self, lbp):
self.term = '['
self.lbp = lbp
def nud(self, state):
state.parser.error("Invalid [] expression")
def led(self, state, left):
arg = state.parent.expression(state)
if state.token is not None:
state.parser.error("Unexpected token %s" %state.token)
state.parent.advance(state, ']')
return Dereference(left, arg)
def __repr__(self):
return "LeftSquareBracket { %d }" %self.lbp
infix_parser = PrattParser([
Operator('.', 19),
LeftParenthesis(19),
LeftSquareBracket(19),
UnsupportedOperator('++', 17, 16),
UnsupportedOperator('--', 17, 16),
UnsupportedOperator('void', None, 16),
UnsupportedOperator('delete', None, 16),
UnsupportedOperator('await', None, 16),
Operator('typeof', None, 16),
Operator('!', None, 16),
Operator('~', None, 16),
Operator('+', 13, 16),
Operator('-', 13, 16),
Operator('typeof', None, 16),
Operator('**', 15),
Operator('*', 14),
Operator('/', 14),
Operator('%', 14),
Operator('<<', 12),
Operator('>>', 12),
Operator('>>>', 12),
Operator('<', 11),
Operator('<=', 11),
Operator('>', 11),
Operator('>=', 11),
Operator('in', 11),
Operator('instanceof', 11),
Operator('==', 10),
Operator('!=', 10),
Operator('===', 10),
Operator('!==', 10),
Operator('&', 9),
Operator('^', 8),
Operator('|', 7),
Operator('&&', 6),
Operator('||', 5),
Conditional(4),
])
class Parser(object):
def __init__(self, text):
self.__text = text
self.__pos = 0
self.__lineno = 1
self.__colno = 1
self.__last_object = None
self.__next_doc = None
@property
def at_end(self):
return self.__pos >= len(self.__text)
@property
def next(self):
return self.__text[self.__pos:]
@property
def current_line(self):
text = self.__text
pos = self.__pos
begin = text.rfind('\n', 0, pos)
end = text.find('\n', pos)
if begin < 0:
begin = 0
else:
begin += 1
if end < 0:
end = len(text)
return text[begin:end]
def advance(self, n):
text = self.__text
pos = self.__pos
for i in range(n):
if text[pos] == '\n':
self.__lineno += 1
self.__colno = 1
else:
self.__colno += 1
pos += 1
self.__pos = pos
def __docstring(self, text, prev):
if prev:
if self.__last_object:
if self.__last_object.doc is not None:
self.__last_object.doc = lang.DocumentationString(self.__last_object.doc.text + " " + text)
else:
self.__last_object.doc = lang.DocumentationString(text)
else:
self.error("Found docstring without previous object")
else:
if self.__next_doc is not None:
self.__next_doc += " " + text
else:
self.__next_doc = text
def __get_next_doc(self):
if self.__next_doc is None:
return
doc = lang.DocumentationString(self.__next_doc)
self.__next_doc = None
return doc
def __return(self, object, doc):
if doc:
if object.doc:
object.doc = lang.DocumentationString(object.doc.text + " " + doc.text)
else:
object.doc = doc
self.__last_object = object
return object
def _skip(self):
while True:
m = skip_re.match(self.next)
if m is not None:
text = m.group(0).strip()
if text.startswith('///<'):
self.__docstring(text[4:], True)
elif text.startswith('///'):
self.__docstring(text[3:], False)
elif text.startswith('/**'):
end = text.rfind('*/')
self.__docstring(text[3:end], False)
self.advance(m.end())
else:
break
def error(self, msg):
lineno, col, line = self.__lineno, self.__colno, self.current_line
pointer = re.sub(r'\S', ' ', line)[:col - 1] + '^-- ' + msg
raise Exception("at line %d:%d:\n%s\n%s" %(lineno, col, self.current_line, pointer))
def lookahead(self, exp):
if self.at_end:
return
self._skip()
next = self.next
if isinstance(exp, str):
keyword = exp[-1].isalnum()
n, next_n = len(exp), len(next)
if n > next_n:
return
if next.startswith(exp):
#check that exp ends on word boundary
if keyword and n < next_n and next[n].isalnum():
return
else:
return exp
elif isinstance(exp, CustomParser):
return exp.match(next)
else:
m = exp.match(next)
if m:
return m.group(0)
def maybe(self, exp):
value = self.lookahead(exp)
if value is not None:
self.advance(len(value))
return value
def read(self, exp, error):
value = self.maybe(exp)
if value is None:
self.error(error)
return value
def __read_statement_end(self):
self.read(';', "Expected ; at the end of the statement")
def __read_list(self, exp, delimiter, error):
result = []
result.append(self.read(exp, error))
while self.maybe(delimiter):
result.append(self.read(exp, error))
return result
def __read_nested(self, begin, end, error):
begin_off = self.__pos
self.read(begin, error)
counter = 1
while not self.at_end:
if self.maybe(begin):
counter += 1
elif self.maybe(end):
counter -= 1
if counter == 0:
end_off = self.__pos
value = self.__text[begin_off: end_off]
return value
else:
if not self.maybe(string_re):
self.advance(1)
def __read_code(self):
return self.__read_nested('{', '}', "Expected code block")
def __read_expression(self, terminate = True):
if self.maybe('['):
values = []
while not self.maybe(']'):
values.append(self.__read_expression(terminate = False))
if self.maybe(']'):
break
self.read(',', "Expected ',' as an array delimiter")
if terminate:
self.__read_statement_end()
return "[%s]" % (",".join(map(str, values)))
else:
value = infix_parser.parse(self)
if terminate:
self.__read_statement_end()
return str(value)
def __read_property(self):
if self.lookahead(':'):
return self.__read_rules_with_id(["property"])
doc = self.__get_next_doc()
type = self.read(property_type_re, "Expected type after property keyword")
if type == 'enum':
type = self.read(identifier_re, "Expected type after enum keyword")
self.read('{', "Expected { after property enum")
values = self.__read_list(component_name, ',', "Expected capitalised enum element")
self.read('}', "Expected } after enum element declaration")
if self.maybe(':'):
def_value = self.read(component_name, "Expected capitalised default enum value")
else:
def_value = None
self.__read_statement_end()
return self.__return(lang.EnumProperty(type, values, def_value), doc)
if type == 'const':
name = self.read(identifier_re, "Expected const property name")
self.read(':', "Expected : before const property code")
code = self.__read_code()
return self.__return(lang.Property("const", [(name, code)]), doc)
if type == 'alias':
name = self.read(identifier_re, "Expected alias property name")
self.read(':', "Expected : before alias target")
target = self.read(nested_identifier_re, "Expected identifier as an alias target")
self.__read_statement_end()
return self.__return(lang.AliasProperty(name, target), doc)
names = self.__read_list(identifier_re, ',', "Expected identifier in property list")
if len(names) == 1:
#Allow initialisation for the single property
def_value = None
if self.maybe(':'):
if self.lookahead(component_name_lookahead):
def_value = self.__read_comp()
else:
def_value = self.__read_expression()
else:
self.__read_statement_end()
name = names[0]
return self.__return(lang.Property(type, [(name, def_value)]), doc)
else:
self.read(';', 'Expected ; at the end of property declaration')
return self.__return(lang.Property(type, map(lambda name: (name, None), names)), doc)
def __read_rules_with_id(self, identifiers):
args = []
doc = self.__get_next_doc()
if self.maybe('('):
if not self.maybe(')'):
args = self.__read_list(identifier_re, ',', "Expected argument list")
self.read(')', "Expected () as an argument list")
if self.maybe(':'):
if self.lookahead('{'):
code = self.__read_code()
return self.__return(lang.Method(identifiers, args, code, True, False), doc)
if len(identifiers) > 1:
self.error("Multiple identifiers are not allowed in assignment")
if self.lookahead(component_name_lookahead):
return self.__return(lang.Assignment(identifiers[0], self.__read_comp()), doc)
value = self.__read_expression()
return self.__return(lang.Assignment(identifiers[0], value), doc)
elif self.maybe('{'):
if len(identifiers) > 1:
self.error("Multiple identifiers are not allowed in assignment scope")
values = []
while not self.maybe('}'):
name = self.read(nested_identifier_re, "Expected identifier in assignment scope")
self.read(':', "Expected : after identifier in assignment scope")
value = self.__read_expression()
values.append(lang.Assignment(name, value))
return self.__return(lang.AssignmentScope(identifiers[0], values), doc)
else:
self.error("Unexpected identifier(s): %s" %",".join(identifiers))
def __read_function(self, async_f = False):
doc = self.__get_next_doc()
name = self.read(identifier_re, "Expected identifier")
args = []
self.read('(', "Expected (argument-list) in function declaration")
if not self.maybe(')'):
args = self.__read_list(identifier_re, ',', "Expected argument list")
self.read(')', "Expected ) at the end of argument list")
code = self.__read_code()
return self.__return(lang.Method([name], args, code, False, async_f), doc)
def __read_json_value(self):
value = self.maybe(kw_re)
if value is not None:
return value
value = self.maybe(number_re)
if value is not None:
return value
value = self.maybe(string_re)
if value is not None:
return lang.unescape_string(value[1:-1])
if self.lookahead('{'):
return self.__read_json_object()
if self.lookahead('['):
			return self.__read_json_list()
def __read_json_list(self):
self.read('[', "Expect JSON list starts with [")
result = []
while not self.maybe(']'):
			result.append(self.__read_json_value())
if self.maybe(']'):
break
self.read(',', "Expected , as a JSON list delimiter")
return result
def __read_json_object(self):
self.read('{', "Expected JSON object starts with {")
object = OrderedDict()
while not self.maybe('}'):
name = self.maybe(identifier_re)
if not name:
name = self.read(string_re, "Expected string or identifier as property name")
self.read(':', "Expected : after property name")
value = self.__read_json_value()
object[name] = value
self.maybe(json_object_value_delimiter_re)
return object
def __read_scope_decl(self):
if self.maybe('ListElement'):
doc = self.__get_next_doc()
return self.__return(lang.ListElement(self.__read_json_object()), doc)
elif self.maybe('Behavior'):
self.read("on", "Expected on keyword after Behavior declaration")
doc = self.__get_next_doc()
targets = self.__read_list(nested_identifier_re, ",", "Expected identifier list after on keyword")
self.read("{", "Expected { after identifier list in behavior declaration")
comp = self.__read_comp()
self.read("}", "Expected } after behavior animation declaration")
return self.__return(lang.Behavior(targets, comp), doc)
elif self.maybe('signal'):
doc = self.__get_next_doc()
name = self.read(identifier_re, "Expected identifier in signal declaration")
self.__read_statement_end()
return self.__return(lang.Signal(name), doc)
elif self.maybe('property'):
return self.__read_property()
elif self.maybe('id'):
doc = self.__get_next_doc()
self.read(':', "Expected : after id keyword")
name = self.read(identifier_re, "Expected identifier in id assignment")
self.__read_statement_end()
return self.__return(lang.IdAssignment(name), doc)
elif self.maybe('const'):
doc = self.__get_next_doc()
type = self.read(property_type_re, "Expected type after const keyword")
name = self.read(component_name, "Expected Capitalised const name")
self.read(':', "Expected : after const identifier")
value = self.__read_json_value()
self.__read_statement_end()
return self.__return(lang.Const(type, name, value), doc)
elif self.maybe('async'):
self.error("async fixme")
elif self.maybe('function'):
return self.__read_function()
elif self.maybe('async'):
self.read('function', "Expected function after async")
return self.__read_function(async_f = True)
elif self.lookahead(component_name_lookahead):
return self.__read_comp()
else:
identifiers = self.__read_list(nested_identifier_re, ",", "Expected identifier (or identifier list)")
return self.__read_rules_with_id(identifiers)
def __read_comp(self):
doc = self.__get_next_doc()
comp_name = self.read(component_name, "Expected component name")
self.read(r'{', "Expected {")
children = []
while not self.maybe('}'):
children.append(self.__read_scope_decl())
return self.__return(lang.Component(comp_name, children), doc)
def parse(self, parse_all = True):
while self.maybe('import'):
self.read(rest_of_the_line_re, "Skip to the end of the line failed")
r = [self.__read_comp()]
self._skip()
if parse_all:
if self.__pos < len(self.__text):
self.error("Extra text after component declaration")
return r
def parse(data):
global doc_root_component
doc_root_component = None
parser = Parser(data)
return parser.parse()
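# Illustrative usage sketch (not part of the original module; the QML-like
# source string and the printed form of lang.Component are assumptions):
#
#   source = 'Rectangle { width: 100; color: "red"; Text { text: "hi"; } }'
#   components = parse(source)   # returns a list with one lang.Component tree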
| pureqml/qmlcore | compiler/grammar2.py | Python | mit | 21,146 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('app', '0008_playlistitem_network'),
]
operations = [
migrations.AddField(
model_name='playlistitem',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2014, 10, 6, 10, 0, 29, 893833), auto_now_add=True),
preserve_default=False,
),
]
| m-vdb/ourplaylists | ourplaylists/app/migrations/0009_playlistitem_created_at.py | Python | mit | 527 |
#!/usr/bin/env python
from math import pi, sin, log, exp, atan
DEG_TO_RAD = pi / 180
RAD_TO_DEG = 180 / pi
def minmax (a,b,c):
a = max(a,b)
a = min(a,c)
return a
class GoogleProjection:
"""
Google projection transformations. Sourced from the OSM.
Have not taken the time to figure out how this works.
"""
def __init__(self, levels=18):
self.Bc = []
self.Cc = []
self.zc = []
self.Ac = []
c = 256
for d in range(levels + 1):
            e = c/2
self.Bc.append(c/360.0)
self.Cc.append(c/(2 * pi))
self.zc.append((e,e))
self.Ac.append(c)
c *= 2
def fromLLtoPixel(self, ll, zoom):
d = self.zc[zoom]
e = round(d[0] + ll[0] * self.Bc[zoom])
f = minmax(sin(DEG_TO_RAD * ll[1]),-0.9999,0.9999)
g = round(d[1] + 0.5*log((1+f)/(1-f))*-self.Cc[zoom])
return (e,g)
def fromPixelToLL(self, px, zoom):
e = self.zc[zoom]
f = (px[0] - e[0])/self.Bc[zoom]
g = (px[1] - e[1])/-self.Cc[zoom]
h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi)
return (f,h)
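# Quick self-check added for illustration (not part of the original module):
# a (lon, lat) pair should survive a round trip through pixel space, up to the
# rounding done in fromLLtoPixel.
if __name__ == "__main__":
    proj = GoogleProjection(levels=18)
    px = proj.fromLLtoPixel((-0.1278, 51.5074), 10)   # (lon, lat) for London
    lon, lat = proj.fromPixelToLL(px, 10)
    print(px, (lon, lat))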
| onyxfish/invar | invar/projections.py | Python | mit | 1,202 |
import pytest
import pandas as pd
from lcdblib.pandas import utils
@pytest.fixture(scope='session')
def sample_table():
metadata = {
'sample': ['one', 'two'],
'tissue': ['ovary', 'testis']
}
return pd.DataFrame(metadata)
def test_cartesian_df(sample_table):
df2 = pd.DataFrame({'num': [100, 200]})
result = utils.cartesian_product(sample_table, df2)
# Compare a slice
sf = result.iloc[0, :].sort_index()
sf.name = ''
test_sf = pd.Series({'sample': 'one', 'tissue': 'ovary', 'num': 100}, name='').sort_index()
assert sf.equals(test_sf)
assert result.shape == (4, 3)
def test_cartesian_sf(sample_table):
sf2 = pd.Series([100, 200], name='num')
result = utils.cartesian_product(sample_table, sf2)
# Compare a slice
sf = result.iloc[0, :].sort_index()
sf.name = ''
test_sf = pd.Series({'sample': 'one', 'tissue': 'ovary', 'num': 100}, name='').sort_index()
assert sf.equals(test_sf)
assert result.shape == (4, 3)
def test_cartesian_dict(sample_table):
df2 = {'num': [100, 200]}
result = utils.cartesian_product(sample_table, df2)
# Compare a slice
sf = result.iloc[0, :].sort_index()
sf.name = ''
test_sf = pd.Series({'sample': 'one', 'tissue': 'ovary', 'num': 100}, name='').sort_index()
assert sf.equals(test_sf)
assert result.shape == (4, 3)
| lcdb/lcdblib | tests/test_pandas_utils.py | Python | mit | 1,376 |
import io
import tempfile
import unittest
from zipencrypt import ZipFile, ZipInfo, ZIP_DEFLATED
from zipencrypt.zipencrypt2 import _ZipEncrypter, _ZipDecrypter
class TestEncryption(unittest.TestCase):
def setUp(self):
self.plain = "plaintext" * 3
self.pwd = b"password"
def test_roundtrip(self):
encrypt = _ZipEncrypter(self.pwd)
encrypted = map(encrypt, self.plain)
decrypt = _ZipDecrypter(self.pwd)
decrypted = "".join(map(decrypt, encrypted))
self.assertEqual(self.plain, decrypted)
class TestZipfile(unittest.TestCase):
def setUp(self):
self.zipfile = io.BytesIO()
self.plain = "plaintext" * 3
self.pwd = "password"
def test_writestr(self):
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.writestr("file1.txt", self.plain, pwd=self.pwd)
with ZipFile(self.zipfile) as zipfd:
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
def test_writestr_keep_file_open(self):
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.writestr("file1.txt", self.plain, pwd=self.pwd)
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
def test_writestr_with_zipinfo(self):
zinfo = ZipInfo(filename="file1.txt")
zinfo.flag_bits |= 0x8
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.writestr(zinfo, self.plain, pwd=self.pwd)
with ZipFile(self.zipfile) as zipfd:
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
def test_writestr_with_zipinfo_keep_file_open(self):
zinfo = ZipInfo(filename="file1.txt")
zinfo.flag_bits |= 0x8
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.writestr(zinfo, self.plain, pwd=self.pwd)
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
def test_write_with_password(self):
with tempfile.NamedTemporaryFile(bufsize=0) as fd:
fd.write(self.plain)
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.write(fd.name, arcname="file1.txt", pwd=self.pwd)
with ZipFile(self.zipfile) as zipfd:
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
def test_write_with_password_keep_file_open(self):
with tempfile.NamedTemporaryFile(bufsize=0) as fd:
fd.write(self.plain)
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.write(fd.name, arcname="file1.txt", pwd=self.pwd)
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
def test_setcompressiontype(self):
with ZipFile(self.zipfile, mode="w") as zipfd:
zipfd.writestr("file1.txt", self.plain, compress_type=ZIP_DEFLATED, pwd=self.pwd)
with ZipFile(self.zipfile) as zipfd:
content = zipfd.read("file1.txt", pwd=self.pwd)
self.assertEqual(self.plain, content)
if __name__ == '__main__':
unittest.main()
| devthat/zipencrypt | tests/python2/test_encryption.py | Python | mit | 3,249 |
"""
Abstract base for specific IP transports (TCP or UDP).
* It starts and stops a socket
* It handles callbacks for incoming frame service types
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import asyncio
import logging
from typing import Callable, cast
from xknx.exceptions import CommunicationError
from xknx.knxip import HPAI, KNXIPFrame, KNXIPServiceType
TransportCallbackType = Callable[[KNXIPFrame, HPAI, "KNXIPTransport"], None]
knx_logger = logging.getLogger("xknx.knx")
class KNXIPTransport(ABC):
"""Abstract base class for KNX/IP transports."""
callbacks: list[KNXIPTransport.Callback]
local_hpai: HPAI
remote_addr: tuple[str, int]
transport: asyncio.BaseTransport | None
class Callback:
"""Callback class for handling callbacks for different 'KNX service types' of received packets."""
def __init__(
self,
callback: TransportCallbackType,
service_types: list[KNXIPServiceType] | None = None,
):
"""Initialize Callback class."""
self.callback = callback
self.service_types = service_types or []
def has_service(self, service_type: KNXIPServiceType) -> bool:
"""Test if callback is listening for given service type."""
return not self.service_types or service_type in self.service_types
def register_callback(
self,
callback: TransportCallbackType,
service_types: list[KNXIPServiceType] | None = None,
) -> KNXIPTransport.Callback:
"""Register callback."""
if service_types is None:
service_types = []
callb = KNXIPTransport.Callback(callback, service_types)
self.callbacks.append(callb)
return callb
def unregister_callback(self, callb: KNXIPTransport.Callback) -> None:
"""Unregister callback."""
self.callbacks.remove(callb)
def handle_knxipframe(self, knxipframe: KNXIPFrame, source: HPAI) -> None:
"""Handle KNXIP Frame and call all callbacks matching the service type ident."""
handled = False
for callback in self.callbacks:
if callback.has_service(knxipframe.header.service_type_ident):
callback.callback(knxipframe, source, self)
handled = True
if not handled:
knx_logger.debug(
"Unhandled: %s from: %s",
knxipframe.header.service_type_ident,
source,
)
@abstractmethod
async def connect(self) -> None:
"""Connect transport."""
@abstractmethod
def send(self, knxipframe: KNXIPFrame, addr: tuple[str, int] | None = None) -> None:
"""Send KNXIPFrame via transport."""
def getsockname(self) -> tuple[str, int]:
"""Return socket IP and port."""
if self.transport is None:
raise CommunicationError(
"No transport defined. Socket information not resolveable"
)
return cast(tuple[str, int], self.transport.get_extra_info("sockname"))
def getremote(self) -> str | None:
"""Return peername."""
return (
self.transport.get_extra_info("peername")
if self.transport is not None
else None
)
def stop(self) -> None:
"""Stop socket."""
if self.transport is not None:
self.transport.close()
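# Minimal usage sketch (illustrative only, not part of xknx): a concrete
# subclass implements connect()/send(), and consumers register callbacks for
# the service types they care about, e.g.
#
#   def on_frame(frame: KNXIPFrame, source: HPAI, transport: KNXIPTransport) -> None:
#       print("received", frame.header.service_type_ident, "from", source)
#
#   cb = transport.register_callback(on_frame)   # no list given: all service types
#   ...
#   transport.unregister_callback(cb)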
| XKNX/xknx | xknx/io/transport/ip_transport.py | Python | mit | 3,449 |
class Base:
def meth(self):
print("in Base meth")
class Sub(Base):
def meth(self):
print("in Sub meth")
return super().meth()
a = Sub()
a.meth()
| aitjcize/micropython | tests/basics/class-super.py | Python | mit | 181 |
from neo.Core.UIntBase import UIntBase
class UInt256(UIntBase):
def __init__(self, data=None):
super(UInt256, self).__init__(num_bytes=32, data=data)
@staticmethod
def ParseString(value):
"""
Parse the input str `value` into UInt256
Raises:
ValueError: if the input `value` length (after '0x' if present) != 64
"""
if value[0:2] == '0x':
value = value[2:]
if not len(value) == 64:
raise ValueError(f"Invalid UInt256 input: {len(value)} chars != 64 chars")
reversed_data = bytearray.fromhex(value)
reversed_data.reverse()
return UInt256(data=reversed_data)
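if __name__ == '__main__':
    # Illustrative only (not part of the original module): ParseString accepts
    # 64 hex characters, optionally prefixed with '0x', and stores the bytes
    # reversed (little-endian); any other length raises ValueError.
    h = UInt256.ParseString('0x' + 'ab' * 32)
    print(h)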
| hal0x2328/neo-python | neo/Core/UInt256.py | Python | mit | 688 |
'''
Build a tweet sentiment analyzer
'''
from __future__ import print_function
import cPickle as pickle
import sys
import time
from collections import OrderedDict
import numpy
import theano
import theano.tensor as tensor
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import imdb
datasets = {'imdb': (imdb.load_data, imdb.prepare_data)}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
# NOTE (bitesandbytes) : Important; set minibatch_size = 1 ?
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
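# For example (added for illustration), get_minibatches_idx(5, 2) returns
# [(0, array([0, 1])), (1, array([2, 3])), (2, array([4]))]: two full
# minibatches plus a final minibatch holding the leftover index.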
# NOTE (bitesandbytes) : Not needed.
def get_dataset(name):
return datasets[name][0], datasets[name][1]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.items():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.items():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
Global (not LSTM) parameter. For the embedding and the classifier.
"""
params = OrderedDict()
params = get_layer(options['encoder'])[0](options,
params,
prefix=options['encoder'])
# classifier
params['U'] = 0.01 * numpy.random.randn(options['dim_proj'],
options['ydim']).astype(config.floatX)
params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
"""
Init the LSTM parameter:
:see: init_params
"""
W = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],))
params[_p(prefix, 'b')] = b.astype(config.floatX)
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
    # Precompute the input contribution to all four gates (i, f, o, c) at once: dot(state_below, W) + b
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=[tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj),
tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj)],
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval[0]
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, mask, y, cost):
""" Stochastic Gradient Descent
    :note: A more complicated version of sgd than needed. It is
        written this way to mirror the interface of adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, mask, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
    tparams: Theano SharedVariable
Model parameters
grads: Theano variable
        Gradients of cost w.r.t. the parameters
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
        Objective function to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
    tparams: Theano SharedVariable
Model parameters
grads: Theano variable
        Gradients of cost w.r.t. the parameters
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
        Objective function to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
x = tensor.matrix('x', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
y = tensor.vector('y', dtype='int64')
n_timesteps = x.shape[0]
n_samples = x.shape[1]
proj = get_layer(options['encoder'])[1](tparams, x, options,
prefix=options['encoder'],
mask=mask)
if options['encoder'] == 'lstm':
proj = (proj * mask[:, :, None]).sum(axis=0)
proj = proj / mask.sum(axis=0)[:, None]
if options['use_dropout']:
proj = dropout_layer(proj, use_noise, trng)
pred = tensor.nnet.softmax(tensor.dot(proj.T, tparams['U']) + tparams['b'])
f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
f_pred = theano.function([x, mask], pred.argmax(axis=1), name='f_pred')
off = 1e-8
if pred.dtype == 'float16':
off = 1e-6
cost = -tensor.log(pred[tensor.arange(n_samples), y] + off).mean()
return use_noise, x, mask, y, f_pred_prob, f_pred, cost
def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
""" If you want to use a trained model, this is useful to compute
the probabilities of new examples.
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 2)).astype(config.floatX)
n_done = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
pred_probs = f_pred_prob(x, mask)
probs[valid_index, :] = pred_probs
n_done += len(valid_index)
if verbose:
print('%d/%d samples classified' % (n_done, n_samples))
return probs
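# Illustrative call (not part of the original tutorial): once build_model has
# produced f_pred_prob and the imdb data is loaded, validation-set
# probabilities could be computed with
#   kf_valid = get_minibatches_idx(len(valid[0]), 64)
#   probs = pred_probs(f_pred_prob, prepare_data, valid, kf_valid)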
def pred_error(f_pred, prepare_data, data, iterator, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
preds = f_pred(x, mask)
targets = numpy.array(data[1])[valid_index]
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
def train_lstm(
dim_proj=128, # word embeding dimension and LSTM number of hidden units.
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.0001, # Learning rate for sgd (not used for adadelta and rmsprop)
n_words=10000, # Vocabulary size
optimizer=adadelta,
    # sgd, adadelta and rmsprop available; sgd is very hard to use, not recommended (it probably needs momentum and a decaying learning rate).
encoder='lstm', # TODO: can be removed must be lstm.
saveto='lstm_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequence longer then this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='imdb',
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
):
# Model options
model_options = locals().copy()
print("model options", model_options)
load_data, prepare_data = get_dataset(dataset)
print('Loading data')
train, valid, test = load_data(n_words=n_words, valid_portion=0.05,
maxlen=maxlen)
if test_size > 0:
# The test set is sorted by size, but we want to keep random
# size example. So we must select a random selection of the
# examples.
idx = numpy.arange(len(test[0]))
numpy.random.shuffle(idx)
idx = idx[:test_size]
test = ([test[0][n] for n in idx], [test[1][n] for n in idx])
ydim = numpy.max(train[1]) + 1
    # TODO(bitesandbytes): Change ydim to |num words| + 1 (0 -> no word | empty)
model_options['ydim'] = ydim
print('Building model')
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
# use_noise is for dropout
(use_noise, x, mask,
y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
f_cost = theano.function([x, mask, y], cost, name='f_cost')
grads = tensor.grad(cost, wrt=list(tparams.values()))
f_grad = theano.function([x, mask, y], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, mask, y, cost)
print('Optimization')
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
print("%d train examples" % len(train[0]))
print("%d valid examples" % len(valid[0]))
print("%d test examples" % len(test[0]))
history_errs = []
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) // batch_size
if saveFreq == -1:
saveFreq = len(train[0]) // batch_size
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in range(max_epochs):
n_samples = 0
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
for _, train_index in kf:
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
y = [train[1][t] for t in train_index]
x = [train[0][t] for t in train_index]
# Get the data in numpy.ndarray format
# This swap the axis!
# Return something of shape (minibatch maxlen, n samples)
x, mask, y = prepare_data(x, y)
n_samples += x.shape[1]
cost = f_grad_shared(x, mask, y)
f_update(lrate)
if numpy.isnan(cost) or numpy.isinf(cost):
print('bad cost detected: ', cost)
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print('Epoch ', eidx, 'Update ', uidx, 'Cost ', cost)
if saveto and numpy.mod(uidx, saveFreq) == 0:
print('Saving...')
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pickle.dump(model_options, open('%s.pkl' % saveto, 'wb'), -1)
print('Done')
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = pred_error(f_pred, prepare_data, train, kf)
valid_err = pred_error(f_pred, prepare_data, valid,
kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
history_errs.append([valid_err, test_err])
if (best_p is None or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
print('Train ', train_err, 'Valid ', valid_err,
'Test ', test_err)
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print('Early Stop!')
estop = True
break
print('Seen %d samples' % n_samples)
if estop:
break
except KeyboardInterrupt:
print("Training interupted")
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
use_noise.set_value(0.)
kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
train_err = pred_error(f_pred, prepare_data, train, kf_train_sorted)
valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
print('Train ', train_err, 'Valid ', valid_err, 'Test ', test_err)
if saveto:
numpy.savez(saveto, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
    print('The code ran for %d epochs, with %f sec/epoch' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1))))
print(('Training took %.1fs' %
(end_time - start_time)), file=sys.stderr)
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm(
max_epochs=100,
test_size=500,
)
| bitesandbytes/upgraded-system | src/vanilla_lstm.py | Python | mit | 22,530 |
import rinocloud
import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from clint.textui.progress import Bar as ProgressBar
from clint.textui import progress
import json
def upload(filepath=None, meta=None):
encoder = MultipartEncoder(
fields={
'file': ('file', open(filepath, 'rb')),
'json': json.dumps(meta)
}
)
encoder_len = encoder.len
bar = ProgressBar(expected_size=encoder_len, filled_char='#')
def callback(monitor):
bar.show(monitor.bytes_read)
m = MultipartEncoderMonitor(encoder, callback)
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': m.content_type
}
try:
return requests.post(rinocloud.urls["upload"], data=m, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
def upload_meta(meta):
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest'
}
try:
return requests.post(rinocloud.urls["upload_meta"], json=meta, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
def create_folder(meta):
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest'
}
try:
return requests.post(rinocloud.urls["create_folder"], json=meta, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
def get_metadata(_id, truncate_metadata=True):
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest'
}
try:
return requests.post(rinocloud.urls["get_metadata"], json={'id': _id, 'truncate_metadata': truncate_metadata}, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
def download(_id, filepath, size):
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest'
}
try:
r = requests.get(rinocloud.urls["download"] + str(_id), stream=True, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
with open(filepath, 'wb') as f:
total_length = size
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length / 1024) + 1):
if chunk:
f.write(chunk)
f.flush()
return r
def query(query, sort=None, truncate_metadata=True, limit=20, offset=0):
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest'
}
payload = {
'query': query,
'truncate_metadata': truncate_metadata,
'limit': limit,
'offset': offset
}
if sort:
payload["sort"] = sort
try:
return requests.post(rinocloud.urls["query"], json=payload, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
def count(query):
headers = {
'Authorization': 'Token %s' % rinocloud.api_key,
'X-Requested-With': 'XMLHttpRequest'
}
try:
return requests.post(rinocloud.urls["count"], json={'query': query}, headers=headers)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
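# Illustrative usage sketch (not part of the original module; the token and
# file names are placeholders): callers are expected to configure the module
# globals (rinocloud.api_key, rinocloud.urls, rinocloud.api_domain) first, e.g.
#   rinocloud.api_key = "<your token>"
#   r = upload("results.csv", meta={"name": "results.csv", "tags": ["run-1"]})
#   print(r.status_code)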
| rinocloud/rinocloud-python | rinocloud/http.py | Python | mit | 4,102 |
import os
import json
import pytest
from .. import bot, PACKAGEDIR
EXAMPLE_TWEET = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'example-tweet.json'), 'r'))
EXAMPLE_RETWEET = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'retweeted-status.json'), 'r'))
EXAMPLE_NARCISSISTIC = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'narcissistic-tweet.json'), 'r'))
TESTDB = 'test_goldstar.db'
def test_recipients():
handler = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True)
recipients = handler.get_recipients()
assert len(recipients) == 1
assert recipients[0]['screen_name'] == 'exoplaneteer'
def test_responses():
handler = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True)
responses = handler.handle()
assert len(responses) == 1 # only 1 star handed out
assert len(responses[0]) < 140 # max tweet length
assert responses[0] == '@exoplaneteer Congratulations, you just earned a 🌟 from @GeertHub! Your total is 1. https://twitter.com/GeertHub/status/745616020581265408'
def test_retweet():
"""A retweet should not result in a star!"""
with pytest.raises(bot.InvalidTweetException):
handler = bot.TweetHandler(EXAMPLE_RETWEET, dbfile=TESTDB, dry_run=True)
def test_narcisstic():
"""Don't allow people to give stars to themselves!"""
handler = bot.TweetHandler(EXAMPLE_NARCISSISTIC, dbfile=TESTDB, dry_run=True)
responses = handler.handle()
assert len(responses) == 1
assert responses[0] == "@exoplaneteer I'm sorry, Dan. I'm afraid I can't do that."
| mrtommyb/AstroFuckOffs | bot/tests/test_bot.py | Python | mit | 1,600 |
"""Defines the SMEFT class that provides the main API to smeftrunner."""
from . import rge
from . import io
from . import definitions
from . import beta
from . import smpar
import pylha
from collections import OrderedDict
from math import sqrt
import numpy as np
import ckmutil.phases, ckmutil.diag
class SMEFT(object):
"""Parameter point in the Standard Model Effective Field Theory."""
def __init__(self):
"""Initialize the SMEFT instance."""
self.C_in = None
self.scale_in = None
self.scale_high = None
def set_initial(self, C_in, scale_in, scale_high):
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`."""
self.C_in = C_in
self.scale_in = scale_in
self.scale_high = scale_high
def load_initial(self, streams):
"""Load the initial values for parameters and Wilson coefficients from
one or several files.
`streams` should be a tuple of file-like objects strings."""
d = {}
for stream in streams:
s = io.load(stream)
if 'BLOCK' not in s:
raise ValueError("No BLOCK found")
d.update(s['BLOCK'])
d = {'BLOCK': d}
C = io.wc_lha2dict(d)
sm = io.sm_lha2dict(d)
C.update(sm)
C = definitions.symmetrize(C)
self.C_in = C
def set_initial_wcxf(self, wc, scale_high=None, get_smpar=False):
"""Load the initial values for Wilson coefficients from a
wcxf.WC instance.
Parameters:
- `scale_high`: since Wilson coefficients are dimensionless in
smeftrunner but not in WCxf, the high scale in GeV has to be provided.
If this parameter is None (default), either a previously defined
value will be used, or the scale attribute of the WC instance will
be used.
        - `get_smpar`: boolean, optional, defaults to False. If True, an attempt
is made to determine the SM parameters from the requirement of
reproducing the correct SM masses and mixings at the electroweak
scale. As approximations are involved, the result might or might not
be reliable, depending on the size of the Wilson coefficients
affecting the SM masses and mixings. If False, Standard Model
parameters have to be provided separately and are assumed to be in
the weak basis used for the Warsaw basis as defined in WCxf,
i.e. in the basis where the down-type and charged lepton mass
matrices are diagonal.
"""
import wcxf
if wc.eft != 'SMEFT':
raise ValueError("Wilson coefficients use wrong EFT.")
if wc.basis != 'Warsaw':
raise ValueError("Wilson coefficients use wrong basis.")
if scale_high is not None:
self.scale_high = scale_high
elif self.scale_high is None:
self.scale_high = wc.scale
C = wcxf.translators.smeft.wcxf2arrays(wc.dict)
keys_dim5 = ['llphiphi']
keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f + definitions.WC_keys_4f) - set(keys_dim5))
self.scale_in = wc.scale
for k in keys_dim5:
if k in C:
C[k] = C[k]*self.scale_high
for k in keys_dim6:
if k in C:
C[k] = C[k]*self.scale_high**2
C = definitions.symmetrize(C)
# fill in zeros for missing WCs
for k, s in definitions.C_keys_shape.items():
if k not in C and k not in definitions.SM_keys:
if s == 1:
C[k] = 0
else:
C[k] = np.zeros(s)
if self.C_in is None:
self.C_in = C
else:
self.C_in.update(C)
if get_smpar:
self.C_in.update(self._get_sm_scale_in())
def load_wcxf(self, stream, get_smpar=True):
"""Load the initial values for Wilson coefficients from
a file-like object or a string in WCxf format.
Note that Standard Model parameters have to be provided separately
and are assumed to be in the weak basis used for the Warsaw basis as
defined in WCxf, i.e. in the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
wc = wcxf.WC.load(stream)
self.set_initial_wcxf(wc, get_smpar=get_smpar)
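    # Illustrative workflow (added as a sketch, not part of the original class):
    #   s = SMEFT()
    #   s.load_wcxf(open("wc.yaml"))           # initial WCs plus derived SM parameters
    #   C_out = s.rgevolve(scale_out=91.1876)  # run down to the EW scale
    #   s.dump_wcxf(C_out, 91.1876)            # export in WCxf format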
def dump(self, C_out, scale_out=None, stream=None, fmt='lha', skip_redundant=True):
"""Return a string representation of the parameters and Wilson
coefficients `C_out` in DSixTools output format. If `stream` is
specified, export it to a file. `fmt` defaults to `lha` (the SLHA-like
DSixTools format), but can also be `json` or `yaml` (see the
pylha documentation)."""
C = OrderedDict()
if scale_out is not None:
C['SCALES'] = {'values': [[1, self.scale_high], [2, scale_out]]}
else:
C['SCALES'] = {'values': [[1, self.scale_high]]}
sm = io.sm_dict2lha(C_out)['BLOCK']
C.update(sm)
wc = io.wc_dict2lha(C_out, skip_redundant=skip_redundant)['BLOCK']
C.update(wc)
return pylha.dump({'BLOCK': C}, fmt=fmt, stream=stream)
def get_wcxf(self, C_out, scale_out):
"""Return the Wilson coefficients `C_out` as a wcxf.WC instance.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
C = self.rotate_defaultbasis(C_out)
d = wcxf.translators.smeft.arrays2wcxf(C)
basis = wcxf.Basis['SMEFT', 'Warsaw']
d = {k: v for k, v in d.items() if k in basis.all_wcs and v != 0}
keys_dim5 = ['llphiphi']
keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f
+ definitions.WC_keys_4f) - set(keys_dim5))
for k in d:
if k.split('_')[0] in keys_dim5:
d[k] = d[k] / self.scale_high
for k in d:
if k.split('_')[0] in keys_dim6:
d[k] = d[k] / self.scale_high**2
d = wcxf.WC.dict2values(d)
wc = wcxf.WC('SMEFT', 'Warsaw', scale_out, d)
return wc
def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
"""Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
wc = self.get_wcxf(C_out, scale_out)
return wc.dump(fmt=fmt, stream=stream, **kwargs)
def rgevolve(self, scale_out, **kwargs):
"""Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`."""
self._check_initial()
return rge.smeft_evolve(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out,
**kwargs)
    def rgevolve_leadinglog(self, scale_out):
        """Compute the leading logarithmic approximation to the solution
        of the SMEFT RGEs from the initial scale to `scale_out`.
        Returns a dictionary with parameters and Wilson coefficients.
        Much faster but less precise than `rgevolve`.
"""
self._check_initial()
return rge.smeft_evolve_leadinglog(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out)
def _check_initial(self):
"""Check if initial values and scale as well as the new physics scale
have been set."""
if self.C_in is None:
raise Exception("You have to specify the initial conditions first.")
if self.scale_in is None:
raise Exception("You have to specify the initial scale first.")
if self.scale_high is None:
raise Exception("You have to specify the high scale first.")
def rotate_defaultbasis(self, C):
"""Rotate all parameters to the basis where the running down-type quark
and charged lepton mass matrices are diagonal and where the running
up-type quark mass matrix has the form V.S, with V unitary and S real
diagonal, and where the CKM and PMNS matrices have the standard
phase convention."""
v = sqrt(2*C['m2'].real/C['Lambda'].real)
Mep = v/sqrt(2) * (C['Ge'] - C['ephi'] * v**2/self.scale_high**2/2)
Mup = v/sqrt(2) * (C['Gu'] - C['uphi'] * v**2/self.scale_high**2/2)
Mdp = v/sqrt(2) * (C['Gd'] - C['dphi'] * v**2/self.scale_high**2/2)
Mnup = -v**2 * C['llphiphi']
UeL, Me, UeR = ckmutil.diag.msvd(Mep)
UuL, Mu, UuR = ckmutil.diag.msvd(Mup)
UdL, Md, UdR = ckmutil.diag.msvd(Mdp)
Unu, Mnu = ckmutil.diag.mtakfac(Mnup)
UuL, UdL, UuR, UdR = ckmutil.phases.rephase_standard(UuL, UdL, UuR, UdR)
Unu, UeL, UeR = ckmutil.phases.rephase_pmns_standard(Unu, UeL, UeR)
return definitions.flavor_rotation(C, Uq=UdL, Uu=UuR, Ud=UdR, Ul=UeL, Ue=UeR)
def _run_sm_scale_in(self, C_out, scale_sm=91.1876):
"""Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale."""
# initialize an empty SMEFT instance
smeft_sm = SMEFT()
C_in_sm = beta.C_array2dict(np.zeros(9999))
# set the SM parameters to the values obtained from smpar.smeftpar
C_SM = smpar.smeftpar(scale_sm, self.scale_high, C_out, basis='Warsaw')
C_SM = {k: v for k, v in C_SM.items() if k in definitions.SM_keys}
# set the Wilson coefficients at the EW scale to C_out
C_in_sm.update(C_out)
C_in_sm.update(C_SM)
smeft_sm.set_initial(C_in_sm, scale_sm, scale_high=self.scale_high)
# run up (with 1% relative precision, ignore running of Wilson coefficients)
C_SM_high = smeft_sm.rgevolve(self.scale_in, newphys=False, rtol=0.01, atol=1)
return {k: v for k, v in C_SM_high.items() if k in definitions.SM_keys}
def _get_sm_scale_in(self, scale_sm=91.1876):
"""Get an estimate of the SM parameters at the input scale by running
them from the EW scale using constant values for the Wilson coefficients
(corresponding to their leading log approximated values at the EW
scale).
Note that this is not guaranteed to work and will fail if some of the
Wilson coefficients (the ones affecting the extraction of SM parameters)
are large."""
        # initialize a copy of ourselves
_smeft = SMEFT()
_smeft.set_initial(self.C_in, self.scale_in, self.scale_high)
# Step 1: run the SM up, using the WCs at scale_input as (constant) estimate
_smeft.C_in.update(self._run_sm_scale_in(self.C_in, scale_sm=scale_sm))
# Step 2: run the WCs down in LL approximation
C_out = _smeft.rgevolve_leadinglog(scale_sm)
# Step 3: run the SM up again, this time using the WCs at scale_sm as (constant) estimate
return self._run_sm_scale_in(C_out, scale_sm=scale_sm)
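# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Minimal end-to-end run of the class above, assuming a WCxf YAML file
# "wcxf_input.yaml" with SMEFT Warsaw-basis Wilson coefficients exists;
# the file name and the chosen scales are assumptions made for this example.
if __name__ == '__main__':
    smeft = SMEFT()
    with open('wcxf_input.yaml') as _f:
        smeft.load_wcxf(_f)                      # sets C_in, scale_in and scale_high
    _C_out = smeft.rgevolve(scale_out=91.1876)   # run down to the Z-boson mass
    print(smeft.dump_wcxf(_C_out, scale_out=91.1876, fmt='yaml'))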
| DsixTools/python-smeftrunner | smeftrunner/classes.py | Python | mit | 11,842 |
from collections import namedtuple
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
MenuOption = namedtuple("MenuOption", ["requires_action", "menu_text", "filename", "is_untracked"])
CLEAN_WORKING_DIR = "Nothing to commit, working directory clean."
ADD_ALL_UNSTAGED_FILES = " ? All unstaged files"
ADD_ALL_FILES = " + All files"
class GsQuickStageCommand(WindowCommand, GitCommand):
"""
Display a quick panel of unstaged files in the current git repository,
allowing the user to select one or more files for staging.
Display filenames with one of the following indicators:
* [M] modified
* [A] added
* [D] deleted
* [R] renamed/moved
* [C] copied
* [U] updated but unmerged
* [?] untracked
"""
def run(self):
sublime.set_timeout_async(self.run_async)
def run_async(self):
menu_options = self.get_menu_options()
menu_entries = [f.menu_text for f in menu_options]
def on_selection(id):
if id == -1:
return
selection = menu_options[id]
if not selection.requires_action:
return
elif selection.menu_text == ADD_ALL_UNSTAGED_FILES:
self.git("add", "--update", ".")
scope_of_action = "all unstaged files"
elif selection.menu_text == ADD_ALL_FILES:
self.git("add", "--all")
scope_of_action = "all files"
elif selection.is_untracked:
self.git("add", "--", selection.filename)
scope_of_action = "`{}`".format(selection.filename)
else:
self.git("add", "--update", "--", selection.filename)
scope_of_action = "`{}`".format(selection.filename)
sublime.status_message("Successfully added `{}`.".format(
scope_of_action))
sublime.set_timeout_async(self.run_async, 0)
self.window.show_quick_panel(
menu_entries,
on_selection,
flags=sublime.MONOSPACE_FONT
)
def get_menu_options(self):
"""
Determine the git status of the current working directory, and return
a list of menu options for each file that is shown.
"""
status_entries = self.get_status()
menu_options = []
for entry in status_entries:
if entry.working_status in ("M", "D", "?"):
filename = (entry.path if not entry.index_status == "R"
else entry.path + " <- " + entry.path_alt)
menu_text = "[{0}] {1}".format(entry.working_status, filename)
menu_options.append(MenuOption(True, menu_text, filename, entry.index_status == "?"))
if not menu_options:
return [MenuOption(False, CLEAN_WORKING_DIR, None, None)]
menu_options.append(MenuOption(True, ADD_ALL_UNSTAGED_FILES, None, None))
menu_options.append(MenuOption(True, ADD_ALL_FILES, None, None))
return menu_options
| ypersyntelykos/GitSavvy | core/commands/quick_stage.py | Python | mit | 3,133 |
# standard imports
import os
import logging
import traceback
# Qt imports
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QPlainTextEdit
# toolbox imports
from dltb.util.debug import edit
# GUI imports
from ..utils import protect
# logging
LOG = logging.getLogger(__name__)
class QLogHandler(QPlainTextEdit, logging.Handler):
    """A log handler that displays log messages in a QWidget.
    A :py:class:`QLogHandler` can be used like any other
    :py:class:`logging.Handler`, e.g. by adding it to a logger
    via :py:meth:`logging.Logger.addHandler`.
    """
_message_signal = pyqtSignal(str)
def __init__(self, parent=None):
# FIXME[question]: can we use real python multiple inheritance here?
# (that is just super().__init__(*args, **kwargs))
QPlainTextEdit.__init__(self, parent)
logging.Handler.__init__(self)
self.setReadOnly(True)
self._records = []
self._counter = 1
self._message_signal.connect(self.appendMessage)
self._message_signal.emit("Log view initialized")
def __len__(self):
"""The number of lines in this QLogHandler.
"""
return self._counter
def clear(self):
"""Clear this :py:class:QLogHandler.
"""
super().clear()
self._records.clear()
self._counter = 1
self._message_signal.emit("Log view cleared")
@pyqtSlot(str)
def appendMessage(self, message: str):
message = message.replace(os.linesep, '\\n')
self.appendPlainText(message)
self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
def emit(self, record: logging.LogRecord) -> None:
"""Handle a :py:class:logging.logRecord.
"""
# Here we have to be careful: adding the text directly to the
# widget from another thread causes problems: The program
# crashes with the following message:
# QObject::connect: Cannot queue arguments of type 'QTextBlock'
# (Make sure 'QTextBlock' is registered using qRegisterMetaType().)
# Hence we are doing this via a signal now.
self._counter += 1
self.setToolTip(f"List of log records ({self._counter} entries)")
try:
self._records.append(record)
self._message_signal.emit(self.format(record))
except AttributeError as error:
# FIXME[bug/problem]
# When quitting the program while running some background
# thread (e.g. camera loop), we get the following exception:
# AttributeError: 'QLogHandler' does not have a signal with
# the signature _message_signal(QString)
#print(error)
#print(f" type of record: {type(record)}")
#print(f" record: {record}")
#print(f" signal: {self._message_signal}")
pass
@protect
def mouseReleaseEvent(self, event):
cursor = self.cursorForPosition(event.pos())
block = cursor.blockNumber()
print(block, len(self._records))
if block < len(self._records):
print(self._records[block])
record = self._records[block]
LOG.info(f"Trying to open file {record.pathname}, "
f"line {record.lineno}, in an external editor.")
try:
retcode = edit(record.pathname, record.lineno)
if retcode < 0:
LOG.error("Edit command was terminated by signal "
f"{-retcode}")
else:
LOG.info(f"Edit command returned: {retcode}")
except OSError as error:
LOG.error(f"Edit command failed: {error}")
class QExceptionView(QPlainTextEdit):
"""A view for Python exceptions. This is basically a text field in
which a :py:class:`BaseException` can be displayed, including its
stack trace.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.setReadOnly(True)
self._exception = None
self._traceback = None
def setException(self, exception: BaseException) -> None:
"""Set the :py:class:`BaseException` to be displayed in this
:py:class:`QExceptionView`
"""
self._exception = exception
self._traceback = traceback.extract_tb(exception.__traceback__)
        # _traceback is basically a list of traceback.FrameSummary,
# each providing the following attributes:
# - filename
# - line
# - lineno
# - locals
# - name
self.clear()
for m in traceback.format_list(self._traceback):
self.appendPlainText(m)
self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
@protect
    def mouseReleaseEvent(self, event) -> None:
        """Handle a mouse release event. When the mouse is released on a frame
        in the stack trace, open the corresponding code line in an external editor.
"""
cursor = self.cursorForPosition(event.pos())
frame_number = cursor.blockNumber() // 2
if self._traceback is not None and frame_number < len(self._traceback):
self.editFrame(self._traceback[frame_number])
    def editFrame(self, frame: traceback.FrameSummary):
        """Edit the code file described by the given stack frame in an
external editor.
"""
LOG.info(f"Trying to open file {frame.filename}, "
f"line {frame.lineno}, in an external editor.")
try:
retcode = edit(frame.filename, frame.lineno)
if retcode < 0:
LOG.error("Edit command was terminated by signal "
f"{-retcode}")
else:
LOG.info(f"Edit command returned: {retcode}"
f"({'error' if retcode else 'success'})")
except OSError as error:
LOG.error(f"Edit command failed: {error}")
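# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Minimal wiring of QLogHandler into Python's logging machinery inside a small
# Qt application; the formatter string and log level are assumptions made here.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    handler = QLogHandler()
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(logging.INFO)
    logging.info("QLogHandler is now receiving log records")
    handler.show()
    sys.exit(app.exec_())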
| Petr-By/qtpyvis | qtgui/widgets/logging.py | Python | mit | 5,929 |
from __future__ import unicode_literals
import django
from django.core.exceptions import FieldError
from django.test import SimpleTestCase, TestCase
from .models import (
AdvancedUserStat, Child1, Child2, Child3, Child4, Image, LinkedList,
Parent1, Parent2, Product, StatDetails, User, UserProfile, UserStat,
UserStatResult,
)
class ReverseSelectRelatedTestCase(TestCase):
def setUp(self):
user = User.objects.create(username="test")
UserProfile.objects.create(user=user, state="KS", city="Lawrence")
results = UserStatResult.objects.create(results='first results')
userstat = UserStat.objects.create(user=user, posts=150,
results=results)
StatDetails.objects.create(base_stats=userstat, comments=259)
user2 = User.objects.create(username="bob")
results2 = UserStatResult.objects.create(results='moar results')
advstat = AdvancedUserStat.objects.create(user=user2, posts=200, karma=5,
results=results2)
StatDetails.objects.create(base_stats=advstat, comments=250)
p1 = Parent1(name1="Only Parent1")
p1.save()
c1 = Child1(name1="Child1 Parent1", name2="Child1 Parent2", value=1)
c1.save()
p2 = Parent2(name2="Child2 Parent2")
p2.save()
c2 = Child2(name1="Child2 Parent1", parent2=p2, value=2)
c2.save()
def test_basic(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userprofile").get(username="test")
self.assertEqual(u.userprofile.state, "KS")
def test_follow_next_level(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat__results").get(username="test")
self.assertEqual(u.userstat.posts, 150)
self.assertEqual(u.userstat.results.results, 'first results')
def test_follow_two(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userprofile", "userstat").get(username="test")
self.assertEqual(u.userprofile.state, "KS")
self.assertEqual(u.userstat.posts, 150)
def test_follow_two_next_level(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat__results", "userstat__statdetails").get(username="test")
self.assertEqual(u.userstat.results.results, 'first results')
self.assertEqual(u.userstat.statdetails.comments, 259)
def test_forward_and_back(self):
with self.assertNumQueries(1):
stat = UserStat.objects.select_related("user__userprofile").get(user__username="test")
self.assertEqual(stat.user.userprofile.state, 'KS')
self.assertEqual(stat.user.userstat.posts, 150)
def test_back_and_forward(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat").get(username="test")
self.assertEqual(u.userstat.user.username, 'test')
def test_not_followed_by_default(self):
with self.assertNumQueries(2):
u = User.objects.select_related().get(username="test")
self.assertEqual(u.userstat.posts, 150)
def test_follow_from_child_class(self):
with self.assertNumQueries(1):
stat = AdvancedUserStat.objects.select_related('user', 'statdetails').get(posts=200)
self.assertEqual(stat.statdetails.comments, 250)
self.assertEqual(stat.user.username, 'bob')
def test_follow_inheritance(self):
with self.assertNumQueries(1):
stat = UserStat.objects.select_related('user', 'advanceduserstat').get(posts=200)
self.assertEqual(stat.advanceduserstat.posts, 200)
self.assertEqual(stat.user.username, 'bob')
with self.assertNumQueries(1):
self.assertEqual(stat.advanceduserstat.user.username, 'bob')
def test_nullable_relation(self):
im = Image.objects.create(name="imag1")
p1 = Product.objects.create(name="Django Plushie", image=im)
p2 = Product.objects.create(name="Talking Django Plushie")
with self.assertNumQueries(1):
result = sorted(Product.objects.select_related("image"), key=lambda x: x.name)
self.assertEqual([p.name for p in result], ["Django Plushie", "Talking Django Plushie"])
self.assertEqual(p1.image, im)
# Check for ticket #13839
self.assertIsNone(p2.image)
def test_missing_reverse(self):
"""
Ticket #13839: select_related() should NOT cache None
for missing objects on a reverse 1-1 relation.
"""
with self.assertNumQueries(1):
user = User.objects.select_related('userprofile').get(username='bob')
with self.assertRaises(UserProfile.DoesNotExist):
user.userprofile
def test_nullable_missing_reverse(self):
"""
Ticket #13839: select_related() should NOT cache None
for missing objects on a reverse 0-1 relation.
"""
Image.objects.create(name="imag1")
with self.assertNumQueries(1):
image = Image.objects.select_related('product').get()
with self.assertRaises(Product.DoesNotExist):
image.product
def test_parent_only(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1').get(name1="Only Parent1")
with self.assertNumQueries(0):
with self.assertRaises(Child1.DoesNotExist):
p.child1
def test_multiple_subclass(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1').get(name1="Child1 Parent1")
self.assertEqual(p.child1.name2, 'Child1 Parent2')
def test_onetoone_with_subclass(self):
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2').get(name2="Child2 Parent2")
self.assertEqual(p.child2.name1, 'Child2 Parent1')
def test_onetoone_with_two_subclasses(self):
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child2 Parent2")
self.assertEqual(p.child2.name1, 'Child2 Parent1')
with self.assertRaises(Child3.DoesNotExist):
p.child2.child3
p3 = Parent2(name2="Child3 Parent2")
p3.save()
c2 = Child3(name1="Child3 Parent1", parent2=p3, value=2, value3=3)
c2.save()
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child3 Parent2")
self.assertEqual(p.child2.name1, 'Child3 Parent1')
self.assertEqual(p.child2.child3.value3, 3)
self.assertEqual(p.child2.child3.value, p.child2.value)
self.assertEqual(p.child2.name1, p.child2.child3.name1)
def test_multiinheritance_two_subclasses(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1', 'child1__child4').get(name1="Child1 Parent1")
self.assertEqual(p.child1.name2, 'Child1 Parent2')
self.assertEqual(p.child1.name1, p.name1)
with self.assertRaises(Child4.DoesNotExist):
p.child1.child4
Child4(name1='n1', name2='n2', value=1, value4=4).save()
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child1', 'child1__child4').get(name2="n2")
self.assertEqual(p.name2, 'n2')
self.assertEqual(p.child1.name1, 'n1')
self.assertEqual(p.child1.name2, p.name2)
self.assertEqual(p.child1.value, 1)
self.assertEqual(p.child1.child4.name1, p.child1.name1)
self.assertEqual(p.child1.child4.name2, p.child1.name2)
self.assertEqual(p.child1.child4.value, p.child1.value)
self.assertEqual(p.child1.child4.value4, 4)
def test_inheritance_deferred(self):
if django.VERSION < (1, 10, 0):
self.skipTest('does not work on older version of Django')
c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child1').only(
'id2', 'child1__value').get(name2="n2")
self.assertEqual(p.id2, c.id2)
self.assertEqual(p.child1.value, 1)
p = Parent2.objects.select_related('child1').only(
'id2', 'child1__value').get(name2="n2")
with self.assertNumQueries(1):
self.assertEqual(p.name2, 'n2')
p = Parent2.objects.select_related('child1').only(
'id2', 'child1__value').get(name2="n2")
with self.assertNumQueries(1):
self.assertEqual(p.child1.name2, 'n2')
def test_inheritance_deferred2(self):
if django.VERSION < (1, 10, 0):
self.skipTest('does not work on older version of Django')
c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
qs = Parent2.objects.select_related('child1', 'child1__child4').only(
'id2', 'child1__value', 'child1__child4__value4')
with self.assertNumQueries(1):
p = qs.get(name2="n2")
self.assertEqual(p.id2, c.id2)
self.assertEqual(p.child1.value, 1)
self.assertEqual(p.child1.child4.value4, 4)
self.assertEqual(p.child1.child4.id2, c.id2)
p = qs.get(name2="n2")
with self.assertNumQueries(1):
self.assertEqual(p.child1.name2, 'n2')
p = qs.get(name2="n2")
with self.assertNumQueries(0):
self.assertEqual(p.child1.name1, 'n1')
self.assertEqual(p.child1.child4.name1, 'n1')
def test_self_relation(self):
if django.VERSION < (1, 11, 0):
self.skipTest("does not work on older version of Django")
item1 = LinkedList.objects.create(name='item1')
LinkedList.objects.create(name='item2', previous_item=item1)
with self.assertNumQueries(1):
item1_db = LinkedList.objects.select_related('next_item').get(name='item1')
self.assertEqual(item1_db.next_item.name, 'item2')
class ReverseSelectRelatedValidationTests(SimpleTestCase):
"""
    Reverse related fields should be listed in the validation message when an
invalid field is given in select_related().
"""
non_relational_error = "Non-relational field given in select_related: '%s'. Choices are: %s"
invalid_error = "Invalid field name(s) given in select_related: '%s'. Choices are: %s"
def test_reverse_related_validation(self):
fields = 'userprofile, userstat'
with self.assertRaisesMessage(FieldError, self.invalid_error % ('foobar', fields)):
list(User.objects.select_related('foobar'))
with self.assertRaisesMessage(FieldError, self.non_relational_error % ('username', fields)):
list(User.objects.select_related('username'))
| denisenkom/django-sqlserver | tests/select_related_onetoone/tests.py | Python | mit | 11,118 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations(object):
"""ExpressRoutePortsLocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRoutePortsLocationListResult"]
"""Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
location. Available bandwidths can only be obtained when retrieving a specific peering
location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortsLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'} # type: ignore
def get(
self,
location_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRoutePortsLocation"
"""Retrieves a single ExpressRoutePort peering location, including the list of available
bandwidths available at said peering location.
:param location_name: Name of the requested ExpressRoutePort peering location.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePortsLocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortsLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore
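# --- Hedged usage sketch (illustrative, not part of this generated module) ---
# This operations group is normally reached through the generated management
# client. The credential type, client class and placeholder values below come
# from the wider azure-identity / azure-mgmt-network packages and are
# assumptions made for this example.
if __name__ == '__main__':
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    for location in client.express_route_ports_locations.list():
        print(location.name)
    details = client.express_route_ports_locations.get("<location-name>")
    print(details.available_bandwidths)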
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_express_route_ports_locations_operations.py | Python | mit | 7,987 |
#!/usr/bin/env python
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # Setting to allow importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from simple_convnet import SimpleConvNet
from common.trainer import Trainer
# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
# Reduce the data if processing takes too long
# x_train, t_train = x_train[:5000], t_train[:5000]
# x_test, t_test = x_test[:1000], t_test[:1000]
max_epochs = 20
network = SimpleConvNet(input_dim=(1, 28, 28),
conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=100,
optimizer='Adam', optimizer_param={'lr': 0.001},
evaluate_sample_num_per_epoch=1000)
trainer.train()
# Save the parameters
network.save_params(os.path.dirname(os.path.abspath(__file__)) + "/params.pkl")
print("Saved Network Parameters!")
# Draw the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
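# --- Hedged follow-up sketch (illustrative, not part of the original script) ---
# Reload the saved parameters into a fresh network and check test accuracy;
# load_params/accuracy are assumed to behave as in the accompanying
# SimpleConvNet implementation.
network2 = SimpleConvNet(input_dim=(1, 28, 28),
                         conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                         hidden_size=100, output_size=10, weight_init_std=0.01)
network2.load_params(os.path.dirname(os.path.abspath(__file__)) + "/params.pkl")
print("Reloaded test accuracy:", network2.accuracy(x_test, t_test))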
| kgsn1763/deep-learning-from-scratch | ch07/train_convnet.py | Python | mit | 1,553 |
from copy import deepcopy
import settings
from twitch.player_manager import PlayerManager
class QuestPlayerManager(PlayerManager):
"""
Functions like add_gold perform a raw store action and then save. __add_gold is the raw store action in this case.
Properties of raw store actions:
- Call username.lower()
- Touch self.players with that name
- Do not save to file
Properties of store actions:
- Do nothing other than call a raw action and then save
Some actions can also take a list of elements. These are all of the form:
    def foo(username, **kwargs):
        if not isinstance(username, str):
            for user in username:
                foo(user, **kwargs)
else:
ORIGINAL FUNCTION BODY
Note that both store actions and raw store actions qualify for this.
"""
default_player = deepcopy(PlayerManager.default_player)
default_player.update({
'exp': 0,
'prestige': 0,
'gold': 0,
'items': {}
})
def __add_gold(self, username, gold, prestige_benefits=True):
"""
Gives gold to the specified player.
:param username: str - The player who you are modifying
:param gold: float - How much gold to give that player
:param prestige_benefits: bool - Whether this gold increase is affected by prestige bonuses
"""
# Don't magnify negative amounts of gold
if prestige_benefits and gold > 0:
gold *= 1 + self.players[username]['prestige'] * settings.PRESTIGE_GOLD_AMP
self.players[username]['gold'] += gold
if self.players[username]['gold'] < 0:
self.players[username]['gold'] = 0
def add_gold(self, username, gold, prestige_benefits=True):
"""
Gives gold to the specified player.
:param username: str - The player who you are modifying
:param gold: float - How much gold to give that player
:param prestige_benefits: bool - Whether this gold increase is affected by prestige bonuses
"""
self.__add_gold(username, gold, prestige_benefits=prestige_benefits)
self.save_player(username)
def __add_exp(self, username, exp):
"""
Gives exp to the specified player.
:param username: str - The player who you are modifying
:param exp: float - How much exp to give that player
"""
self.players[username]['exp'] += exp
def add_exp(self, username, exp):
"""
Gives exp to the specified player.
:param username: str - The player who you are modifying
:param exp: float - How much exp to give that player
"""
self.__add_exp(username, exp)
self.save_player(username)
def __add_item(self, username, item):
"""
Item to give to the specified player.
:param username: str - The player who you are modifying
:param item: str or list<str> - The name of the item(s) we are giving to the player
"""
if not isinstance(item, str):
# We must be a list of items
for single_item in item:
self.__add_item(username, single_item)
else:
if item not in self.players[username]['items']:
self.players[username]['items'][item] = 1
else:
self.players[username]['items'][item] += 1
def add_item(self, username, item):
"""
Item to give to the specified player.
:param username: str - The player who you are modifying
:param item: str or list<str> - The name of the item(s) we are giving to the player
"""
self.__add_item(username, item)
self.save_player(username)
def __remove_item(self, username, item):
"""
Item to take from the specified player.
:param username: str - The player who you are modifying
        :param item: str or list<str> - The name of the item(s) we are taking from the player
"""
if not isinstance(item, str):
# We must be a list of items
for single_item in item:
self.__remove_item(username, single_item)
else:
# If we don't have the item, do nothing
if item in self.players[username]['items']:
self.players[username]['items'][item] -= 1
if self.players[username]['items'][item] <= 0:
del self.players[username]['items'][item]
def remove_item(self, username, item):
"""
Item to take from the specified player.
:param username: str - The player who you are modifying
        :param item: str or list<str> - The name of the item(s) we are taking from the player
"""
self.__remove_item(username, item)
self.save_player(username)
def __reward(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
"""
Gives gold and exp to the specified player.
:param username: str - The player who you are modifying
:param gold: float - How much gold to give that player
:param exp: float - How much exp to give that player
"""
if not isinstance(username, str):
# We must be a list of users
for user in username:
self.__reward(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
else:
self.__add_gold(username, gold, prestige_benefits=prestige_benefits)
self.__add_exp(username, exp)
if item:
self.__add_item(username, item)
def reward(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
"""
Gives gold and exp to the specified player(s).
:param username: str or list<str> - The player(s) who you are modifying
:param gold: float - How much gold to give that player
:param exp: float - How much exp to give that player
"""
if not isinstance(username, str):
# We must be a list of users
for user in username:
self.reward(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
else:
self.__reward(username, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
self.save_player(username)
def __penalize(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
"""
        Takes gold and exp away from the specified player(s).
        :param username: str or list<str> - The player(s) who you are modifying
        :param gold: float - How much gold to take from that player
        :param exp: float - How much exp to take from that player
"""
if not isinstance(username, str):
# We must be a list of users
for user in username:
self.__penalize(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
else:
self.__reward(username, gold=-gold, exp=-exp, item=None, prestige_benefits=prestige_benefits)
if item:
self.__remove_item(username, item)
def penalize(self, username, gold=0, exp=0, item=None, prestige_benefits=True):
"""
        Takes gold and exp away from the specified player(s).
        :param username: str or list<str> - The player(s) who you are modifying
        :param gold: float - How much gold to take from that player
        :param exp: float - How much exp to take from that player
"""
if not isinstance(username, str):
# We must be a list of users
for user in username:
self.penalize(user, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
else:
self.__penalize(username, gold=gold, exp=exp, item=item, prestige_benefits=prestige_benefits)
self.save_player(username)
def get_gold(self, username):
"""
Gets how much gold a given player has.
        :param username: str - The player whose gold you are querying
"""
return self.players[username]['gold']
def get_exp(self, username):
"""
Gets how much exp a given player has.
        :param username: str - The player whose exp you are querying
"""
return self.players[username]['exp']
@staticmethod
def exp_to_level(exp):
# The value for every member of the list is the minimum experience to be a given level
for level, exp_req in enumerate(settings.EXP_LEVELS, start=-1):
if exp < exp_req:
return level
return settings.LEVEL_CAP
def get_level(self, username):
"""
Gets what level a given player is.
        :param username: str - The player whose level you are querying
"""
exp = self.players[username]['exp']
return self.exp_to_level(exp)
def get_prestige(self, username):
"""
Gets what prestige level a given player is.
        :param username: str - The player whose prestige level you are querying
"""
return self.players[username]['prestige']
def get_items(self, username):
"""
Gets the items of a given player.
        :param username: str - The player whose items you are querying
"""
return self.players[username]['items']
def prestige(self, username):
"""
Prestige advances a player.
:param username: str - The player who you are modifying
:return: bool - True if successfully prestiged, False if no change
"""
if self.players[username]['exp'] >= settings.EXP_LEVELS[settings.LEVEL_CAP] and (
self.players[username]['gold'] >= settings.PRESTIGE_COST):
self.players[username]['exp'] -= settings.EXP_LEVELS[settings.LEVEL_CAP]
self.players[username]['gold'] -= settings.PRESTIGE_COST
self.players[username]['prestige'] += 1
self.save_player(username)
return True
else:
return False
@staticmethod
def list_items(items):
msg = ''
for item, quantity in items.items():
if quantity <= 0:
continue
if quantity == 1:
msg += '{}, '.format(item)
else:
msg += '{} ({}), '.format(item, quantity)
msg = msg.rstrip(', ')
return msg
def whisper_stats(self, username):
"""
Whispers a player their relevant stats.
:param username: str - The player who is requesting stat information
"""
player = self.players[username]
msg = '{}Level: {} ({} Exp), Gold: {}{}'.format(
'Prestige: {}, '.format(player['prestige']) if player['prestige'] else '',
self.get_level(username), round(player['exp'], 1), round(player['gold'], 1),
', Items: {}'.format(self.list_items(player['items'])) if player['items'] else '')
self.bot.send_whisper(username, msg)
def save_player(self, username):
"""
Saves a specific player's data to persistent storage. Deletes items with quantity 0 or less.
:param username: str - The player whose data you want to save
"""
# Remove duplicate items. Doesn't use a dict comprehension because items is a custom dict type
remove_items = []
for item, quantity in self.players[username]['items'].items():
if quantity <= 0:
remove_items.append(item)
for remove_item in remove_items:
del self.players[username]['items'][remove_item]
super().save_player(username)
| Xelaadryth/Xelabot | quest_bot/quest_player_manager.py | Python | mit | 11,668 |
import pyactiveresource.connection
from pyactiveresource.activeresource import ActiveResource, ResourceMeta, formats
import shopify.yamlobjects
import shopify.mixins as mixins
import shopify
import threading
import sys
from six.moves import urllib
import six
from shopify.collection import PaginatedCollection
from pyactiveresource.collection import Collection
# Store the response from the last request in the connection object
class ShopifyConnection(pyactiveresource.connection.Connection):
response = None
def __init__(self, site, user=None, password=None, timeout=None, format=formats.JSONFormat):
super(ShopifyConnection, self).__init__(site, user, password, timeout, format)
def _open(self, *args, **kwargs):
self.response = None
try:
self.response = super(ShopifyConnection, self)._open(*args, **kwargs)
except pyactiveresource.connection.ConnectionError as err:
self.response = err.response
raise
return self.response
# Inherit from pyactiveresource's metaclass in order to use ShopifyConnection
class ShopifyResourceMeta(ResourceMeta):
@property
def connection(cls):
"""HTTP connection for the current thread"""
local = cls._threadlocal
if not getattr(local, "connection", None):
# Make sure these variables are no longer affected by other threads.
local.user = cls.user
local.password = cls.password
local.site = cls.site
local.timeout = cls.timeout
local.headers = cls.headers
local.format = cls.format
local.version = cls.version
local.url = cls.url
if cls.site is None:
raise ValueError("No shopify session is active")
local.connection = ShopifyConnection(cls.site, cls.user, cls.password, cls.timeout, cls.format)
return local.connection
def get_user(cls):
return getattr(cls._threadlocal, "user", ShopifyResource._user)
def set_user(cls, value):
cls._threadlocal.connection = None
ShopifyResource._user = cls._threadlocal.user = value
user = property(get_user, set_user, None, "The username for HTTP Basic Auth.")
def get_password(cls):
return getattr(cls._threadlocal, "password", ShopifyResource._password)
def set_password(cls, value):
cls._threadlocal.connection = None
ShopifyResource._password = cls._threadlocal.password = value
password = property(get_password, set_password, None, "The password for HTTP Basic Auth.")
def get_site(cls):
return getattr(cls._threadlocal, "site", ShopifyResource._site)
def set_site(cls, value):
cls._threadlocal.connection = None
ShopifyResource._site = cls._threadlocal.site = value
if value is not None:
parts = urllib.parse.urlparse(value)
host = parts.hostname
if parts.port:
host += ":" + str(parts.port)
new_site = urllib.parse.urlunparse((parts.scheme, host, parts.path, "", "", ""))
ShopifyResource._site = cls._threadlocal.site = new_site
if parts.username:
cls.user = urllib.parse.unquote(parts.username)
if parts.password:
cls.password = urllib.parse.unquote(parts.password)
site = property(get_site, set_site, None, "The base REST site to connect to.")
def get_timeout(cls):
return getattr(cls._threadlocal, "timeout", ShopifyResource._timeout)
def set_timeout(cls, value):
cls._threadlocal.connection = None
ShopifyResource._timeout = cls._threadlocal.timeout = value
timeout = property(get_timeout, set_timeout, None, "Socket timeout for HTTP requests")
def get_headers(cls):
if not hasattr(cls._threadlocal, "headers"):
cls._threadlocal.headers = ShopifyResource._headers.copy()
return cls._threadlocal.headers
def set_headers(cls, value):
cls._threadlocal.headers = value
headers = property(get_headers, set_headers, None, "The headers sent with HTTP requests")
def get_format(cls):
return getattr(cls._threadlocal, "format", ShopifyResource._format)
def set_format(cls, value):
cls._threadlocal.connection = None
ShopifyResource._format = cls._threadlocal.format = value
format = property(get_format, set_format, None, "Encoding used for request and responses")
def get_prefix_source(cls):
"""Return the prefix source, by default derived from site."""
try:
return cls.override_prefix()
except AttributeError:
if hasattr(cls, "_prefix_source"):
return cls.site + cls._prefix_source
else:
return cls.site
def set_prefix_source(cls, value):
"""Set the prefix source, which will be rendered into the prefix."""
cls._prefix_source = value
prefix_source = property(get_prefix_source, set_prefix_source, None, "prefix for lookups for this type of object.")
def get_version(cls):
if hasattr(cls._threadlocal, "version") or ShopifyResource._version:
return getattr(cls._threadlocal, "version", ShopifyResource._version)
elif ShopifyResource._site is not None:
return ShopifyResource._site.split("/")[-1]
def set_version(cls, value):
ShopifyResource._version = cls._threadlocal.version = value
version = property(get_version, set_version, None, "Shopify Api Version")
def get_url(cls):
return getattr(cls._threadlocal, "url", ShopifyResource._url)
def set_url(cls, value):
ShopifyResource._url = cls._threadlocal.url = value
url = property(get_url, set_url, None, "Base URL including protocol and shopify domain")
@six.add_metaclass(ShopifyResourceMeta)
class ShopifyResource(ActiveResource, mixins.Countable):
_format = formats.JSONFormat
_threadlocal = threading.local()
_headers = {"User-Agent": "ShopifyPythonAPI/%s Python/%s" % (shopify.VERSION, sys.version.split(" ", 1)[0])}
_version = None
_url = None
def __init__(self, attributes=None, prefix_options=None):
if attributes is not None and prefix_options is None:
prefix_options, attributes = self.__class__._split_options(attributes)
return super(ShopifyResource, self).__init__(attributes, prefix_options)
def is_new(self):
return not self.id
def _load_attributes_from_response(self, response):
if response.body.strip():
self._update(self.__class__.format.decode(response.body))
@classmethod
def activate_session(cls, session):
cls.site = session.site
cls.url = session.url
cls.user = None
cls.password = None
cls.version = session.api_version.name
cls.headers["X-Shopify-Access-Token"] = session.token
@classmethod
def clear_session(cls):
cls.site = None
cls.url = None
cls.user = None
cls.password = None
cls.version = None
cls.headers.pop("X-Shopify-Access-Token", None)
@classmethod
def find(cls, id_=None, from_=None, **kwargs):
"""Checks the resulting collection for pagination metadata."""
collection = super(ShopifyResource, cls).find(id_=id_, from_=from_, **kwargs)
if isinstance(collection, Collection) and "headers" in collection.metadata:
return PaginatedCollection(collection, metadata={"resource_class": cls}, **kwargs)
return collection
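# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Pointing ShopifyResource at a shop using credentials embedded in the site URL;
# the shop domain, API version and credentials below are placeholders.
if __name__ == '__main__':
    ShopifyResource.set_site("https://API_KEY:PASSWORD@example.myshopify.com/admin/api/2021-07")
    # From here, resource subclasses (e.g. shopify.Shop, shopify.Product) issue
    # requests through the thread-local ShopifyConnection configured above.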
| Shopify/shopify_python_api | shopify/base.py | Python | mit | 7,625 |
from django.conf import settings
from images.models import S3Connection
from shutil import copyfileobj
import tinys3
import os
import urllib
class LocalStorage(object):
def __init__(self, filename):
self.filename = filename
def get_file_data(self):
"""
Returns the raw data for the specified file
"""
image_path = os.path.join(settings.MEDIA_ROOT, self.filename)
# TODO: do you need to close this?
data = open(image_path, 'r').read()
return data
def get_remote_path(self):
"""
Builds a relative remote path by combining the MEDIA_URL setting and the filename
"""
return '%s%s' % (settings.MEDIA_URL, self.filename)
def store(self, file_instance, content_type=None):
"""
Copy over the `file_instance` to the local storage
"""
image_path = os.path.join(settings.MEDIA_ROOT, self.filename)
with open(image_path, 'w') as fw:
copyfileobj(file_instance, fw)
@staticmethod
def create_argument_slug(arguments_dict):
"""
Converts an arguments dictionary into a string that can be stored in a filename
"""
# TODO: is there a possible bug if an invalid key/value is presented?
args_list = ['%s-%s' % (key, value) for key, value in arguments_dict.items()]
return '--'.join(args_list)
class S3Storage(LocalStorage):
def __init__(self, *args, **kwargs):
"""
Overrides the LocalStorage and initializes a shared S3 connection
"""
super(S3Storage, self).__init__(*args, **kwargs)
self.conn = tinys3.Connection(self.S3_ACCESS_KEY, self.S3_SECRET_KEY, default_bucket=self.S3_BUCKET, tls=True)
def get_remote_path(self):
"""
Returns an absolute remote path for the filename from the S3 bucket
"""
return 'https://%s.%s/%s' % (self.conn.default_bucket, self.conn.endpoint, self.filename)
def get_file_data(self):
"""
        Returns the raw data for the specified file, downloading it from S3
"""
path = self.get_remote_path()
data = urllib.urlopen(path).read()
return data
def store(self, file_instance, content_type=None):
"""
Copy over the `file_instance` from memory to S3
"""
self.conn.upload(self.filename, file_instance, content_type=content_type)
@property
def S3_BUCKET(self):
"""
Returns the S3_BUCKET. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_BUCKET', self.database_settings.bucket)
@property
def S3_ACCESS_KEY(self):
"""
Returns the S3_ACCESS_KEY. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_ACCESS_KEY', self.database_settings.access_key)
@property
def S3_SECRET_KEY(self):
"""
Returns the S3_SECRET_KEY. Checks local environment variables first, database-stored settings second
"""
return os.environ.get('S3_SECRET_KEY', self.database_settings.secret_key)
@property
def database_settings(self):
"""
        Pulls an S3Connection instance, which contains S3 connection settings, from the database. The result is cached locally.
"""
if not getattr(self, '__database_settings', None):
self.__database_settings = S3Connection.objects.get()
return self.__database_settings
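# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Storing a file locally and resolving its public path. Assumes Django settings
# (MEDIA_ROOT / MEDIA_URL) are configured and that a source file "cat.jpg"
# exists; the filename scheme below is an assumption made for this example.
if __name__ == '__main__':
    slug = LocalStorage.create_argument_slug({'width': 100, 'height': 100})
    storage = LocalStorage('cat--%s.jpg' % slug)
    with open('cat.jpg', 'rb') as fh:
        storage.store(fh, content_type='image/jpeg')
    print(storage.get_remote_path())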
| sokanu/frame | images/storage.py | Python | mit | 3,547 |
# -*- coding: utf-8 -*-
#
# powerschool_apps documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'powerschool_apps'
copyright = """2017, Iron County School District"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'powerschool_appsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'powerschool_apps.tex',
'powerschool_apps Documentation',
"""Iron County School District""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'powerschool_apps', 'powerschool_apps Documentation',
["""Iron County School District"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'powerschool_apps', 'powerschool_apps Documentation',
"""Iron County School District""", 'powerschool_apps',
"""PowerSchool customizations written in Django""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| IronCountySchoolDistrict/powerschool_apps | docs/conf.py | Python | mit | 8,001 |
#!/usr/bin/env python
import atexit
import argparse
import getpass
import sys
import textwrap
import time
from pyVim import connect
from pyVmomi import vim
import requests
requests.packages.urllib3.disable_warnings()
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
def get_args():
parser = argparse.ArgumentParser()
# because -h is reserved for 'help' we use -s for service
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSphere service to connect to')
# because we want -p for password, we use -o for port
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-n', '--name',
required=True,
action='store',
help='Name of the virtual_machine to look for.')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
return args
def _create_char_spinner():
"""Creates a generator yielding a char based spinner.
"""
while True:
for c in '|/-\\':
yield c
_spinner = _create_char_spinner()
def spinner(label=''):
"""Prints label with a spinner.
When called repeatedly from inside a loop this prints
a one line CLI spinner.
"""
sys.stdout.write("\r\t%s %s" % (label, _spinner.next()))
sys.stdout.flush()
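# Illustrative use of spinner(): calling it repeatedly inside a polling loop,
# e.g. while waiting on a vSphere task, keeps redrawing one status line:
#
#     while task.info.state == vim.TaskInfo.State.running:
#         spinner(task.info.state)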
def answer_vm_question(virtual_machine):
print "\n"
choices = virtual_machine.runtime.question.choice.choiceInfo
default_option = None
if virtual_machine.runtime.question.choice.defaultIndex is not None:
ii = virtual_machine.runtime.question.choice.defaultIndex
default_option = choices[ii]
choice = None
while choice not in [o.key for o in choices]:
print "VM power on is paused by this question:\n\n"
print "\n".join(textwrap.wrap(
virtual_machine.runtime.question.text, 60))
for option in choices:
print "\t %s: %s " % (option.key, option.label)
if default_option is not None:
print "default (%s): %s\n" % (default_option.label,
default_option.key)
choice = raw_input("\nchoice number: ").strip()
print "..."
return choice
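# Note: answer_vm_question is not used in the reset flow below. It is a helper
# for answering a question that blocks a VM power operation, e.g. by passing
# its return value to the VirtualMachine.AnswerVM call while a task is stuck.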
# form a connection...
args = get_args()
si = connect.SmartConnect(host=args.host, user=args.user, pwd=args.password,
port=args.port)
# doing this means you don't need to remember to disconnect your script/objects
atexit.register(connect.Disconnect, si)
# search the whole inventory tree recursively... a brutish but effective tactic
vm = None
entity_stack = si.content.rootFolder.childEntity
while entity_stack:
entity = entity_stack.pop()
if entity.name == args.name:
vm = entity
del entity_stack[0:len(entity_stack)]
elif hasattr(entity, 'childEntity'):
entity_stack.extend(entity.childEntity)
elif isinstance(entity, vim.Datacenter):
entity_stack.append(entity.vmFolder)
if not isinstance(vm, vim.VirtualMachine):
print "could not find a virtual machine with the name %s" % args.name
sys.exit(-1)
print "Found VirtualMachine: %s Name: %s" % (vm, vm.name)
if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
# using time.sleep we just wait until the power off action
# is complete. Nothing fancy here.
print "reset the vm"
task = vm.ResetVM_Task()
while task.info.state not in [vim.TaskInfo.State.success,
vim.TaskInfo.State.error]:
time.sleep(1)
print "resetting vm ..."
sys.exit(0) | kenelite/vmapi | 005reset.py | Python | mit | 4,622 |
__author__ = 'Akshay'
"""
Mines reviews and star ratings from the Yelp dataset, grouped by state.
This is an additional POC we did on Yelp for visualising the number of 5-star reviews per state on a map.
For each state, reviews are collected from its businesses until 5 reviews have been recorded, and the counts are kept in per-state dictionaries.
Use the resulting JSON to plot the counts onto the map.
For the actual map visualisation, please refer to the State Review Nightlife POC.
Since only 5 reviews were taken per state, this still needs work.
"""
##############################################
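# The JSON produced by state_review_trends() below is a list of
# {"id": <state code>, "value": <number of 5-star reviews>} objects,
# which is the shape used for plotting onto the map.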
from __future__ import division
import sys
reload(sys)
import json
import datetime
sys.setdefaultencoding('utf8')
state_5_star_dict = {}
state_4_star_dict = {}
state_3_star_dict = {}
state_2_star_dict = {}
state_1_star_dict = {}
state_business = {}
def create_set_for_business_with_cat(category):
business_count = 0
with open('Data\yelp_academic_dataset_business.json') as fp:
for line in fp.readlines():
temp = json.loads(line, encoding='utf-8')
categories = str(temp["categories"])
state = str(temp["state"])
if state == "ON" or state == "ELN" or state == "EDH" or state == "MLN" or state == "NTH" or state == "FIF":
continue
if state not in state_business:
state_business[state] = 0
if len(state_business.keys()) == 50:
break
if category in categories:
print state
business_id = str(temp["business_id"])
city = str(temp["city"])
name = str(temp["name"])
create_yelp_set(business_id, state, city, name)
print "set prepared."
def create_yelp_set(business_id, state, city, name):
file_write = open('Data\state_stars_date_business.txt', mode='a')
if state_business[state] == 5:
print state, " is already completed."
return
with open('Data\yelp_academic_dataset_review.json') as fp:
for line in fp.readlines():
temp = json.loads(line, encoding='utf-8')
if str(temp["business_id"]) == business_id:
state_business[state] += 1
star = str(temp["stars"])
date = str(temp["date"])
date_tm = datetime.datetime.strptime(date, "%Y-%m-%d").date()
file_write.write(business_id)
file_write.write('\t')
file_write.write(state)
file_write.write('\t')
file_write.write(star)
file_write.write('\t')
file_write.write(city)
file_write.write('\t')
file_write.write(name)
file_write.write('\t')
file_write.write(str(date_tm))
file_write.write('\n')
if state_business[state] == 5:
break
for key, value in state_5_star_dict.iteritems():
print key, value
file_write.close()
print "Done."
def state_review_trends():
count = 0
with open('Data\state_stars_date_business.txt') as fp:
for line in fp.readlines():
count += 1
tup = (line.split("\t")[0], line.split("\t")[1], line.split("\t")[2], line.split("\t")[3],
line.split("\t")[4], line.split("\t")[5])
state = tup[1]
star_rating = int(tup[2])
if int(star_rating) != 5:
continue
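            # Because of the 'continue' above, only 5-star reviews reach this
            # point, so the 4/3/2/1-star dictionaries below always stay empty.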
if state not in state_5_star_dict:
state_5_star_dict[state] = 0
if state not in state_4_star_dict:
state_4_star_dict[state] = 0
if state not in state_3_star_dict:
state_3_star_dict[state] = 0
if state not in state_2_star_dict:
state_2_star_dict[state] = 0
if state not in state_1_star_dict:
state_1_star_dict[state] = 0
if star_rating == 5:
state_5_star_dict[state] += 1
if star_rating == 4:
state_4_star_dict[state] += 1
if star_rating == 3:
state_3_star_dict[state] += 1
if star_rating == 2:
state_2_star_dict[state] += 1
if star_rating == 1:
state_1_star_dict[state] += 1
response = []
print "Number of 5 star reviews per state."
for key, value in state_5_star_dict.iteritems():
response.append({'id': key, 'value': value})
print key, value
json_data = json.dumps(response)
print json_data
print "Done."
print count
def main():
# Uncomment the line to run mining data.
# create_set_for_business_with_cat("Nightlife")
state_review_trends()
if __name__ == "__main__":
print "Execute Script!!"
main()
| akshaykamath/StateReviewTrendAnalysisYelp | StateReviewTrendsPOC.py | Python | mit | 4,899 |
# Author: Samuel Genheden, [email protected]
"""
Program to build lipids from a template, similar to MARTINI INSANE.
It is VERY experimental!
"""
import argparse
import os
import xml.etree.ElementTree as ET
import numpy as np
from sgenlib import pdb
class BeadDefinition(object):
def __init__(self):
self.name = None
self.xyz = None
def parse(self, element):
if "name" in element.attrib:
self.name = element.attrib["name"]
else:
return
if "xyz" in element.attrib:
self.xyz = np.array(element.attrib["xyz"].split(), dtype=float)
def __str__(self):
return "%s (%s)"%(self.name,",".join("%.2f"%c for c in self.xyz))
class LipidTemplate(object):
def __init__(self):
self.name = None
self.beads = []
self.headname = []
self.tailname = []
self.head = []
self.tail = []
def make(self, bd=3.0):
struct = pdb.PDBFile()
res = pdb.Residue()
for i, bead in enumerate(self.beads):
atom = pdb.Atom()
atom.idx = i
atom.serial = i + 1
atom.name = bead.name
atom.resname = self.name
atom.residue = 1
atom.set_xyz(bead.xyz*bd)
res.atoms.append(atom)
struct.atoms.append(atom)
struct.residues.append(res)
allcoord = np.asarray([a.xyz for a in struct.atoms])
offset = allcoord.mean(axis=0) + 50.0
for a in struct.atoms:
a.set_xyz(a.xyz+offset)
struct.box = np.asarray([100,100,100])
return struct
def parse(self, element):
if "name" in element.attrib:
self.name = element.attrib["name"]
else:
return
if "head" in element.attrib:
self.headname = element.attrib["head"].split()
if "tail" in element.attrib:
self.tailname = element.attrib["tail"].split()
for child in element:
if child.tag != "bead":
continue
b = BeadDefinition()
b.parse(child)
if b.name is not None:
self.beads.append(b)
if b.name in self.headname :
self.head.append(b)
elif b.name in self.tailname :
self.tail.append(b)
def __str__(self):
return self.name+"\n\t"+"\n\t".join(b.__str__() for b in self.beads)
class LipidCollection(object):
def __init__(self):
self.lipids = {}
def load(self, filename):
tree = ET.parse(filename)
# Parse lipids
for child in tree.getroot():
if child.tag != "lipid":
continue
lipid = LipidTemplate()
lipid.parse(child)
if lipid.name is not None:
self.lipids[lipid.name] = lipid
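# The XML template file is expected to look roughly like the sketch below
# (reconstructed from the parse() methods above; the root tag, bead names and
# coordinates are made-up examples, not a real lipid definition):
#
#   <lipids>
#     <lipid name="POPC" head="NC3 PO4" tail="C4A C4B">
#       <bead name="NC3" xyz="0 0 5"/>
#       <bead name="PO4" xyz="0 0 4"/>
#       <bead name="C4A" xyz="0 0 0"/>
#       <bead name="C4B" xyz="1 0 0"/>
#     </lipid>
#   </lipids>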
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="Building lipids from templates")
parser.add_argument('-l','--lipid',help="the lipid to build")
parser.add_argument('-x','--xml',help="the definition of templates")
parser.add_argument('-o','--out',help="the output name",default="lipid.pdb")
parser.add_argument('--bd',type=float,help="the spacing between beads",default=3.0)
args = parser.parse_args()
lipidbook = LipidCollection()
if args.xml is None:
thispath = os.path.dirname(os.path.abspath(__file__))
args.xml = os.path.join(thispath,"lipid_templates.xml")
lipidbook.load(args.xml)
if args.lipid in lipidbook.lipids:
struct = lipidbook.lipids[args.lipid].make(bd=args.bd)
struct.write(args.out)
else:
"%s not in the XML file"%args.lipid
| SGenheden/Scripts | Membrane/build_lipid.py | Python | mit | 3,765 |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-std=gnu11',
'-x',
'c',
'-isystem',
'/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
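# For example (illustrative only -- the path is an assumption about your build
# layout, not part of this config):
# compilation_database_folder = '/home/user/project/build'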
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| darthdeus/dotfiles | c_ycm_conf.py | Python | mit | 5,178 |
class RepeatError(ValueError):
pass
class NoneError(ValueError):
pass
| HarborYuan/cashier | Errors.py | Python | mit | 79 |
from storytext.javaswttoolkit import describer as swtdescriber
from org.eclipse.core.internal.runtime import InternalPlatform
from org.eclipse.ui.forms.widgets import ExpandableComposite
import os
from pprint import pprint
class Describer(swtdescriber.Describer):
swtdescriber.Describer.stateWidgets = [ ExpandableComposite ] + swtdescriber.Describer.stateWidgets
swtdescriber.Describer.ignoreChildren = (ExpandableComposite,) + swtdescriber.Describer.ignoreChildren
def buildImages(self):
swtdescriber.Describer.buildImages(self)
self.buildImagesFromBundles()
def buildImagesFromBundles(self):
allImageTypes = [ "gif", "png", "jpg" ]
allImageTypes += [ i.upper() for i in allImageTypes ]
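        # Cache, per OSGi bundle, which image extensions were actually found, so
        # later runs only search each bundle for the types it is known to contain.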
cacheFile = os.path.join(os.getenv("STORYTEXT_HOME"), "osgi_bundle_image_types")
cacheExists = os.path.isfile(cacheFile)
bundleImageTypes = eval(open(cacheFile).read()) if cacheExists else {}
for bundle in InternalPlatform.getDefault().getBundleContext().getBundles():
usedTypes = []
name = bundle.getSymbolicName()
imageTypes = bundleImageTypes.get(name, allImageTypes)
for imageType in imageTypes:
self.logger.debug("Searching bundle " + name + " for images of type " + imageType)
images = bundle.findEntries("/", "*." + imageType, True)
if images and images.hasMoreElements():
self.storeAllImages(images)
usedTypes.append(imageType)
if not cacheExists:
bundleImageTypes[name] = usedTypes
if not cacheExists:
f = open(cacheFile, "w")
pprint(bundleImageTypes, f)
f.close()
def storeAllImages(self, entries):
while entries.hasMoreElements():
url = entries.nextElement()
self.storeImageData(url)
def getExpandableCompositeState(self, widget):
return widget.isExpanded()
def getExpandableCompositeDescription(self, widget):
state = self.getExpandableCompositeState(widget)
self.widgetsWithState[widget] = state
desc = "Expandable '" + widget.getText() + "' "
desc += "(expanded)" if state else "(collapsed)"
if state:
clientDesc = self.getDescription(widget.getClient())
desc += "\n " + clientDesc.replace("\n", "\n ")
return desc
| emilybache/texttest-runner | src/main/python/storytext/lib/storytext/javarcptoolkit/describer.py | Python | mit | 2,499 |
import os
from setuptools import find_packages
from setuptools import setup
version = '1.0'
project = 'kotti_mb'
install_requires = [
    'Kotti',
]
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
setup(name=project,
version=version,
description="AddOn for Kotti",
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"License :: Repoze Public License",
],
keywords='kotti addon',
author='Christoph Boehner',
author_email='[email protected]',
url='http://pypi.python.org/pypi/',
license='bsd',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=[],
entry_points={
'fanstatic.libraries': [
'kotti_mb = kotti_mb.fanstatic:library',
],
},
extras_require={},
message_extractors={'kotti_mb': [
('**.py', 'lingua_python', None),
('**.zcml', 'lingua_xml', None),
('**.pt', 'lingua_xml', None),
]},
)
| potzenheimer/kotti_mb | setup.py | Python | mit | 1,275 |
# Natural Language Toolkit: POS Tag Simplification
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Brown Corpus
# http://khnt.hit.uib.no/icame/manuals/brown/INDEX.HTM
brown_mapping1 = {
'j': 'ADJ', 'p': 'PRO', 'm': 'MOD', 'q': 'DET',
'w': 'WH', 'r': 'ADV', 'i': 'P',
'u': 'UH', 'e': 'EX', 'o': 'NUM', 'b': 'V',
'h': 'V', 'f': 'FW', 'a': 'DET', 't': 'TO',
'cc': 'CNJ', 'cs': 'CNJ', 'cd': 'NUM',
'do': 'V', 'dt': 'DET',
'nn': 'N', 'nr': 'N', 'np': 'NP', 'nc': 'N',
'--': '--'
}
brown_mapping2 = {
'vb': 'V', 'vbd': 'VD', 'vbg': 'VG', 'vbn': 'VN'
}
def simplify_brown_tag(tag):
tag = tag.lower()
if tag[0] in brown_mapping1:
return brown_mapping1[tag[0]]
elif tag[:2] in brown_mapping1: # still doesn't handle DOD tag correctly
return brown_mapping1[tag[:2]]
try:
if '-' in tag:
tag = tag.split('-')[0]
return brown_mapping2[tag]
except KeyError:
return tag.upper()
# Wall Street Journal tags (Penn Treebank)
wsj_mapping = {
'-lrb-': '(', '-rrb-': ')', '-lsb-': '(',
'-rsb-': ')', '-lcb-': '(', '-rcb-': ')',
'-none-': '', 'cc': 'CNJ', 'cd': 'NUM',
'dt': 'DET', 'ex': 'EX', 'fw': 'FW', # existential "there", foreign word
'in': 'P', 'jj': 'ADJ', 'jjr': 'ADJ',
'jjs': 'ADJ', 'ls': 'L', 'md': 'MOD', # list item marker
'nn': 'N', 'nnp': 'NP', 'nnps': 'NP',
'nns': 'N', 'pdt': 'DET', 'pos': '',
'prp': 'PRO', 'prp$': 'PRO', 'rb': 'ADV',
'rbr': 'ADV', 'rbs': 'ADV', 'rp': 'PRO',
'sym': 'S', 'to': 'TO', 'uh': 'UH',
'vb': 'V', 'vbd': 'VD', 'vbg': 'VG',
'vbn': 'VN', 'vbp': 'V', 'vbz': 'V',
'wdt': 'WH', 'wp': 'WH', 'wp$': 'WH',
'wrb': 'WH',
'bes': 'V', 'hvs': 'V', 'prp^vbp': 'PRO' # additions for NPS Chat corpus
}
def simplify_wsj_tag(tag):
if tag and tag[0] == '^':
tag = tag[1:]
try:
tag = wsj_mapping[tag.lower()]
except KeyError:
pass
return tag.upper()
indian_mapping = {
'nn': 'N', 'vm': 'MOD', 'jj': 'ADJ', 'nnp': 'NP',
'prp': 'PRO', 'prep': 'PRE', 'vaux': 'V', 'vfm': 'V',
'cc': 'CNJ', 'nnpc': 'NP', 'nnc': 'N', 'qc': 'QC',
'dem': 'DET', 'vrb': 'V', 'qfnum': 'NUM', 'rb': 'ADV',
'qf': 'DET', 'punc': '.', 'rp': 'PRT', 'psp': 'PSP',
'nst': 'N', 'nvb': 'N', 'vjj': 'V', 'neg': 'NEG',
'vnn': 'V', 'xc': 'XC', 'intf': 'INTF', 'nloc': 'N',
'jvb': 'ADJ', 'wq': 'WH', 'qw': 'WH', 'jj:?': 'ADJ',
'"cc': 'CNJ', 'nnp,': 'NP', 'sym\xc0\xa7\xb7': 'SYM',
'symc': 'SYM'}
def simplify_indian_tag(tag):
if ':' in tag:
tag = tag.split(':')[0]
try:
tag = indian_mapping[tag.lower()]
except KeyError:
pass
return tag.upper()
# Alpino tags
alpino_mapping = {
'noun':'N', 'name': 'NP', 'vg': 'VG', 'punct':'.',
'verb':'V', 'pron': 'PRO', 'prep':'P'
}
def simplify_alpino_tag(tag):
try:
tag = alpino_mapping[tag]
except KeyError:
pass
return tag.upper()
# Default tag simplification
def simplify_tag(tag):
return tag[0].upper()
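# A few hand-traced examples of the simplifiers above (for illustration only;
# these are not executed as doctests):
#   simplify_brown_tag('vbd') -> 'VD'
#   simplify_wsj_tag('NNS')   -> 'N'
#   simplify_tag('NP-TL')     -> 'N'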
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| syllog1sm/TextBlob | text/nltk/tag/simplify.py | Python | mit | 3,411 |
import codecs
f = codecs.open("/Users/hjp/Downloads/task/data/dev.txt", 'r', 'utf-8')
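# dev.txt is assumed to be tab-separated; fields 1 and 3 are printed as a pair
# below, then every field is printed on its own line.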
for line in f.readlines():
print(line)
sents = line.split('\t')
print(sents[1] + "\t" + sents[3])
for i in range(len(sents)):
print(sents[i])
f.close()
| hjpwhu/Python | src/hjp.edu.nlp.data.task/semeval.py | Python | mit | 269 |
"""nubrain URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from nubrain.settings import BASE_URL, APP_NAME
from django.views.generic import RedirectView
from django.utils.translation import ugettext_lazy
urlpatterns = patterns('',
(r'^$', RedirectView.as_view(url='%s/admin/' % BASE_URL)),
url(r'^admin/', include(admin.site.urls)),
)
admin.site.site_title = ugettext_lazy(APP_NAME)
admin.site.site_header = ugettext_lazy('%s Admin' % APP_NAME)
admin.site.index_title = ugettext_lazy('%s Dashboard' % APP_NAME)
admin.autodiscover()
| NuChwezi/nubrain | nubrain/urls.py | Python | mit | 1,184 |
from django.contrib import admin
from .models import SimpleModel, Type, FKModel
admin.site.register(SimpleModel)
admin.site.register(Type)
admin.site.register(FKModel)
| wq/django-data-wizard | tests/data_app/admin.py | Python | mit | 170 |
from .logic import LogicAdapter
from chatterbot.conversation import Statement
from chatterbot.utils.pos_tagger import POSTagger
import re
import forecastio
class WeatherLogicAdapter(LogicAdapter):
"""
A logic adapter that returns information regarding the weather and
the forecast for a specific location. Currently, only basic information
is returned, but additional features are planned in the future.
"""
def __init__(self, **kwargs):
super(WeatherLogicAdapter, self).__init__(**kwargs)
self.tagger = POSTagger()
self.forecastio_api_key = kwargs.get("forecastio_api_key")
def process(self, statement):
"""
Returns the forecast for a location (using latitude and longitude).
"""
user_input = statement.text.lower()
if "weather" not in user_input:
return 0, Statement("")
latitude = self.get_latitude(user_input)
longitude = self.get_longitude(user_input)
        if latitude != "" and longitude != "":
# @TODO: Add more options for getting weather. This could include
# the current temperature, the current cloud cover, etc. This
# might require removing the forecastio library (which is
# probably a good idea).
return 1, Statement("The forecast for tomorrow is: " + self.get_weather(latitude, longitude))
return 0, Statement("")
def get_latitude(self, user_input):
"""
Returns the latitude extracted from the input.
"""
for token in self.tagger.tokenize(user_input):
if "latitude=" in token:
return re.sub("latitude=", "", token)
return ""
def get_longitude(self, user_input):
"""
Returns the longitude extracted from the input.
"""
for token in self.tagger.tokenize(user_input):
if "longitude=" in token:
return re.sub("longitude=", "", token)
return ""
def get_weather(self, latitude, longitude):
"""
Returns the weather for a given latitude and longitude.
"""
# @TODO: Find some way to suppress the warnings generated by this.
forecast = forecastio.load_forecast(self.forecastio_api_key, latitude, longitude)
return forecast.hourly().summary
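# Rough usage sketch (for illustration; the adapter is normally constructed by
# the chatterbot framework, and the API key below is a placeholder assumption):
#
#   adapter = WeatherLogicAdapter(forecastio_api_key="<your-forecast.io-key>")
#   confidence, reply = adapter.process(
#       Statement("what is the weather at latitude=59.3 longitude=18.1"))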
| imminent-tuba/thesis | server/chatterbot/chatterbot/adapters/logic/weather.py | Python | mit | 2,362 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Alarm.last_checked'
db.alter_column(u'ddsc_core_alarm', 'last_checked', self.gf('django.db.models.fields.DateTimeField')(null=True))
def backwards(self, orm):
# Changing field 'Alarm.last_checked'
db.alter_column(u'ddsc_core_alarm', 'last_checked', self.gf('django.db.models.fields.DateTimeField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ddsc_core.alarm': {
'Meta': {'object_name': 'Alarm'},
'active_status': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_cr': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True', 'blank': 'True'}),
'first_born': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'message_type': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'previous_alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']", 'null': 'True', 'blank': 'True'}),
'single_or_group': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['contenttypes.ContentType']"}),
'template': ('django.db.models.fields.TextField', [], {'default': "u'this is a alarm message template'"}),
'urgency': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
u'ddsc_core.alarm_active': {
'Meta': {'object_name': 'Alarm_Active'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
'deactivated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
'first_triggered_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {})
},
u'ddsc_core.alarm_item': {
'Meta': {'object_name': 'Alarm_Item'},
'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
'alarm_type': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['contenttypes.ContentType']"}),
'comparision': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_born': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value_bool': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'value_double': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_int': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'ddsc_core.compartment': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ddsc_core.folder': {
'Meta': {'object_name': 'Folder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.idmapping': {
'Meta': {'object_name': 'IdMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.ipaddress': {
'Meta': {'object_name': 'IPAddress'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.location': {
'Meta': {'object_name': 'Location'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 14, 0, 0)'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geometry_precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'point_geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '4258', 'null': 'True', 'blank': 'True'}),
'real_geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': '4258', 'null': 'True', 'blank': 'True'}),
'relative_location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'show_on_map': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'ddsc_core.locationtype': {
'Meta': {'object_name': 'LocationType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'location_types'", 'blank': 'True', 'to': u"orm['ddsc_core.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
u'ddsc_core.logicalgroup': {
'Meta': {'ordering': "[u'owner', u'name']", 'unique_together': "((u'owner', u'name'),)", 'object_name': 'LogicalGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']"}),
'timeseries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'logical_groups'", 'blank': 'True', 'to': u"orm['ddsc_core.Timeseries']"})
},
u'ddsc_core.logicalgroupedge': {
'Meta': {'unique_together': "((u'child', u'parent'),)", 'object_name': 'LogicalGroupEdge'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parents'", 'to': u"orm['ddsc_core.LogicalGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'childs'", 'to': u"orm['ddsc_core.LogicalGroup']"})
},
u'ddsc_core.logrecord': {
'Meta': {'object_name': 'LogRecord'},
'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '8', 'db_index': 'True'}),
'line': ('django.db.models.fields.SmallIntegerField', [], {}),
'message': ('django.db.models.fields.TextField', [], {}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
u'ddsc_core.manufacturer': {
'Meta': {'object_name': 'Manufacturer'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'ddsc_core.measuringdevice': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ddsc_core.measuringmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ddsc_core.parameter': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ddsc_core.processingmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ddsc_core.referenceframe': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ddsc_core.source': {
'Meta': {'object_name': 'Source'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 14, 0, 0)'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Manufacturer']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'source_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'ddsc_core.sourcegroup': {
'Meta': {'object_name': 'SourceGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
},
u'ddsc_core.statuscache': {
'Meta': {'object_name': 'StatusCache'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mean_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'min_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'modify_timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
'nr_of_measurements_doubtful': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'nr_of_measurements_reliable': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'nr_of_measurements_total': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'nr_of_measurements_unreliable': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status_date': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'std_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']"})
},
u'ddsc_core.timeseries': {
'Meta': {'object_name': 'Timeseries'},
'compartment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Compartment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 14, 0, 0)'}),
'data_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'timeseries'", 'blank': 'True', 'to': "orm['lizard_security.DataSet']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'first_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': u"orm['ddsc_core.Location']"}),
'measuring_device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringDevice']", 'null': 'True', 'blank': 'True'}),
'measuring_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringMethod']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Parameter']"}),
'processing_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ProcessingMethod']", 'null': 'True', 'blank': 'True'}),
'reference_frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ReferenceFrame']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Source']", 'null': 'True', 'blank': 'True'}),
'supplying_systems': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['ddsc_core.IdMapping']", 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Unit']"}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}),
'validate_diff_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_diff_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_max_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_max_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_min_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'validate_min_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'ddsc_core.timeseriesgroup': {
'Meta': {'object_name': 'TimeseriesGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'parameters': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Parameter']", 'symmetrical': 'False'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
},
u'ddsc_core.timeseriesselectionrule': {
'Meta': {'ordering': "[u'pk']", 'object_name': 'TimeseriesSelectionRule'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'criterion': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'operator': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
u'ddsc_core.unit': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'lizard_security.dataowner': {
'Meta': {'ordering': "['name']", 'object_name': 'DataOwner'},
'data_managers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['owner', 'name']", 'unique_together': "(('owner', 'name'),)", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['ddsc_core'] | ddsc/ddsc-core | ddsc_core/migrations/0078_auto__chg_field_alarm_last_checked.py | Python | mit | 28,517 |
#!/usr/bin/env python
print " Formated number:", "{:,}".format(102403)
| daltonmenezes/learning-C | src/Python/format/thousands_separator.py | Python | mit | 72 |
# Name: Seline, Li, Taylor, Son
# Leap Motion project
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import time
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = Axes3D(fig)
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
ax.plot(x, y, z, label='parametric curve')
ax.legend()
plt.ion()
plt.show()
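# Spin the camera one degree at a time around the curve; draw() refreshes the
# interactive window on every step, giving a simple rotation animation.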
for ii in xrange(0,360,1):
ax.view_init(elev=10, azim=ii)
plt.draw()
print "drawn? " + str(ii)
time.sleep(0.01)
| BoolLi/LeapMotionDesignChallenge | Plotting.py | Python | mit | 602 |
#!/usr/bin/env python3
# imports go here
import sched
import time
#
# Free Coding session for 2015-05-04
# Written by Matt Warren
#
scheduler = sched.scheduler(time.time, time.sleep)
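# scheduler.enter(delay, priority, action) schedules `action` to run `delay`
# seconds from now; run() below blocks until every queued event has fired.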
def print_time():
print(time.time())
return True
scheduler.enter(3, 1, print_time)
scheduler.enter(5, 1, print_time)
print(scheduler.queue)
scheduler.run() # blocking until all scheduled things finish
print("done")
| mfwarren/FreeCoding | 2015/05/fc_2015_05_04.py | Python | mit | 412 |
# -*- coding: utf-8 -*-
# pylint: disable=C0302,fixme, protected-access
""" The core module contains the SoCo class that implements
the main entry to the SoCo functionality
"""
from __future__ import unicode_literals
import socket
import logging
import re
import requests
from .services import DeviceProperties, ContentDirectory
from .services import RenderingControl, AVTransport, ZoneGroupTopology
from .services import AlarmClock
from .groups import ZoneGroup
from .exceptions import DIDLMetadataError, SoCoUPnPException
from .data_structures import DidlPlaylistContainer,\
SearchResult, Queue, DidlObject, DidlMusicAlbum,\
from_didl_string, to_didl_string, DidlResource
from .utils import really_utf8, camel_to_underscore, really_unicode,\
url_escape_path
from .xml import XML
from soco import config
_LOG = logging.getLogger(__name__)
class _ArgsSingleton(type):
""" A metaclass which permits only a single instance of each derived class
sharing the same `_class_group` class attribute to exist for any given set
of positional arguments.
    Attempting to instantiate a second instance of a derived class, or of
    another class with the same `_class_group`, with the same args returns the
    existing instance.
For example:
>>> class ArgsSingletonBase(object):
... __metaclass__ = _ArgsSingleton
...
>>> class First(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
...
>>> class Second(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
>>> assert First('hi') is First('hi')
>>> assert First('hi') is First('bye')
AssertionError
>>> assert First('hi') is Second('hi')
"""
_instances = {}
def __call__(cls, *args, **kwargs):
key = cls._class_group if hasattr(cls, '_class_group') else cls
if key not in cls._instances:
cls._instances[key] = {}
if args not in cls._instances[key]:
cls._instances[key][args] = super(_ArgsSingleton, cls).__call__(
*args, **kwargs)
return cls._instances[key][args]
class _SocoSingletonBase( # pylint: disable=too-few-public-methods,no-init
_ArgsSingleton(str('ArgsSingletonMeta'), (object,), {})):
""" The base class for the SoCo class.
Uses a Python 2 and 3 compatible method of declaring a metaclass. See, eg,
here: http://www.artima.com/weblogs/viewpost.jsp?thread=236234 and
here: http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
"""
pass
# pylint: disable=R0904,too-many-instance-attributes
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
Public functions::
play -- Plays the current item.
play_uri -- Plays a track or a music stream by URI.
play_from_queue -- Plays an item in the queue.
pause -- Pause the currently playing track.
stop -- Stop the currently playing track.
seek -- Move the currently playing track a given elapsed time.
next -- Go to the next track.
previous -- Go back to the previous track.
switch_to_line_in -- Switch the speaker's input to line-in.
switch_to_tv -- Switch the speaker's input to TV.
get_current_track_info -- Get information about the currently playing
track.
get_speaker_info -- Get information about the Sonos speaker.
partymode -- Put all the speakers in the network in the same group.
join -- Join this speaker to another "master" speaker.
unjoin -- Remove this speaker from a group.
get_queue -- Get information about the queue.
get_artists -- Get artists from the music library
get_album_artists -- Get album artists from the music library
get_albums -- Get albums from the music library
get_genres -- Get genres from the music library
get_composers -- Get composers from the music library
get_tracks -- Get tracks from the music library
get_playlists -- Get playlists from the music library
get_music_library_information -- Get information from the music library
get_current_transport_info -- get speakers playing state
browse_by_idstring -- Browse (get sub-elements) a given type
add_uri_to_queue -- Adds an URI to the queue
add_to_queue -- Add a track to the end of the queue
remove_from_queue -- Remove a track from the queue
clear_queue -- Remove all tracks from queue
get_favorite_radio_shows -- Get favorite radio shows from Sonos'
Radio app.
get_favorite_radio_stations -- Get favorite radio stations.
create_sonos_playlist -- Create a new empty Sonos playlist
create_sonos_playlist_from_queue -- Create a new Sonos playlist
from the current queue.
add_item_to_sonos_playlist -- Adds a queueable item to a Sonos'
playlist
get_item_album_art_uri -- Get an item's Album Art absolute URI.
search_track -- Search for an artist, artist's albums, or track.
get_albums_for_artist -- Get albums for an artist.
get_tracks_for_album -- Get tracks for an artist's album.
start_library_update -- Trigger an update of the music library.
Properties::
uid -- The speaker's unique identifier
mute -- The speaker's mute status.
volume -- The speaker's volume.
bass -- The speaker's bass EQ.
treble -- The speaker's treble EQ.
loudness -- The status of the speaker's loudness compensation.
cross_fade -- The status of the speaker's crossfade.
status_light -- The state of the Sonos status light.
player_name -- The speaker's name.
play_mode -- The queue's repeat/shuffle settings.
queue_size -- Get size of queue.
library_updating -- Whether music library update is in progress.
album_artist_display_option -- album artist display option
.. warning::
These properties are not cached and will obtain information over the
network, so may take longer than expected to set or return a value. It
may be a good idea for you to cache the value in your own code.
"""
_class_group = 'SoCo'
# Key words used when performing searches
SEARCH_TRANSLATION = {'artists': 'A:ARTIST',
'album_artists': 'A:ALBUMARTIST',
'albums': 'A:ALBUM',
'genres': 'A:GENRE',
'composers': 'A:COMPOSER',
'tracks': 'A:TRACKS',
'playlists': 'A:PLAYLISTS',
'share': 'S:',
'sonos_playlists': 'SQ:',
'categories': 'A:'}
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
# Note: Creation of a SoCo instance should be as cheap and quick as
# possible. Do not make any network calls here
super(SoCo, self).__init__()
# Check if ip_address is a valid IPv4 representation.
# Sonos does not (yet) support IPv6
try:
socket.inet_aton(ip_address)
except socket.error:
raise ValueError("Not a valid IP address string")
#: The speaker's ip address
self.ip_address = ip_address
self.speaker_info = {} # Stores information about the current speaker
# The services which we use
# pylint: disable=invalid-name
self.avTransport = AVTransport(self)
self.contentDirectory = ContentDirectory(self)
self.deviceProperties = DeviceProperties(self)
self.renderingControl = RenderingControl(self)
self.zoneGroupTopology = ZoneGroupTopology(self)
self.alarmClock = AlarmClock(self)
# Some private attributes
self._all_zones = set()
self._groups = set()
self._is_bridge = None
self._is_coordinator = False
self._player_name = None
self._uid = None
self._visible_zones = set()
self._zgs_cache = None
_LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
return "<{0} object at ip {1}>".format(
self.__class__.__name__, self.ip_address)
def __repr__(self):
return '{0}("{1}")'.format(self.__class__.__name__, self.ip_address)
@property
def player_name(self):
""" The speaker's name. A string. """
# We could get the name like this:
# result = self.deviceProperties.GetZoneAttributes()
# return result["CurrentZoneName"]
# but it is probably quicker to get it from the group topology
# and take advantage of any caching
self._parse_zone_group_state()
return self._player_name
@player_name.setter
def player_name(self, playername):
""" Set the speaker's name """
self.deviceProperties.SetZoneAttributes([
('DesiredZoneName', playername),
('DesiredIcon', ''),
('DesiredConfiguration', '')
])
@property
def uid(self):
""" A unique identifier. Looks like: RINCON_000XXXXXXXXXX1400 """
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._uid is not None:
return self._uid
# if not, we have to get it from the zone topology, which
# is probably quicker than any alternative, since the zgt is probably
# cached. This will set self._uid for us for next time, so we won't
# have to do this again
self._parse_zone_group_state()
return self._uid
# An alternative way of getting the uid is as follows:
# self.device_description_url = \
# 'http://{0}:1400/xml/device_description.xml'.format(
# self.ip_address)
# response = requests.get(self.device_description_url).text
# tree = XML.fromstring(response.encode('utf-8'))
# udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
# # the udn has a "uuid:" prefix before the uid, so we need to strip it
# self._uid = uid = udn[5:]
# return uid
@property
def is_visible(self):
""" Is this zone visible? A zone might be invisible if, for example it
is a bridge, or the slave part of stereo pair.
return True or False
"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
return self in self.visible_zones
@property
def is_bridge(self):
""" Is this zone a bridge? """
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._is_bridge is not None:
return self._is_bridge
# if not, we have to get it from the zone topology. This will set
# self._is_bridge for us for next time, so we won't have to do this
# again
self._parse_zone_group_state()
return self._is_bridge
@property
def is_coordinator(self):
""" Return True if this zone is a group coordinator, otherwise False.
return True or False
"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
self._parse_zone_group_state()
return self._is_coordinator
@property
def play_mode(self):
""" The queue's play mode. Case-insensitive options are:
NORMAL -- Turns off shuffle and repeat.
REPEAT_ALL -- Turns on repeat and turns off shuffle.
SHUFFLE -- Turns on shuffle *and* repeat. (It's strange, I know.)
SHUFFLE_NOREPEAT -- Turns on shuffle and turns off repeat.
"""
result = self.avTransport.GetTransportSettings([
('InstanceID', 0),
])
return result['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
""" Set the speaker's mode """
playmode = playmode.upper()
if playmode not in PLAY_MODES:
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
])
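    # A short sketch of the play_mode property above (not part of the original
    # source); 'zone' is assumed to be a SoCo instance:
    #
    #   zone.play_mode                    # -> e.g. 'NORMAL'
    #   zone.play_mode = 'shuffle'        # case-insensitive, stored as 'SHUFFLE'
    #   zone.play_mode = 'bogus'          # -> raises KeyError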
@property
def cross_fade(self):
""" The speaker's cross fade state.
True if enabled, False otherwise """
response = self.avTransport.GetCrossfadeMode([
('InstanceID', 0),
])
cross_fade_state = response['CrossfadeMode']
return True if int(cross_fade_state) else False
@cross_fade.setter
def cross_fade(self, crossfade):
""" Set the speaker's cross fade state. """
crossfade_value = '1' if crossfade else '0'
self.avTransport.SetCrossfadeMode([
('InstanceID', 0),
('CrossfadeMode', crossfade_value)
])
def play_from_queue(self, index, start=True):
""" Play a track from the queue by index. The index number is
required as an argument, where the first index is 0.
index: the index of the track to play; first item in the queue is 0
start: If the item that has been set should start playing
Returns:
True if the Sonos speaker successfully started playing the track.
False if the track did not start (this may be because it was not
requested to start because "start=False")
Raises SoCoException (or a subclass) upon errors.
"""
# Grab the speaker's information if we haven't already since we'll need
# it in the next step.
if not self.speaker_info:
self.get_speaker_info()
# first, set the queue itself as the source URI
uri = 'x-rincon-queue:{0}#0'.format(self.uid)
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', '')
])
# second, set the track number with a seek command
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'TRACK_NR'),
('Target', index + 1)
])
# finally, just play what's set if needed
if start:
return self.play()
return False
def play(self):
"""Play the currently selected track.
Returns:
True if the Sonos speaker successfully started playing the track.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.Play([
('InstanceID', 0),
('Speed', 1)
])
def play_uri(self, uri='', meta='', title='', start=True):
""" Play a given stream. Pauses the queue.
If there is no metadata passed in and there is a title set then a
metadata object will be created. This is often the case if you have
        a custom stream; it will need at least the title in the metadata in
order to play.
Arguments:
uri -- URI of a stream to be played.
meta -- The track metadata to show in the player, DIDL format.
title -- The track title to show in the player
start -- If the URI that has been set should start playing
Returns:
True if the Sonos speaker successfully started playing the track.
False if the track did not start (this may be because it was not
requested to start because "start=False")
Raises SoCoException (or a subclass) upon errors.
"""
if meta == '' and title != '':
meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
'/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
'<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
'<dc:title>{title}</dc:title><upnp:class>'\
'object.item.audioItem.audioBroadcast</upnp:class><desc '\
'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
tunein_service = 'SA_RINCON65031_'
# Radio stations need to have at least a title to play
meta = meta_template.format(title=title, service=tunein_service)
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', meta)
])
# The track is enqueued, now play it if needed
if start:
return self.play()
return False
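    # A sketch of play_uri usage (not part of the original source). The URIs
    # are placeholders; substitute a real stream or file URI:
    #
    #   zone.play_uri('x-rincon-mp3radio://example.com/stream.mp3',
    #                 title='Example radio')   # minimal DIDL metadata is built
    #   zone.play_uri('http://example.com/song.mp3', start=False)  # cue only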
def pause(self):
""" Pause the currently playing track.
Returns:
True if the Sonos speaker successfully paused the track.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.Pause([
('InstanceID', 0),
('Speed', 1)
])
def stop(self):
""" Stop the currently playing track.
Returns:
True if the Sonos speaker successfully stopped the playing track.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.Stop([
('InstanceID', 0),
('Speed', 1)
])
def seek(self, timestamp):
""" Seeks to a given timestamp in the current track, specified in the
format of HH:MM:SS or H:MM:SS.
Returns:
True if the Sonos speaker successfully seeked to the timecode.
Raises SoCoException (or a subclass) upon errors.
"""
if not re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
raise ValueError('invalid timestamp, use HH:MM:SS format')
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'REL_TIME'),
('Target', timestamp)
])
def next(self):
""" Go to the next track.
Returns:
True if the Sonos speaker successfully skipped to the next track.
Raises SoCoException (or a subclass) upon errors.
Keep in mind that next() can return errors
for a variety of reasons. For example, if the Sonos is streaming
Pandora and you call next() several times in quick succession an error
code will likely be returned (since Pandora has limits on how many
songs can be skipped).
"""
self.avTransport.Next([
('InstanceID', 0),
('Speed', 1)
])
def previous(self):
""" Go back to the previously played track.
Returns:
True if the Sonos speaker successfully went to the previous track.
Raises SoCoException (or a subclass) upon errors.
Keep in mind that previous() can return errors
for a variety of reasons. For example, previous() will return an error
code (error code 701) if the Sonos is streaming Pandora since you can't
go back on tracks.
"""
self.avTransport.Previous([
('InstanceID', 0),
('Speed', 1)
])
@property
def mute(self):
""" The speaker's mute state. True if muted, False otherwise """
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return True if int(mute_state) else False
@mute.setter
def mute(self, mute):
""" Mute (or unmute) the speaker """
mute_value = '1' if mute else '0'
self.renderingControl.SetMute([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredMute', mute_value)
])
@property
def volume(self):
""" The speaker's volume. An integer between 0 and 100. """
response = self.renderingControl.GetVolume([
('InstanceID', 0),
('Channel', 'Master'),
])
volume = response['CurrentVolume']
return int(volume)
@volume.setter
def volume(self, volume):
""" Set the speaker's volume """
volume = int(volume)
volume = max(0, min(volume, 100)) # Coerce in range
self.renderingControl.SetVolume([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredVolume', volume)
])
@property
def bass(self):
""" The speaker's bass EQ. An integer between -10 and 10. """
response = self.renderingControl.GetBass([
('InstanceID', 0),
('Channel', 'Master'),
])
bass = response['CurrentBass']
return int(bass)
@bass.setter
def bass(self, bass):
""" Set the speaker's bass """
bass = int(bass)
bass = max(-10, min(bass, 10)) # Coerce in range
self.renderingControl.SetBass([
('InstanceID', 0),
('DesiredBass', bass)
])
@property
def treble(self):
""" The speaker's treble EQ. An integer between -10 and 10. """
response = self.renderingControl.GetTreble([
('InstanceID', 0),
('Channel', 'Master'),
])
treble = response['CurrentTreble']
return int(treble)
@treble.setter
def treble(self, treble):
""" Set the speaker's treble """
treble = int(treble)
treble = max(-10, min(treble, 10)) # Coerce in range
self.renderingControl.SetTreble([
('InstanceID', 0),
('DesiredTreble', treble)
])
@property
def loudness(self):
""" The Sonos speaker's loudness compensation. True if on, otherwise
False.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
"""
response = self.renderingControl.GetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
])
loudness = response["CurrentLoudness"]
return True if int(loudness) else False
@loudness.setter
def loudness(self, loudness):
""" Switch on/off the speaker's loudness compensation """
loudness_value = '1' if loudness else '0'
self.renderingControl.SetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredLoudness', loudness_value)
])
def _parse_zone_group_state(self):
""" The Zone Group State contains a lot of useful information. Retrieve
and parse it, and populate the relevant properties. """
# zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
# this:
#
# <ZoneGroups>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
# <ZoneGroupMember
# BootSeq="33"
# Configuration="1"
# Icon="x-rincon-roomicon:zoneextender"
# Invisible="1"
# IsZoneBridge="1"
# Location="http://192.168.1.100:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000ZZZ1400"
# ZoneName="BRIDGE"/>
# </ZoneGroup>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
# <ZoneGroupMember
# BootSeq="44"
# Configuration="1"
# Icon="x-rincon-roomicon:living"
# Location="http://192.168.1.101:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000XXX1400"
# ZoneName="Living Room"/>
# <ZoneGroupMember
# BootSeq="52"
# Configuration="1"
# Icon="x-rincon-roomicon:kitchen"
# Location="http://192.168.1.102:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000YYY1400"
# ZoneName="Kitchen"/>
# </ZoneGroup>
# </ZoneGroups>
#
def parse_zone_group_member(member_element):
""" Parse a ZoneGroupMember or Satellite element from Zone Group
State, create a SoCo instance for the member, set basic attributes
and return it. """
# Create a SoCo instance for each member. Because SoCo
# instances are singletons, this is cheap if they have already
# been created, and useful if they haven't. We can then
# update various properties for that instance.
member_attribs = member_element.attrib
ip_addr = member_attribs['Location'].\
split('//')[1].split(':')[0]
zone = config.SOCO_CLASS(ip_addr)
# uid doesn't change, but it's not harmful to (re)set it, in case
# the zone is as yet unseen.
zone._uid = member_attribs['UUID']
zone._player_name = member_attribs['ZoneName']
# add the zone to the set of all members, and to the set
# of visible members if appropriate
is_visible = False if member_attribs.get(
'Invisible') == '1' else True
if is_visible:
self._visible_zones.add(zone)
self._all_zones.add(zone)
return zone
# This is called quite frequently, so it is worth optimising it.
# Maintain a private cache. If the zgt has not changed, there is no
# need to repeat all the XML parsing. In addition, switch on network
# caching for a short interval (5 secs).
zgs = self.zoneGroupTopology.GetZoneGroupState(
cache_timeout=5)['ZoneGroupState']
if zgs == self._zgs_cache:
return
self._zgs_cache = zgs
tree = XML.fromstring(zgs.encode('utf-8'))
# Empty the set of all zone_groups
self._groups.clear()
# and the set of all members
self._all_zones.clear()
self._visible_zones.clear()
# Loop over each ZoneGroup Element
for group_element in tree.findall('ZoneGroup'):
coordinator_uid = group_element.attrib['Coordinator']
group_uid = group_element.attrib['ID']
group_coordinator = None
members = set()
for member_element in group_element.findall('ZoneGroupMember'):
zone = parse_zone_group_member(member_element)
# Perform extra processing relevant to direct zone group
# members
#
# If this element has the same UUID as the coordinator, it is
# the coordinator
if zone._uid == coordinator_uid:
group_coordinator = zone
zone._is_coordinator = True
else:
zone._is_coordinator = False
# is_bridge doesn't change, but it does no real harm to
# set/reset it here, just in case the zone has not been seen
# before
zone._is_bridge = True if member_element.attrib.get(
'IsZoneBridge') == '1' else False
# add the zone to the members for this group
members.add(zone)
# Loop over Satellite elements if present, and process as for
# ZoneGroup elements
for satellite_element in member_element.findall('Satellite'):
zone = parse_zone_group_member(satellite_element)
# Assume a satellite can't be a bridge or coordinator, so
# no need to check.
#
# Add the zone to the members for this group.
members.add(zone)
# Now create a ZoneGroup with this info and add it to the list
# of groups
self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
""" Return a set of all the available groups"""
self._parse_zone_group_state()
return self._groups
@property
def group(self):
"""The Zone Group of which this device is a member.
group will be None if this zone is a slave in a stereo pair."""
for group in self.all_groups:
if self in group:
return group
return None
# To get the group directly from the network, try the code below
# though it is probably slower than that above
# current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[
# 'CurrentZoneGroupID']
# if current_group_id:
# for group in self.all_groups:
# if group.uid == current_group_id:
# return group
# else:
# return None
@property
def all_zones(self):
""" Return a set of all the available zones"""
self._parse_zone_group_state()
return self._all_zones
@property
def visible_zones(self):
""" Return an set of all visible zones"""
self._parse_zone_group_state()
return self._visible_zones
def partymode(self):
""" Put all the speakers in the network in the same group, a.k.a Party
Mode.
This blog shows the initial research responsible for this:
http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html
The trick seems to be (only tested on a two-speaker setup) to tell each
speaker which to join. There's probably a bit more to it if multiple
groups have been defined.
"""
# Tell every other visible zone to join this one
# pylint: disable = expression-not-assigned
[zone.join(self) for zone in self.visible_zones if zone is not self]
def join(self, master):
""" Join this speaker to another "master" speaker.
.. note:: The signature of this method has changed in 0.8. It now
requires a SoCo instance to be passed as `master`, not an IP
address
"""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
def unjoin(self):
""" Remove this speaker from a group.
        Seems to work OK even if you remove what was previously the group
        master from its own group. Also returns OK if the speaker was not in
        a group.
Returns:
True if this speaker has left the group.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.BecomeCoordinatorOfStandaloneGroup([
('InstanceID', 0)
])
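    # A sketch of the grouping methods (partymode/join/unjoin) above; not part
    # of the original source, and the speakers/addresses are hypothetical:
    #
    #   kitchen = SoCo('192.168.1.68')
    #   living = SoCo('192.168.1.69')
    #   kitchen.join(living)     # kitchen now plays whatever 'living' plays
    #   kitchen.unjoin()         # kitchen becomes a standalone coordinator again
    #   living.partymode()       # every other visible zone joins 'living'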
def switch_to_line_in(self):
""" Switch the speaker's input to line-in.
Returns:
True if the Sonos speaker successfully switched to line-in.
If an error occurs, we'll attempt to parse the error and return a UPnP
error code. If that fails, the raw response sent back from the Sonos
speaker will be returned.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(self.uid)),
('CurrentURIMetaData', '')
])
def switch_to_tv(self):
""" Switch the speaker's input to TV.
Returns:
True if the Sonos speaker successfully switched to TV.
If an error occurs, we'll attempt to parse the error and return a UPnP
error code. If that fails, the raw response sent back from the Sonos
speaker will be returned.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-sonos-htastream:{0}:spdif'.format(self.uid)),
('CurrentURIMetaData', '')
])
@property
def status_light(self):
""" The white Sonos status light between the mute button and the volume
up button on the speaker. True if on, otherwise False.
"""
result = self.deviceProperties.GetLEDState()
LEDState = result["CurrentLEDState"] # pylint: disable=invalid-name
return True if LEDState == "On" else False
@status_light.setter
def status_light(self, led_on):
""" Switch on/off the speaker's status light """
led_state = 'On' if led_on else 'Off'
self.deviceProperties.SetLEDState([
('DesiredLEDState', led_state),
])
def _build_album_art_full_uri(self, url):
""" Ensure an Album Art URI is an absolute URI
:param url: The album art URI
"""
# Add on the full album art link, as the URI version
# does not include the ipaddress
if not url.startswith(('http:', 'https:')):
url = 'http://' + self.ip_address + ':1400' + url
return url
def get_current_track_info(self):
""" Get information about the currently playing track.
Returns:
A dictionary containing the following information about the currently
playing track: playlist_position, duration, title, artist, album,
position and a link to the album art.
If we're unable to return data for a field, we'll return an empty
string. This can happen for all kinds of reasons so be sure to check
values. For example, a track may not have complete metadata and be
missing an album name. In this case track['album'] will be an empty
string.
"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
'position': ''}
track['playlist_position'] = response['Track']
track['duration'] = response['TrackDuration']
track['uri'] = response['TrackURI']
track['position'] = response['RelTime']
metadata = response['TrackMetaData']
# Store the entire Metadata entry in the track, this can then be
# used if needed by the client to restart a given URI
track['metadata'] = metadata
# Duration seems to be '0:00:00' when listening to radio
if metadata != '' and track['duration'] == '0:00:00':
metadata = XML.fromstring(really_utf8(metadata))
# Try parse trackinfo
trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
'metadata-1-0/}streamContent')
index = trackinfo.find(' - ')
if index > -1:
track['artist'] = trackinfo[:index]
track['title'] = trackinfo[index + 3:]
else:
# Might find some kind of title anyway in metadata
track['title'] = metadata.findtext('.//{http://purl.org/dc/'
'elements/1.1/}title')
if not track['title']:
_LOG.warning('Could not handle track info: "%s"',
trackinfo)
track['title'] = trackinfo
# If the speaker is playing from the line-in source, querying for track
# metadata will return "NOT_IMPLEMENTED".
elif metadata not in ('', 'NOT_IMPLEMENTED', None):
# Track metadata is returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(metadata))
md_title = metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
md_artist = metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}creator')
md_album = metadata.findtext(
'.//{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
track['title'] = ""
if md_title:
track['title'] = md_title
track['artist'] = ""
if md_artist:
track['artist'] = md_artist
track['album'] = ""
if md_album:
track['album'] = md_album
album_art_url = metadata.findtext(
'.//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
if album_art_url is not None:
track['album_art'] = self._build_album_art_full_uri(
album_art_url)
return track
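    # A sketch of reading the dict returned by get_current_track_info() (not
    # part of the original source); the printed values are invented:
    #
    #   info = zone.get_current_track_info()
    #   print(info['title'], '-', info['artist'])
    #   print(info['position'], '/', info['duration'])  # e.g. 0:01:23 / 0:03:45
    #   # Any field may be an empty string if the metadata is incomplete.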
def get_speaker_info(self, refresh=False, timeout=(3, 5)):
""" Get information about the Sonos speaker.
Arguments:
refresh -- Refresh the speaker info cache.
timeout -- How long to wait for the server to send
data before giving up, as a float, or a
(`connect timeout, read timeout`_) tuple.
                       If not specified, a default (3, 5) timeout will be
                       used; to disable the timeout entirely, set it
                       explicitly to None.
Returns:
Information about the Sonos speaker, such as the UID, MAC Address, and
Zone Name.
"""
if self.speaker_info and refresh is False:
return self.speaker_info
else:
response = requests.get('http://' + self.ip_address +
':1400/status/zp', timeout=timeout)
dom = XML.fromstring(response.content)
if dom.findtext('.//ZoneName') is not None:
self.speaker_info['zone_name'] = \
dom.findtext('.//ZoneName')
self.speaker_info['zone_icon'] = dom.findtext('.//ZoneIcon')
self.speaker_info['uid'] = self.uid
self.speaker_info['serial_number'] = \
dom.findtext('.//SerialNumber')
self.speaker_info['software_version'] = \
dom.findtext('.//SoftwareVersion')
self.speaker_info['hardware_version'] = \
dom.findtext('.//HardwareVersion')
self.speaker_info['mac_address'] = dom.findtext('.//MACAddress')
return self.speaker_info
def get_current_transport_info(self):
""" Get the current playback state
Returns:
        A dictionary containing the following information about the speaker's
        playing state:
        current_transport_state (PLAYING, PAUSED_PLAYBACK, STOPPED),
        current_transport_status (OK, ?), current_speed (1, ?).
        This allows us to know whether the speaker is playing or not. Other
        states of CurrentTransportStatus and CurrentSpeed are unknown.
"""
response = self.avTransport.GetTransportInfo([
('InstanceID', 0),
])
playstate = {
'current_transport_status': '',
'current_transport_state': '',
'current_transport_speed': ''
}
playstate['current_transport_state'] = \
response['CurrentTransportState']
playstate['current_transport_status'] = \
response['CurrentTransportStatus']
playstate['current_transport_speed'] = response['CurrentSpeed']
return playstate
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
""" Get information about the queue
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A :py:class:`~.soco.data_structures.Queue` object
        This method is heavily based on Sam Soffes' (aka soffes) Ruby
        implementation.
"""
queue = []
response = self.contentDirectory.Browse([
('ObjectID', 'Q:0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
result = response['Result']
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag])
        # I'm not sure this is necessary (any more). Even with an empty queue,
        # there is still a result object. This should be investigated.
if not result:
# pylint: disable=star-args
return Queue(queue, **metadata)
items = from_didl_string(result)
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(item)
queue.append(item)
# pylint: disable=star-args
return Queue(queue, **metadata)
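    # A sketch of consuming the Queue object returned above (not part of the
    # original source); it assumes Queue is list-like and exposes the
    # total_matches metadata, as in soco.data_structures:
    #
    #   queue = zone.get_queue(start=0, max_items=50)
    #   print(queue.total_matches, 'items in the queue in total')
    #   for item in queue:
    #       print(item.title)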
@property
def queue_size(self):
""" Get size of queue """
response = self.contentDirectory.Browse([
('ObjectID', 'Q:0'),
('BrowseFlag', 'BrowseMetadata'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 1),
('SortCriteria', '')
])
dom = XML.fromstring(really_utf8(response['Result']))
queue_size = None
container = dom.find(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
if container is not None:
child_count = container.get('childCount')
if child_count is not None:
queue_size = int(child_count)
return queue_size
def get_sonos_playlists(self, *args, **kwargs):
""" Convenience method for:
get_music_library_information('sonos_playlists')
Refer to the docstring for that method
"""
args = tuple(['sonos_playlists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_artists(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='artists'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['artists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_album_artists(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='album_artists'`. For details on remaining arguments
refer to the docstring for that method.
"""
args = tuple(['album_artists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_albums(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='albums'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['albums'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_genres(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='genres'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['genres'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_composers(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='composers'`. For details on remaining arguments
refer to the docstring for that method.
"""
args = tuple(['composers'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_tracks(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='tracks'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['tracks'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_playlists(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='playlists'`. For details on remaining arguments
refer to the docstring for that method.
        NOTE: The playlists that are referred to here are the playlists (files)
        imported from the music library; they are not the Sonos playlists.
"""
args = tuple(['playlists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
# pylint: disable=too-many-locals, too-many-arguments, too-many-branches
def get_music_library_information(self, search_type, start=0,
max_items=100, full_album_art_uri=False,
search_term=None, subcategories=None,
complete_result=False):
""" Retrieve music information objects from the music library
        This is the main method for getting music information items, such as
        tracks and albums, from the music library. It can be used in a few
        different ways:
The **search_term** argument performs a fuzzy search on that string in
        the results, so e.g. calling::
get_music_library_items('artist', search_term='Metallica')
will perform a fuzzy search for the term 'Metallica' among all the
artists.
        Using the **subcategories** argument will jump directly into that
        subcategory of the search and return results from there. So, e.g.,
        knowing that among the artists there is one called 'Metallica', calling::
get_music_library_items('artist', subcategories=['Metallica'])
will jump directly into the 'Metallica' sub category and return the
albums associated with Metallica and::
get_music_library_items('artist', subcategories=['Metallica',
'Black'])
will return the tracks of the album 'Black' by the artist 'Metallica'.
The order of sub category types is: Genres->Artists->Albums->Tracks.
It is also possible to combine the two, to perform a fuzzy search in a
sub category.
The **start**, **max_items** and **complete_result** arguments all
        have to do with paging of the results. By default, the searches are
always paged, because there is a limit to how many items we can get at
a time. This paging is exposed to the user with the start and max_items
arguments. So calling::
get_music_library_items('artists', start=0, max_items=100)
get_music_library_items('artists', start=100, max_items=100)
will get the first and next 100 items, respectively. It is also
possible to ask for all the elements at once::
get_music_library_items('artists', complete_result=True)
This will perform the paging internally and simply return all the
items.
:param search_type: The kind of information to retrieve. Can be one of:
'artists', 'album_artists', 'albums', 'genres', 'composers',
'tracks', 'share', 'sonos_playlists', and 'playlists', where
playlists are the imported file based playlists from the
music library
:param start: Starting number of returned matches (zero based).
:param max_items: Maximum number of returned matches. NOTE: The maximum
may be restricted by the unit, presumably due to transfer
            size considerations, so check the number returned against the
            number requested.
:param full_album_art_uri: If the album art URI should include the
IP address
:param search_term: A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed in the
subcategory
:param subcategories: A list of strings that indicate one or more
subcategories to dive into
:param complete_result: Will disable paging (ignore start and
max_items) and return all results for the search. WARNING! Getting
e.g. all the tracks in a large collection might take some time.
:returns: A :py:class:`~.soco.data_structures.SearchResult` object
:raises: :py:class:`SoCoException` upon errors
        NOTE: The playlists that are returned by the 'playlists' search are
        the playlists imported from (files in) the music library; they are not
        the Sonos playlists.
        The information about which searches can be performed and the form
of the query has been gathered from the Janos project:
http://sourceforge.net/projects/janos/ Props to the authors of that
project.
"""
search = self.SEARCH_TRANSLATION[search_type]
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
item_list = []
metadata = {'total_matches': 100000}
while len(item_list) < metadata['total_matches']:
# Change start and max for complete searches
if complete_result:
start, max_items = len(item_list), 100000
# Try and get this batch of results
try:
response, metadata =\
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], search_type, 0, 0, None)
else:
raise exception
# Parse the results
items = from_didl_string(response['Result'])
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(item)
# Append the item to the list
item_list.append(item)
            # If we are not after the complete results, then stop after one
            # iteration
if not complete_result:
break
metadata['search_type'] = search_type
if complete_result:
metadata['number_returned'] = len(item_list)
# pylint: disable=star-args
return SearchResult(item_list, **metadata)
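    # A sketch tying together the paging, fuzzy-search and subcategory
    # behaviour documented above (not part of the original source):
    #
    #   # First 100 artists, then the next 100
    #   zone.get_music_library_information('artists', start=0, max_items=100)
    #   zone.get_music_library_information('artists', start=100, max_items=100)
    #   # Fuzzy search for 'Metallica' among the album artists
    #   zone.get_music_library_information('album_artists',
    #                                      search_term='Metallica')
    #   # All tracks in one go (paged internally)
    #   zone.get_music_library_information('tracks', complete_result=True)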
def browse(self, ml_item=None, start=0, max_items=100,
full_album_art_uri=False, search_term=None, subcategories=None):
"""Browse (get sub-elements) a music library item
:param ml_item: The MusicLibraryItem to browse, if left out or passed
None, the items at the base level will be returned
:type ml_item: MusicLibraryItem
:param start: The starting index of the results
:type start: int
:param max_items: The maximum number of items to return
:type max_items: int
:param full_album_art_uri: If the album art URI should include the IP
address
:type full_album_art_uri: bool
:param search_term: A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed on the
subcategory. NOTE: Searching will not work if ml_item is None.
:type search_term: str
:param subcategories: A list of strings that indicate one or more
subcategories to dive into. NOTE: Providing sub categories will
not work if ml_item is None.
:type subcategories: list
:returns: A :py:class:`~.soco.data_structures.SearchResult` object
:rtype: :py:class:`~.soco.data_structures.SearchResult`
:raises: AttributeError: If ``ml_item`` has no ``item_id`` attribute
SoCoUPnPException: With ``error_code='701'`` if the item cannot be
browsed
"""
if ml_item is None:
search = 'A:'
else:
search = ml_item.item_id
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
try:
response, metadata =\
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], 'browse', 0, 0, None)
else:
raise exception
metadata['search_type'] = 'browse'
# Parse the results
containers = from_didl_string(response['Result'])
item_list = []
for container in containers:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(container)
item_list.append(container)
# pylint: disable=star-args
return SearchResult(item_list, **metadata)
# pylint: disable=too-many-arguments
def browse_by_idstring(self, search_type, idstring, start=0,
max_items=100, full_album_art_uri=False):
"""Browse (get sub-elements) a given type
:param search_type: The kind of information to retrieve. Can be one of:
'artists', 'album_artists', 'albums', 'genres', 'composers',
'tracks', 'share', 'sonos_playlists', and 'playlists', where
playlists are the imported file based playlists from the
music library
:param idstring: String ID to search for
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches. NOTE: The maximum
may be restricted by the unit, presumably due to transfer
            size considerations, so check the number returned against the
            number requested.
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A dictionary with metadata for the search, with the
keys 'number_returned', 'update_id', 'total_matches' and an
'item_list' list with the search results.
"""
search = self.SEARCH_TRANSLATION[search_type]
        # Check if the string ID already has the type; if so, we do not want
        # to add it. Also, imported playlists have a full path to them, so
        # they do not require the A:PLAYLISTS part first
if idstring.startswith(search) or (search_type == 'playlists'):
search = ""
search_item_id = search + idstring
search_uri = "#" + search_item_id
# Not sure about the res protocol. But this seems to work
res = [DidlResource(
uri=search_uri, protocol_info="x-rincon-playlist:*:*:*")]
search_item = DidlObject(
resources=res, title='', parent_id='',
item_id=search_item_id)
# Call the base version
return self.browse(search_item, start, max_items, full_album_art_uri)
def _music_lib_search(self, search, start, max_items):
"""Perform a music library search and extract search numbers
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search
            start: The index of the first item to return
max_items: The maximum number of items to return
Returns:
            tuple: (response, metadata) where response is the raw Browse response
and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
"""
response = self.contentDirectory.Browse([
('ObjectID', search),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
# Get result information
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag])
return response, metadata
def add_uri_to_queue(self, uri):
"""Adds the URI to the queue
:param uri: The URI to be added to the queue
:type uri: str
"""
# FIXME: The res.protocol_info should probably represent the mime type
# etc of the uri. But this seems OK.
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
item = DidlObject(resources=res, title='', parent_id='', item_id='')
return self.add_to_queue(item)
def add_to_queue(self, queueable_item):
""" Adds a queueable item to the queue """
metadata = to_didl_string(queueable_item)
        metadata = metadata.encode('utf-8')
response = self.avTransport.AddURIToQueue([
('InstanceID', 0),
('EnqueuedURI', queueable_item.resources[0].uri),
('EnqueuedURIMetaData', metadata),
('DesiredFirstTrackNumberEnqueued', 0),
('EnqueueAsNext', 1)
])
qnumber = response['FirstTrackNumberEnqueued']
return int(qnumber)
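    # A sketch of basic queue manipulation with the methods above and below
    # (not part of the original source). Note the differing index bases:
    # add_to_queue/add_uri_to_queue return a 1-based queue position, while
    # play_from_queue and remove_from_queue take 0-based indices:
    #
    #   position = zone.add_uri_to_queue('http://example.com/song.mp3')
    #   zone.play_from_queue(position - 1)
    #   zone.remove_from_queue(position - 1)
    #   zone.clear_queue()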
def remove_from_queue(self, index):
""" Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
index: the index of the track to remove; first item in the queue is 0
Returns:
True if the Sonos speaker successfully removed the track
Raises SoCoException (or a subclass) upon errors.
"""
# TODO: what do these parameters actually do?
updid = '0'
objid = 'Q:0/' + str(index + 1)
self.avTransport.RemoveTrackFromQueue([
('InstanceID', 0),
('ObjectID', objid),
('UpdateID', updid),
])
def clear_queue(self):
""" Removes all tracks from the queue.
Returns:
True if the Sonos speaker cleared the queue.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.RemoveAllTracksFromQueue([
('InstanceID', 0),
])
def get_favorite_radio_shows(self, start=0, max_items=100):
""" Get favorite radio shows from Sonos' Radio app.
Returns:
A list containing the total number of favorites, the number of
favorites returned, and the actual list of favorite radio shows,
represented as a dictionary with `title` and `uri` keys.
Depending on what you're building, you'll want to check to see if the
total number of favorites is greater than the amount you
        requested (`max_items`); if it is, use `start` to page through and
get the entire list of favorites.
"""
return self.__get_radio_favorites(RADIO_SHOWS, start, max_items)
def get_favorite_radio_stations(self, start=0, max_items=100):
""" Get favorite radio stations from Sonos' Radio app.
Returns:
A list containing the total number of favorites, the number of
favorites returned, and the actual list of favorite radio stations,
represented as a dictionary with `title` and `uri` keys.
Depending on what you're building, you'll want to check to see if the
total number of favorites is greater than the amount you
        requested (`max_items`); if it is, use `start` to page through and
get the entire list of favorites.
"""
return self.__get_radio_favorites(RADIO_STATIONS, start, max_items)
def __get_radio_favorites(self, favorite_type, start=0, max_items=100):
""" Helper method for `get_favorite_radio_*` methods.
Arguments:
favorite_type -- Specify either `RADIO_STATIONS` or `RADIO_SHOWS`.
start -- Which number to start the retrieval from. Used for paging.
max_items -- The total number of results to return.
"""
        # Fall back to radio stations if an unknown favorite type is passed
        if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
            favorite_type = RADIO_STATIONS
response = self.contentDirectory.Browse([
('ObjectID', 'R:0/{0}'.format(favorite_type)),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
result = {}
favorites = []
results_xml = response['Result']
if results_xml != '':
# Favorites are returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(results_xml))
for item in metadata.findall(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
favorite = {}
favorite['title'] = item.findtext(
'{http://purl.org/dc/elements/1.1/}title')
favorite['uri'] = item.findtext(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
favorites.append(favorite)
result['total'] = response['TotalMatches']
result['returned'] = len(favorites)
result['favorites'] = favorites
return result
def _update_album_art_to_full_uri(self, item):
"""Update an item's Album Art URI to be an absolute URI
:param item: The item to update the URI for
"""
if getattr(item, 'album_art_uri', False):
item.album_art_uri = self._build_album_art_full_uri(
item.album_art_uri)
def create_sonos_playlist(self, title):
""" Create a new empty Sonos playlist.
:params title: Name of the playlist
:returns: An instance of
:py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
response = self.avTransport.CreateSavedQueue([
('InstanceID', 0),
('Title', title),
('EnqueuedURI', ''),
('EnqueuedURIMetaData', ''),
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id)
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
""" Create a new Sonos playlist from the current queue.
:params title: Name of the playlist
:returns: An instance of
:py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
# Note: probably same as Queue service method SaveAsSonosPlaylist
# but this has not been tested. This method is what the
# controller uses.
response = self.avTransport.SaveQueue([
('InstanceID', 0),
('Title', title),
('ObjectID', '')
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id)
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
""" Adds a queueable item to a Sonos' playlist
:param queueable_item: the item to add to the Sonos' playlist
:param sonos_playlist: the Sonos' playlist to which the item should
be added
"""
# Check if the required attributes are there
for attribute in ['didl_metadata', 'uri']:
if not hasattr(queueable_item, attribute):
message = 'queueable_item has no attribute {0}'.\
format(attribute)
raise AttributeError(message)
# Get the metadata
try:
metadata = XML.tostring(queueable_item.didl_metadata)
except DIDLMetadataError as exception:
message = ('The queueable item could not be enqueued, because it '
'raised a DIDLMetadataError exception with the '
'following message:\n{0}').format(str(exception))
raise ValueError(message)
if isinstance(metadata, str):
metadata = metadata.encode('utf-8')
response, _ = self._music_lib_search(sonos_playlist.item_id, 0, 1)
update_id = response['UpdateID']
self.avTransport.AddURIToSavedQueue([
('InstanceID', 0),
('UpdateID', update_id),
('ObjectID', sonos_playlist.item_id),
('EnqueuedURI', queueable_item.uri),
('EnqueuedURIMetaData', metadata),
            ('AddAtIndex', 4294967295)  # this field always has this value; we
                                        # do not know the meaning of this
                                        # "magic" number.
])
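    # A sketch of creating a Sonos playlist and adding an item to it (not part
    # of the original source). 'some_track' is assumed to be an item that
    # exposes the uri and didl_metadata attributes this method checks for:
    #
    #   playlist = zone.create_sonos_playlist('My playlist')
    #   some_track = zone.get_tracks(search_term='Black')[0]
    #   zone.add_item_to_sonos_playlist(some_track, playlist)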
def get_item_album_art_uri(self, item):
""" Get an item's Album Art absolute URI. """
if getattr(item, 'album_art_uri', False):
return self._build_album_art_full_uri(item.album_art_uri)
else:
return None
# pylint: disable=too-many-locals
def search_track(self, artist, album=None, track=None,
full_album_art_uri=False):
"""Search for an artist, artist's albums, or specific track.
:param artist: Artist name
:type artist: str
:param album: Album name
:type album: str
:param track: Track name
:type track: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist]
subcategories.append(album or '')
# Perform the search
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories, search_term=track,
complete_result=True)
result._metadata['search_type'] = 'search_track'
return result
def get_albums_for_artist(self, artist, full_album_art_uri=False):
"""Get albums for an artist.
:param artist: Artist name
:type artist: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
# It is necessary to update the list of items in two places, due to
# a bug in SearchResult
result[:] = reduced
result._metadata.update({
'item_list': reduced,
'search_type': 'albums_for_artist',
'number_returned': len(reduced),
'total_matches': len(reduced)
})
return result
def get_tracks_for_album(self, artist, album, full_album_art_uri=False):
"""Get tracks for an artist's album.
:param artist: Artist name
:type artist: str
:param album: Album name
:type album: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist, album]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
result._metadata['search_type'] = 'tracks_for_album'
return result
@property
def library_updating(self):
"""True if the music library is in the process of being updated
:returns: True if the music library is in the process of being updated
:rtype: bool
"""
result = self.contentDirectory.GetShareIndexInProgress()
return result['IsIndexing'] != '0'
def start_library_update(self, album_artist_display_option=''):
"""Start an update of the music library.
If specified, album_artist_display_option changes the album
artist compilation setting (see also album_artist_display_option).
"""
return self.contentDirectory.RefreshShareIndex([
('AlbumArtistDisplayOption', album_artist_display_option),
])
@property
def album_artist_display_option(self):
"""Return the current value of the album artist compilation
setting (see
http://www.sonos.com/support/help/3.4/en/sonos_user_guide/
Chap07_new/Compilation_albums.htm)
This is a string. Possible values:
* "WMP" - Use Album Artists
* "ITUNES" - Use iTunes® Compilations
* "NONE" - Do not group compilations
To change the current setting, call `start_library_update` and
pass the new setting.
"""
result = self.contentDirectory.GetAlbumArtistDisplayOption()
return result['AlbumArtistDisplayOption']
# definition section
RADIO_STATIONS = 0
RADIO_SHOWS = 1
NS = {'dc': '{http://purl.org/dc/elements/1.1/}',
'upnp': '{urn:schemas-upnp-org:metadata-1-0/upnp/}',
'': '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}'}
# Valid play modes
PLAY_MODES = ('NORMAL', 'SHUFFLE_NOREPEAT', 'SHUFFLE', 'REPEAT_ALL')
if config.SOCO_CLASS is None:
config.SOCO_CLASS = SoCo
| xxdede/SoCo | soco/core.py | Python | mit | 72,281 |
# coding=UTF-8
'''
Created on 24.09.2017
@author: sysoev
'''
from google.appengine.ext import db
from google.appengine.api import users
import datetime
import time
import logging
from myusers import MyUser
def force_unicode(string):
if type(string) == unicode:
return string
return string.decode('utf-8')
class Project(db.Model):
name = db.StringProperty(multiline=False)
def getProjectsList(user):
return None
def updateProject(key, name):
p = Project.get(key)
if not p:
return
p.name = name
p.put()
def addProject(name):
p = Project()
p.name = name
p.put()
return p.key()
class UserProject(db.Model):
user_key = db.ReferenceProperty(MyUser)
project_key = db.ReferenceProperty(Project)
number = 0
def addUserProject(user_name, project_key_str):
user_query = MyUser.all()
user = user_query.filter('username = ', user_name).get()
if user is None:
return None
true_project_key = Project.get(project_key_str).key()
if check_user_have_project(user, true_project_key):
return False
up = UserProject()
up.user_key = user.key()
up.project_key = true_project_key
up.put()
return True
def check_user_have_project(user, true_project_key):
user_project_keys = [user_proj.project_key.key() for user_proj in
UserProject.all().filter('user_key = ', user.key()).fetch(None)]
return true_project_key in user_project_keys
def deleteUserProject(user_key, project_key):
query = UserProject.all()
query.filter('user_key = ', MyUser.get(user_key)).filter('project_key = ', Project.get(project_key))
user_project = query.get()
if user_project is None:
return None
# project.key().delete()
db.delete(user_project.key())
return True
def getUserProjects(user):
if user is None:
return []
query = UserProject.all().filter('user_key = ', user.key())
return [user_project.project_key for user_project in query]
# return [Project.get(user_project.project_key) for user_project in query]
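# A sketch of how the helpers above fit together (not part of the original
# source); the user name and project name are made-up examples:
#
#   key = addProject(u'New site')          # create a Project, returns its key
#   addUserProject('alice', str(key))      # link the user 'alice' to it
#   projects = getUserProjects(user)       # -> list of Project entities
#   deleteUserProject(user.key(), key)     # unlink again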
class Request(db.Model):
    number = db.IntegerProperty()
name = db.StringProperty()
description = db.StringProperty(multiline=True)
    state = db.IntegerProperty()
    perfomer = db.ReferenceProperty()  # the user this request is assigned to, if any
def addRequests(project_key, name, description):
print("log")
req = Request(parent=project_key)
req.name = name
req.description = description
req.perfomer = ""
req.state = 1
req.number = Request(ancestor = project_key).all().length + 1
req.put()
Project.set(project_key).number += 1
return True
def getRequests(project_key):
if project_key is None:
return []
    # Ancestor query: all requests stored under the given project
    query = Request.all().ancestor(project_key)
return query | sysoevss/WebApps17 | data.py | Python | mit | 2,774 |