repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
stvstnfrd/edx-platform | lms/djangoapps/verify_student/tasks.py | 1 | 5646 | """
Django Celery tasks for the verify_student app
"""
import logging
from smtplib import SMTPException
import requests
import simplejson
from celery import Task, shared_task
from celery.states import FAILURE
from django.conf import settings
from django.core.mail import EmailMessage
from edx_django_utils.monitoring import set_code_owner_attribute
from common.djangoapps.edxmako.shortcuts import render_to_string
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
class BaseSoftwareSecureTask(Task): # lint-amnesty, pylint: disable=abstract-method
"""
    Base task class for use with Software Secure requests.
    Permits updating information about the user's verification attempt to reflect
    the outcome of submitting the request to Software Secure.
"""
abstract = True
def on_success(self, retval, task_id, args, kwargs):
"""
Update SoftwareSecurePhotoVerification object corresponding to this
task with info about success.
        Updates the user verification attempt to "submitted" if the response was ok,
        otherwise sets it to "must_retry".
Assumes `retval` is a dict containing the task's result, with the following keys:
'response_ok': boolean, indicating if the response was ok
'response_text': string, indicating the response text in case of failure.
"""
from .models import SoftwareSecurePhotoVerification
user_verification = SoftwareSecurePhotoVerification.objects.get(id=kwargs['user_verification_id'])
if retval['response_ok']:
user_verification.mark_submit()
log.info(
'Sent request to Software Secure for user: %r and receipt ID %r.',
user_verification.user.username,
user_verification.receipt_id,
)
return user_verification
user_verification.mark_must_retry(retval['response_text'])
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
        If max retries have been reached and the task status is still failing, mark the
        user submission with "must_retry" so that it can be retried later.
"""
if self.max_retries == self.request.retries and status == FAILURE:
from .models import SoftwareSecurePhotoVerification
user_verification_id = kwargs['user_verification_id']
user_verification = SoftwareSecurePhotoVerification.objects.get(id=user_verification_id)
user_verification.mark_must_retry()
log.error(
'Software Secure submission failed for user %r, setting status to must_retry',
user_verification.user.username,
exc_info=True
)
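# Illustrative note (added; not part of the upstream code): on_success() above expects
# the task's return value to be a dict shaped like the one send_request_to_ss_for_user()
# returns further below, e.g.
#     {'response_ok': True, 'response_text': ''}          -> attempt marked "submitted"
#     {'response_ok': False, 'response_text': 'error...'} -> attempt marked "must_retry"
# Any other shape raises a KeyError inside on_success().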
@shared_task
@set_code_owner_attribute
def send_verification_status_email(context):
"""
    Celery task that sends a verification status email to the learner
"""
subject = context.get('subject')
message = render_to_string(context.get('template'), context.get('email_vars'))
from_addr = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
dest_addr = context.get('email')
try:
msg = EmailMessage(subject, message, from_addr, [dest_addr])
msg.content_subtype = 'html'
msg.send(fail_silently=False)
except SMTPException:
log.warning(u"Failure in sending verification status e-mail to %s", dest_addr)
@shared_task(
base=BaseSoftwareSecureTask,
bind=True,
default_retry_delay=settings.SOFTWARE_SECURE_REQUEST_RETRY_DELAY,
max_retries=settings.SOFTWARE_SECURE_RETRY_MAX_ATTEMPTS,
)
@set_code_owner_attribute
def send_request_to_ss_for_user(self, user_verification_id, copy_id_photo_from):
"""
Assembles a submission to Software Secure.
Keyword Arguments:
user_verification_id (int) SoftwareSecurePhotoVerification model object identifier.
copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
data from this attempt. This is used for re-verification, in which new face photos
are sent with previously-submitted ID photos.
Returns:
        requests.Response
"""
from .models import SoftwareSecurePhotoVerification
user_verification = SoftwareSecurePhotoVerification.objects.get(id=user_verification_id)
log.info('=>New Verification Task Received %r', user_verification.user.username)
try:
headers, body = user_verification.create_request(copy_id_photo_from)
        # See PROD-1395 for details on why we add system certificate paths for verification.
response = requests.post(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
headers=headers,
data=simplejson.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
verify=settings.VERIFY_STUDENT["SOFTWARE_SECURE"]['CERT_VERIFICATION_PATH']
)
return {
'response_ok': getattr(response, 'ok', False),
'response_text': getattr(response, 'text', '')
}
except Exception as exc: # pylint: disable=broad-except
log.error(
(
'Retrying sending request to Software Secure for user: %r, Receipt ID: %r '
'attempt#: %s of %s'
),
user_verification.user.username,
user_verification.receipt_id,
self.request.retries,
settings.SOFTWARE_SECURE_RETRY_MAX_ATTEMPTS,
)
log.error(str(exc))
self.retry()
| agpl-3.0 | -5,051,165,563,618,291,000 | 37.937931 | 106 | 0.667906 | false | 4.319816 | false | false | false |
holgerd77/django-dynamic-scraper | tests/scraper/models.py | 1 | 1421 | #Stage 2 Update (Python 3)
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from builtins import str
from django.db import models
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy_djangoitem import DjangoItem
@python_2_unicode_compatible
class EventWebsite(models.Model):
name = models.CharField(max_length=200)
scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
url = models.URLField()
scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name + " (" + str(self.id) + ")"
@python_2_unicode_compatible
class Event(models.Model):
title = models.CharField(max_length=200)
event_website = models.ForeignKey(EventWebsite, on_delete=models.CASCADE)
description = models.TextField(blank=True)
description2 = models.TextField(blank=True)
url = models.URLField(blank=True)
url2 = models.URLField(blank=True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.title + " (" + str(self.id) + ")"
def detailed(self):
        details = "title: {t}\n".format(t=self.title)
        details += "event_website:"
        return details
class EventItem(DjangoItem):
django_model = Event
| bsd-3-clause | -963,004,689,120,053,800 | 33.658537 | 107 | 0.708656 | false | 3.606599 | false | false | false |
mjirik/io3d | io3d/misc.py | 1 | 8466 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# import sys
import os
from loguru import logger
import sys
import os.path
import numpy as np
from io import open
from .image import DataPlus
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "./extern/sPickle"))
from .dili_subset import ndarray_to_list_in_structure
# from imma.image import resize_to_mm, resize_to_shape
def old_str_format_to_new(string):
"""
convert old format style to new style. Works for digits only
%05d is converted to {:05d}
:param string:
:return:
"""
import re
return re.sub(r"%(\d*d)", r"{:\1}", string)
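# Illustrative examples (assumed behaviour, derived from the regex above):
#     old_str_format_to_new("frame_%05d.png")  ->  "frame_{:05d}.png"
#     old_str_format_to_new("item %d of %d")   ->  "item {:d} of {:d}"
# Only integer ("%d"-style) placeholders are rewritten; conversions such as "%s"
# or "%0.2f" are left untouched.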
def suggest_filename(file_path, exists=None):
"""
    If the path already exists, append a number to the end of the file name.
    For debugging you can pass in explicitly whether the file exists or not.
"""
import os.path
import re
if not isinstance(exists, bool):
exists = os.path.exists(file_path)
if exists:
file_path, file_extension = os.path.splitext(file_path)
# print(file_path)
m = re.search(r"_\d+$", file_path)
if m is None:
# cislo = 2
new_cislo_str = "_2"
else:
cislostr = m.group()
cislo = int(cislostr[1:]) + 1
# it is normal number
file_path = file_path[: -len(cislostr)]
new_cislo_str = "_" + str(cislo)
file_path = file_path + new_cislo_str + file_extension # .zfill(2)
        # a bit of recursion
file_path = suggest_filename(file_path)
return file_path
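# Illustrative examples (assumed behaviour, derived from the logic above):
#     suggest_filename("report.txt", exists=False)   ->  "report.txt"
#     suggest_filename("report.txt", exists=True)    ->  "report_2.txt"
#     suggest_filename("report_2.txt", exists=True)  ->  "report_3.txt"
# The recursive call re-checks the real filesystem, so the suggested name keeps
# incrementing until an unused path is found.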
def obj_from_file(filename="annotation.yaml", filetype="auto", yaml_typ="unsafe"):
""" Read object from file """
if filetype == "auto":
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ("yaml", "yml"):
from ruamel.yaml import YAML
# yaml = YAML(typ="unsafe")
yaml = YAML(typ=yaml_typ)
with open(filename, encoding="utf-8") as f:
obj = yaml.load(f)
if obj is None:
obj = {}
# import yaml
# with open(filename, encoding="utf-8") as f:
# intext = f.read()
# obj = yaml.load(intext)
elif filetype in ("pickle", "pkl", "pklz", "picklezip"):
fcontent = read_pkl_and_pklz(filename)
# import pickle
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
# import sPickle as pickle
if sys.version_info.major == 2:
obj = pickle.loads(fcontent)
else:
obj = pickle.loads(fcontent, encoding="latin1")
else:
logger.error("Unknown filetype " + filetype)
return obj
def read_pkl_and_pklz(filename):
"""
    Try to read a gzipped or plain (non-gzipped) pickle file
"""
fcontent = None
try:
import gzip
f = gzip.open(filename, "rb")
fcontent = f.read()
f.close()
except IOError as e:
# if the problem is in not gzip file
logger.info("Input gzip exception: " + str(e))
f = open(filename, "rb")
fcontent = f.read()
f.close()
except Exception as e:
# other problem
import traceback
logger.error("Input gzip exception: " + str(e))
logger.error(traceback.format_exc())
return fcontent
def obj_to_file(
obj,
filename,
filetype="auto",
ndarray_to_list=False,
squeeze=True,
yaml_typ="unsafe",
):
"""Writes annotation in file.
:param filetype:
auto
yaml
pkl, pickle
pklz, picklezip
:param ndarray_to_list: convert ndarrays in obj to lists
:param squeeze: squeeze ndarray
"""
# import json
# with open(filename, mode='w') as f:
# json.dump(annotation,f)
if type(obj) == DataPlus:
obj = dict(obj)
if ndarray_to_list:
obj = ndarray_to_list_in_structure(obj, squeeze=squeeze)
# write to yaml
d = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(d):
os.makedirs(d)
if filetype == "auto":
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ("yaml", "yml"):
# import yaml
from ruamel.yaml import YAML
# yaml = YAML(typ="unsafe")
yaml = YAML(typ=yaml_typ)
with open(filename, "wt", encoding="utf-8") as f:
yaml.dump(obj, f)
# if sys.version_info.major == 2:
# with open(filename, 'wb') as f:
# yaml.dump(obj, f, encoding="utf-8")
# else:
# with open(filename, "w", encoding="utf-8") as f:
# yaml.dump(obj, f)
elif filetype in ("pickle", "pkl"):
f = open(filename, "wb")
logger.info("filename " + filename)
# if sys.version_info[0] < 3: import cPickle as pickle
# else: import _pickle as pickle
import pickle
pickle.dump(obj, f, -1)
        f.close()
elif filetype in ("streamingpicklezip", "spklz"):
# this is not working :-(
import gzip
import sPickle as pickle
f = gzip.open(filename, "wb", compresslevel=1)
# f = open(filename, 'wb')
pickle.s_dump(obj, f)
        f.close()
elif filetype in ("picklezip", "pklz"):
import gzip
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
f = gzip.open(filename, "wb", compresslevel=1)
# f = open(filename, 'wb')
pickle.dump(obj, f)
        f.close()
elif filetype in ("mat"):
import scipy.io as sio
sio.savemat(filename, obj)
else:
logger.error("Unknown filetype " + filetype)
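# Illustrative round trip (assumed usage; the file name is hypothetical):
#     obj_to_file({"a": 1, "b": [1, 2, 3]}, "/tmp/example.yaml")
#     data = obj_from_file("/tmp/example.yaml")   # -> {"a": 1, "b": [1, 2, 3]}
# With filetype="auto" the serializer is picked from the file extension
# (yaml/yml, pkl/pickle, pklz/picklezip, and mat for obj_to_file).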
from imma.image import resize_to_shape
# def resize_to_mm(data3d, voxelsize_mm, new_voxelsize_mm, mode="nearest", order=1):
# """
# Function can resize data3d or segmentation to specifed voxelsize_mm
# :new_voxelsize_mm: requested voxelsize. List of 3 numbers, also
# can be a string 'orig', 'orgi*2' and 'orgi*4'.
#
# :voxelsize_mm: size of voxel
# :mode: default is 'nearest'
# """
# import scipy
# import scipy.ndimage
#
# if np.all(list(new_voxelsize_mm) == "orig"):
# new_voxelsize_mm = np.array(voxelsize_mm)
# elif np.all(list(new_voxelsize_mm) == "orig*2"):
# new_voxelsize_mm = np.array(voxelsize_mm) * 2
# elif np.all(list(new_voxelsize_mm) == "orig*4"):
# new_voxelsize_mm = np.array(voxelsize_mm) * 4
# # vx_size = np.array(metadata['voxelsize_mm']) * 4
#
# zoom = voxelsize_mm / (1.0 * np.array(new_voxelsize_mm))
# data3d_res = scipy.ndimage.zoom(data3d, zoom, mode=mode, order=order).astype(
# data3d.dtype
# )
# return data3d_res
def suits_with_dtype(mn, mx, dtype):
"""
Check whether range of values can be stored into defined data type.
:param mn: range minimum
:param mx: range maximum
:param dtype:
:return:
"""
type_info = np.iinfo(dtype)
if mx <= type_info.max and mn >= type_info.min:
return True
else:
return False
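# Illustrative examples (assumed behaviour, based on numpy integer ranges):
#     suits_with_dtype(0, 200, np.uint8)   ->  True   (uint8 holds 0..255)
#     suits_with_dtype(-1, 200, np.uint8)  ->  False  (negative minimum)
#     suits_with_dtype(-1, 200, np.int16)  ->  True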
def use_economic_dtype(data3d, slope=1, inter=0, dtype=None):
""" Use more economic integer-like dtype if it is possible.
:param data3d:
:param dtype: if dtype is not used, the automatic is used
:return:
"""
if dtype is None:
dtype = data3d.dtype
if issubclass(dtype.type, np.integer):
mn = data3d.min() * slope + inter
mx = data3d.max() * slope + inter
if suits_with_dtype(mn, mx, dtype=np.uint8):
dtype = np.uint8
elif suits_with_dtype(mn, mx, dtype=np.int8):
dtype = np.int8
elif suits_with_dtype(mn, mx, dtype=np.uint16):
dtype = np.uint16
elif suits_with_dtype(mn, mx, dtype=np.int16):
dtype = np.int16
elif suits_with_dtype(mn, mx, dtype=np.uint32):
dtype = np.uint32
elif suits_with_dtype(mn, mx, dtype=np.int32):
dtype = np.int32
# new_data3d = ((np.float(slope) * data3d) + np.float(inter)).astype(dtype)
if slope == 1 and inter == 0:
# this can prevent out of memmory
new_data3d = data3d.astype(dtype)
else:
new_data3d = ((slope * data3d) + inter).astype(dtype)
return new_data3d
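# Illustrative example (assumed usage): an int64 volume whose values fit into
# 16 bits is shrunk to int16, reducing its memory footprint.
#     data3d = np.array([[-1000, 0], [400, 3000]], dtype=np.int64)
#     use_economic_dtype(data3d).dtype   # -> dtype('int16')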
| mit | -4,283,288,989,103,422,000 | 27.505051 | 84 | 0.568037 | false | 3.379641 | false | false | false |
mwatts15/YAROM | examples/adding_data.py | 1 | 1499 | import yarom as Y
import rdflib
Y.connect({'rdf.namespace': rdflib.Namespace("http://example.org/")})
def p1():
mary = Y.DataObject(key='mary')
fido = Y.DataObject(key='fido')
mary.relate('has_pet', fido)
mary.relate('age', Y.Quantity(23, 'years'))
mary.relate('email', "[email protected]")
Y.print_graph(mary.get_defined_component())
def p2_p3():
FOAF = rdflib.Namespace("http://xmlns.com/foaf/0.1/")
Y.config('rdf.namespace_manager').bind('foaf', FOAF)
class Person(Y.DataObject):
rdf_type = FOAF['Person']
class Dog(Y.DataObject):
pass
class FOAFAge(Y.DatatypeProperty):
link = FOAF['age']
linkName = "foaf_age"
owner_type = Person
multiple = False # XXX: Default is True
class FOAFMbox(Y.UnionProperty):
link = FOAF['mbox']
linkName = "foaf_mbox"
owner_type = Person # XXX: Not defining agent
multiple = True
Y.remap()
mary = Person(key='mary')
fido = Dog(key='fido')
mary.relate('has_pet', fido)
mary.relate('age', Y.Quantity(23, 'years'), FOAFAge)
mary.relate('email', "[email protected]", FOAFMbox)
Y.print_graph(mary.get_defined_component())
mary.save()
q_person = Person()
q_person.relate('has_pet', Dog())
for p in q_person.load():
p.relate('dog_lover', True)
p.save()
q_person = Person()
q_person.relate('dog_lover', True)
for p in q_person.load():
print(p)
p1()
p2_p3()
| bsd-3-clause | -940,927,674,706,282,800 | 25.298246 | 69 | 0.597065 | false | 2.968317 | false | false | false |
vivaxy/algorithms | python/problems/prime_number_of_set_bits_in_binary_representation.py | 1 | 1089 | """
https://leetcode.com/problems/prime-number-of-set-bits-in-binary-representation/
https://leetcode.com/submissions/detail/136523875/
"""
class Solution:
def countPrimeSetBits(self, L, R):
"""
:type L: int
:type R: int
:rtype: int
"""
def isPrime(n):
if n < 2:
return False
if n == 2:
return True
for i in range(2, n):
if n % i == 0:
return False
return True
result = 0
for i in range(L, R + 1):
if isPrime(bin(i).count('1')):
result += 1
return result
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.countPrimeSetBits(6, 10), 4)
self.assertEqual(solution.countPrimeSetBits(10, 15), 5)
self.assertEqual(solution.countPrimeSetBits(567, 607), 21)
self.assertEqual(solution.countPrimeSetBits(842, 888), 23)
if __name__ == '__main__':
unittest.main()
| mit | 2,882,310,662,519,049,700 | 23.75 | 80 | 0.534435 | false | 3.704082 | true | false | false |
fountainhead-gq/DjangoBlog | blogproject/urls.py | 1 | 1922 | """blogproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import include, url
from django.conf.urls.static import static
from rest_framework.routers import DefaultRouter
from blog.views import PostViewSet, AuthorViewSet, CategoryViewSet, TagViewSet
from django.conf.urls import (handler400, handler403, handler404, handler500)
from rest_framework_swagger.views import get_swagger_view
handler400 = 'blog.views.handler400'
handler403 = 'blog.views.handler403'
handler404 = 'blog.views.handler404'
handler500 = 'blog.views.handler500'
router = DefaultRouter()
router.register(r'posts', PostViewSet, base_name='posts')
# router.register(r'authors', AuthorViewSet, base_name='authors')
router.register(r'category', CategoryViewSet, base_name='category')
router.register(r'tags', TagViewSet)
schema_view = get_swagger_view(title='API')
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^photos/', include('photos.urls', namespace='photos')),
url(r'^', include('blog.urls')),
url(r'^api/', include(router.urls, namespace='api'), name='api'),
url(r'^docs/', schema_view),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| gpl-2.0 | -8,842,098,134,731,623,000 | 41.711111 | 85 | 0.73257 | false | 3.526606 | false | false | false |
hasgeek/boxoffice | migrations/versions/253e7b76eb8e_modify_assignee.py | 1 | 2138 | """modify assignee.
Revision ID: 253e7b76eb8e
Revises: 1ea1e8070ac8
Create Date: 2016-04-11 20:15:52.864916
"""
# revision identifiers, used by Alembic.
revision = '253e7b76eb8e'
down_revision = '1ea1e8070ac8'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
op.add_column('assignee', sa.Column('current', sa.Boolean(), nullable=True))
op.create_check_constraint('assignee_current_check', 'assignee', "current != '0'")
op.add_column(
'assignee',
sa.Column(
'line_item_id',
sqlalchemy_utils.types.uuid.UUIDType(binary=False),
nullable=False,
),
)
op.drop_index('assignee_email_key', table_name='assignee')
op.create_unique_constraint(
'assignee_line_item_current_key', 'assignee', ['line_item_id', 'current']
)
op.drop_constraint('assignee_previous_id_fkey', 'assignee', type_='foreignkey')
op.create_foreign_key(
'assignee_line_item_id', 'assignee', 'line_item', ['line_item_id'], ['id']
)
op.drop_column('assignee', 'previous_id')
op.drop_constraint('line_item_assignee_id_fkey', 'line_item', type_='foreignkey')
op.drop_column('line_item', 'assignee_id')
def downgrade():
op.add_column(
'line_item',
sa.Column('assignee_id', sa.INTEGER(), autoincrement=False, nullable=True),
)
op.create_foreign_key(
'line_item_assignee_id_fkey', 'line_item', 'assignee', ['assignee_id'], ['id']
)
op.add_column(
'assignee',
sa.Column('previous_id', sa.INTEGER(), autoincrement=False, nullable=True),
)
op.drop_constraint('assignee_line_item_id', 'assignee', type_='foreignkey')
op.create_foreign_key(
'assignee_previous_id_fkey', 'assignee', 'assignee', ['previous_id'], ['id']
)
op.drop_constraint('assignee_line_item_current_key', 'assignee', type_='unique')
op.create_index('assignee_email_key', 'assignee', ['email'], unique=False)
op.drop_column('assignee', 'line_item_id')
op.drop_constraint('assignee_current_check', 'assignee')
op.drop_column('assignee', 'current')
| agpl-3.0 | -1,522,597,774,365,325,300 | 33.483871 | 86 | 0.641254 | false | 3.134897 | false | false | false |
lykops/lykops | library/frontend/__init__.py | 1 | 13604 | import logging
from library.config.frontend import adminuser
from library.config.security import vault_header
from library.connecter.database.mongo import Op_Mongo
from library.connecter.database.redis_api import Op_Redis
from library.security.encryption.AES256.api import Using_AES256
from library.security.password import Manager_Password
from library.utils.time_conv import timestamp2datetime
from library.utils.type_conv import str2dict
class Base():
def __init__(self, mongoclient=None, redisclient=None):
'''
        This is the C (controller) of the MVC for the user-management part
'''
self.logger = logging.getLogger("lykops")
self.userinfo_mongocollect = 'user.login.info'
self.userinfo_rediskey = 'lykops:userinfo'
self.privacy_mongocollect = 'user.privacy'
if mongoclient is None :
self.mongoclient = Op_Mongo()
self.logger.warn('无法继承,需要初始化mongodb连接')
else :
self.mongoclient = mongoclient
if redisclient is None :
self.redisclient = Op_Redis()
self.logger.warn('无法继承,需要初始化redis连接')
else :
self.redisclient = redisclient
self.password_api = Manager_Password()
self.expiretime = 60 * 60 * 24
self.rediskey_prefix = 'lykops:'
def get_userinfo(self, force=False, username=None):
'''
        Fetch the userinfo data
'''
if force :
self.logger.warn('强制删除用户信息缓存')
self.redisclient.delete(self.userinfo_rediskey)
result = self.redisclient.get(self.userinfo_rediskey, fmt='obj')
if result[0] and (result[1] is not None or result[1]) :
userinfo = result[1]
else :
result = self.mongoclient.find(self.userinfo_mongocollect)
if result[0] :
userinfo = result[1]
set_dict = {
'name' : self.userinfo_rediskey,
'value' : userinfo,
'ex':self.expiretime
}
self.redisclient.set(set_dict, fmt='obj')
else :
userinfo = {}
if username is None :
return userinfo
else :
try :
for u_dict in userinfo :
if username == u_dict['username'] :
us = u_dict
else :
continue
except :
us = {}
try :
return us
except :
return {}
def verify_vaultpassword(self, username, vault_password):
'''
        Verify whether the user's vault password is correct
        :param
            username: user name
            vault_password: vault password
'''
user_dict = self.get_userinfo(username=username)
if not user_dict :
content = '用户' + username + '不存在'
self.logger.error(content)
return (False, content)
try :
cipher_pwd = user_dict['vault_password']
except :
content = '从数据库中没有查询到用户' + username + '的vault密码'
self.logger.error(content)
return (False, content)
result = self.password_api.verify(vault_password, cipher_pwd)
if not result :
content = '用户' + username + '输入的vault密码与数据库中vault密码不匹配'
self.logger.error(content)
return (False, content)
else :
content = '用户' + username + '输入的vault密码与数据库中vault密码匹配成功'
# self.logger.info(content)
return (True, content)
def get_data(self, username, redis_key, mongo_collect, force=False, mongoshare=True):
'''
        Fetch user data
        :param
            username: user name
            redis_key: name of the redis cache key
            mongo_collect: name of the mongo collection
            force: force a cache refresh
'''
if force:
self.logger.warn('强制删除指定缓存')
self.redisclient.delete(redis_key)
result = self.redisclient.get(redis_key, fmt='obj')
if not result[0] or (result[0] and not result[1]) :
if mongoshare :
result = self.mongoclient.find(mongo_collect, condition_dict={'username' : username})
else :
result = self.mongoclient.find(mongo_collect)
if result[0] :
data_dict = result[1]
self.write_cache(redis_key, data_dict)
else :
self.logger.error('从数据库中查询数据失败,原因:' + result[1])
return result
else :
data_dict = result[1]
try :
del data_dict['username']
except :
pass
return (True, data_dict)
def encryp_dict(self, username, vault_password, data, vault_list, isverify=True):
'''
        Encrypt selected fields of the user's data dictionary
'''
encryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('加密用户' + username + '的指定数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '输入的vault密码与数据库中vault密码不匹配')
if not vault_list :
vault_list = data.keys()
encryp_dict = {}
for key , value in data.items() :
if not value :
encryp_dict[key] = value
if key in vault_list :
result = encryp_api.encrypt(value)
if result[0] :
encryp_dict[key] = result[1]
else :
self.logger.error('加密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,原因:' + result[1])
return (False, '加密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,' + result[1])
else :
if value == 'False' :
value = False
if value == 'True' :
value = True
isdigit = True
if isinstance(value, str) :
for t in value :
if t not in '0123456789' :
isdigit = False
if isdigit :
try :
value = int(value)
except :
pass
encryp_dict[key] = value
# content = '加密用户' + username + '的指定数据成功'
# self.logger.info(content)
return (True, encryp_dict)
def decryp_dict(self, username, vault_password, data, vault_list, isverify=True):
'''
        Decrypt selected fields of the user's data dictionary
'''
encryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('解密用户' + username + '的指定数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '输入的vault密码与数据库中vault密码不匹配')
if not vault_list :
vault_list = data.keys()
decryp_dict = {}
for key , value in data.items() :
if not value :
decryp_dict[key] = value
if key in vault_list :
result = encryp_api.decrypt(value)
if result[0] :
decryp_dict[key] = result[1]
else :
self.logger.error('解密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,原因:' + result[1])
return (False, '解密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,' + result[1])
else :
if value == 'False' :
value = False
if value == 'True' :
value = True
decryp_dict[key] = value
# content = '解密用户' + username + '的指定数据成功'
# self.logger.info(content)
return (True, decryp_dict)
def encryp_string(self, username, vault_password, data, isverify=True):
'''
        Encrypt the user's data
'''
encryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('加密用户' + username + '数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '加密用户' + username + '数据时失败,输入的vault密码与数据库中vault密码不匹配')
result = encryp_api.encrypt(data)
if result[0] :
# content = '加密用户' + username + '数据成功'
# self.logger.info(content)
return (True, result[1])
else :
self.logger.error('加密用户' + username + '数据失败,原因:' + result[1])
return (False, '加密用户' + username + '数据失败,' + result[1])
def decryp_string(self, username, vault_password, data, isverify=True):
'''
        Decrypt the user's data
'''
decryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('解密用户' + username + '数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '解密用户' + username + '数据时失败,输入的vault密码与数据库中vault密码不匹配')
result = decryp_api.decrypt(data)
if result[0] :
# content = '解密用户' + username + '数据成功'
# self.logger.info(content)
return (True, result[1])
else :
self.logger.error('解密用户' + username + '数据失败,原因:' + result[1])
return (False, result[1])
def change_vltpwd_dict(self, username, old_pwd, new_pwd, vault_dict, vault_list, isverify=False):
'''
        Change the vault password used for the user's vault data (dict)
'''
try :
del vault_dict['add_time']
except :
pass
if not vault_list :
vault_list = vault_dict.keys()
        # Do not use encryp_dict and decryp_dict to change the password; otherwise the password cannot be changed correctly
new_data = {}
for key, value in vault_dict.items() :
if key in vault_list :
result = self.decryp_string(username, old_pwd, value, isverify=isverify)
if not result[0] :
self.logger.error('更改用户' + username + '的vault密码时失败,解密数据时出错,原因:' + result[1])
return (False, '更改用户' + username + '的vault密码时失败,解密数据时出错,' + result[1])
new_value = result[1]
result = self.encryp_string(username, new_pwd, new_value, isverify=isverify)
if not result[0] :
self.logger.error('更改用户' + username + '的vault密码时失败,解密后再次加密数据时出错,原因:' + result[1])
return (False, '更改用户' + username + '的vault密码时失败,解密后再次加密数据时出错,' + result[1])
new_data[key] = result[1]
else :
new_data[key] = value
# content = '更改用户' + username + '的vault密码成功'
# self.logger.info(content)
return (True, new_data)
def write_cache(self, redis_key, data, expire=60 * 60, ftm='obj'):
try :
self.logger.warn('强制删除缓存')
self.redisclient.delete(redis_key)
except :
pass
set_dict = {
'name' : redis_key,
'value' : data,
'ex':expire
}
result = self.redisclient.set(set_dict, fmt=ftm)
if result[0] :
content = '写缓存成功'
# self.logger.info(content)
return (True, content)
else :
self.logger.info('写缓存失败,原因:' + result[1])
return (False, '写缓存失败,' + result[1])
| apache-2.0 | 5,559,556,405,368,260,000 | 32.727019 | 105 | 0.494136 | false | 3.403992 | false | false | false |
udoyen/pythonlearning | reading_code/password_craacker_redone.py | 1 | 2533 | # Script Name : password_cracker.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Old school password cracker using python
import crypt # Import the module
def testPass(cryptPass): # Start the function
""" function to compare given password with stored password """
# (me): splice password from zero index to index position two
salt = cryptPass[0:2]
# Open the dictionary file, (me): check the dictionary file for key value
# pair
try: # check to make sure file does exist
dictFile = open('dictionary.txt', 'r')
except IOError:
print "File cannot be opened, it may be missing"
print "or the file name may be incorrect"
else:
for word in dictFile.readlines(): # Scan through the file
# (me): remove line breaks from content of file
word = word.strip('\n')
cryptWord = crypt.crypt(word, salt) # Check for password in the file
            # (me): compares the newly encrypted password with the stored encrypted password
if (cryptWord == cryptPass):
print "[+] Found Password: " + word + "\n"
return
print "[-] Password Not Found.\n"
return
def main(): # (me):this reads a file line by line and splits each line at the ":" character point
""" This test function checks to make sure the password is in the
key:value pair format so the usename and password can be well separated
"""
try: # check to make sure file does exist
passFile = open('passwords.txt') # Open the password file
except IOError:
print "File cannot be opened, it may be missing"
print "or the file name may be incorrect"
else:
for line in passFile.readlines(): # Read through the file
if ":" in line:
                user = line.split(':')[0] # assign the username portion to the user variable
# Prepare the user name etc, (me): assign the second value after
# the split point ":" to cryptPass
cryptPass = line.split(':')[1].strip(' ')
# (me): concatenate user to printed output
print "[*] Cracking Password For: " + user
                testPass(cryptPass) # Call it to crack the user's password
else:
print "Plain line of text printed: %sNo password found" % line
if __name__ == "__main__": # main point of entry for the application
main()
| mit | -4,450,451,968,419,236,000 | 40.52459 | 98 | 0.606001 | false | 4.242881 | false | false | false |
ciex/motor | application/forms.py | 1 | 1755 | """
forms.py
Web forms based on Flask-WTForms
See: http://flask.pocoo.org/docs/patterns/wtforms/
http://wtforms.simplecodes.com/
"""
import datetime
from flaskext import wtf
from flaskext.wtf import validators
from google.appengine.api import users
from models import Movement
class TimeDeltaField(wtf.IntegerField):
    """Expects a number of days, returns a datetime.timedelta object"""
def __init__(self, label=None, validators=None, **kwargs):
super(TimeDeltaField, self).__init__(label, validators, **kwargs)
def process_data(self, value):
if value:
try:
return datetime.timedelta(days=value)
except ValueError:
self.data = None
raise ValueError("Not a valid time range")
class GoalForm(wtf.Form):
movement_id = wtf.HiddenField('Movement', validators=[validators.Required()])
cycle = wtf.HiddenField('Cycle', validators=[validators.Required()])
desc = wtf.TextField('Description', validators=[validators.Required()])
class MovementForm(wtf.Form):
name = wtf.TextField('Name', validators=[validators.Required()])
cycle_start = wtf.DateField('Start', validators=[validators.Required()], default=datetime.datetime.now())
cycle_duration = wtf.IntegerField('Cycle length', validators=[validators.Required()], default=7)
cycle_buffer = wtf.IntegerField('Cycle buffer', validators=[validators.Required()], default=2)
def validate_cycle_duration(form, field):
if field < 0:
raise validators.ValidationError("Cycle duration cannot be negative")
def validate_cycle_buffer(form, field):
if field < 0:
raise validators.ValidationError("Cycle buffer cannot be negative")
| apache-2.0 | -4,664,715,961,961,592,000 | 33.411765 | 109 | 0.68661 | false | 4.11007 | false | false | false |
lrem/chord.py | chord/peer.py | 1 | 9656 | #!/usr/bin/env python3
"""
Chord peer
==========
This module provides peer of a Chord distributed hash table.
"""
import random
import time
import socket
import socketserver
import threading
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.DEBUG)
CHAIN = 3
CHORDS = 30
MAX_KEY = 2**CHORDS
CHORD_UPDATE_INTERVAL = 5
class Peer:
def __init__(self, port=4321, key=None):
if key is None:
self.key = random.randint(0, MAX_KEY)
else:
self.key = key
logging.info('Peer key: %x' % self.key)
self.chords = [None] * CHORDS
self.chain = [None]
self.storage = {}
self.port = port
def connect(self, url):
"""
Connects to the DHT using the given `url` (of any connected node).
"""
logging.info('Connecting to: ' + url)
old = self.find_re(self.key, connecting=url)
logging.debug(old)
self.chain = [old] + request(url, 'accept', self.key,
bytes(str(self.port), 'ascii'))
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if not inside(key, self.key, old[0]):
self.chords[i] = self.find_re(key, connecting=url)
def accept(self, key, url):
"""
Accepts a peer to the DHT by:
- putting him on the ring after itself
- reassigning to him part of own key space
"""
self.chain = [(key, url)] + self.chain
# TODO: transfer him the stored keys
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if self.chords[i] is None and\
not inside(key, self.key, self.chain[0][0]):
self.chords[i] = self.chain[0]
def start(self):
"""
Starts Peer's operation.
"""
Handler.peer = self
logging.info('Listening on port %d' % self.port)
server = Server(('0.0.0.0', self.port), Handler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
logging.debug('Server thread started')
while True:
time.sleep(CHORD_UPDATE_INTERVAL)
self._update_chords()
def find(self, key):
"""
Find a peer that is closer to the one responsible for the given `key`.
Returns `None` if it's the responsible itself, or a tuple `(key, url)`.
"""
if self.chain[0] is None or inside(key, self.key, self.chain[0][0]):
return None
for i in range(CHORDS - 1):
if self.chords[i] is None:
continue # I'm still responsible for this part
if inside(key, self.chords[i][0], self.chords[i+1][0]):
return self.chords[i]
if self.chords[-1] is None:
return self.chain[0] # Another funny corner case
else:
return self.chords[-1]
def find_re(self, key, connecting=None):
"""
Find the peer that is responsible for the given `key`.
Returns `None` if it's the responsible itself, or a tuple `(key, url)`.
"""
if connecting is not None:
closer = (None, connecting)
else:
closer = self.find(key)
if closer is None:
return None
while not isinstance(closer, Me):
closer = request(closer[1], 'find', key)
return closer
def get(self, key):
"""
Return the value for the `key`, wherever it is stored.
"""
responsible = self.find_re(key)
logging.debug('Peer %s responsible for key %x' %
(responsible, key))
if responsible is None:
return self.storage.get(key, None)
else:
return request(responsible[1], 'get', key)
def put(self, key, value):
"""
Store the `(key, value)` in the DHT.
"""
responsible = self.find_re(key)
logging.debug('Peer %s responsible for key %x' %
(responsible, key))
if responsible is None:
self.storage[key] = value
else:
request(responsible[1], 'put', key, value)
def _update_chords(self):
logging.info('Storing %d values' % len(self.storage))
logging.debug(self.chain)
if self.chain[0] is None:
return
logging.debug('Updating chords')
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if not inside(key, self.key, self.chain[0][0]):
self.chords[i] = self.find_re(key)
logging.debug("%d chords established" %
sum([1 for x in self.chords if x is not None]))
def inside(key, left, right):
"""
Find whether the key is in the interval `[left, right)`.
Note the keys are arranged on a ring, so it is possible that left > right.
"""
if left == right:
return False
if left < right:
return left <= key < right
else:
return left <= key or key < right
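# Illustrative examples (assumed behaviour; keys live on a ring of size MAX_KEY):
#     inside(5, 3, 10)   ->  True    # ordinary interval [3, 10)
#     inside(10, 3, 10)  ->  False   # right end is exclusive
#     inside(1, 10, 3)   ->  True    # interval wrapping around 0
#     inside(5, 10, 3)   ->  False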
def request(url, operation, key, value=None):
logging.debug('Requesting from %s operation %s key %x value %s' %
(url, operation, key, value))
sock = _connect(url)
body = bytes("%s %x\n" % (operation, key), 'ascii')
if value:
body += bytes("%d\n" % len(value), 'ascii')
body += value
try:
sock.sendall(body)
inh = sock.makefile('rb')
response = inh.readline()
if response.startswith(b'value'):
logging.debug(response)
length = int(response.split()[1])
return inh.read(length)
elif response.startswith(b'none'):
raise KeyError("Key %x not in DHT" % key)
elif response.startswith(b'peer'):
logging.debug('Raw response %s' % response)
return _parse_peer(response)
elif response.startswith(b'me'):
key = int(response.split()[1], base=16)
return Me([key, url])
elif response.startswith(b'chain'):
chain = []
for line in inh:
chain.append(_parse_peer(line))
return chain
finally:
sock.close()
return response
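# Rough sketch of the wire protocol implemented by request() above and Handler below
# (derived from the code; the concrete keys and addresses are made up):
#     client -> server:  "find 1a2f\n"
#     server -> client:  "peer 3c40 10.0.0.2:4321\n"   (a closer peer)
#                   or:  "me 3c40\n"                   (the server itself is responsible)
# "put" sends its value as a length line followed by the raw bytes, "get" answers
# with "value N" plus the bytes, and "accept" answers with "chain\n" followed by
# one "peer ..." line per chain entry.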
class Handler(socketserver.StreamRequestHandler):
peer = None
def handle(self):
inh = self.rfile
operation, key = inh.readline().split()
key = int(key, base=16)
logging.info("Request: %s %x" % (operation, key))
response = b'unknown operation'
if operation == b'find':
peer = self.peer.find(key)
if peer is None:
response = bytes("me %x\n" % self.peer.key, 'ascii')
else:
response = _serialize_peer(peer)
elif operation == b'accept':
response = b"chain\n"
for peer in self.peer.chain:
response += _serialize_peer(peer)
port = int(_read_value(inh))
self.peer.accept(key, _make_url(self.request, port))
elif operation == b'get':
value = self.peer.get(key)
if value is None:
response = b'none'
else:
response = bytes("value %d\n" % len(value), 'ascii')
response += value
elif operation == b'put':
value = _read_value(inh)
logging.debug("Value: %s" % value)
self.peer.put(key, value)
response = b'ok'
elif operation == b'ping':
response = b'pong'
logging.debug("Response: %s\n" % response)
self.request.sendall(response)
def _read_value(inh):
length = int(inh.readline())
return inh.read(length)
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class Address(tuple): # Hate I can't define my own __init__
pass
class Me(Address):
pass
def _parse_peer(line):
if line.startswith(b'peer'):
key, url = line.split()[1:]
return Address([int(key, base=16), url])
elif line.startswith(b'none'):
return None
else:
raise ValueError('Wrong response for peer %s' % line)
def _serialize_peer(peer):
if peer is None:
return b'none'
else:
return bytes("peer %x %s\n" % (peer[0], str(peer[1], 'ascii')),
'ascii')
def _make_url(socket, port=None):
#FIXME: this gives us the request socket, not the listening one
if port is None:
return bytes("%s:%d" % socket.getpeername(), 'ascii')
else:
return bytes("%s:%d" % (socket.getpeername()[0], port), 'ascii')
def _connect(url):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if isinstance(url, bytes):
url = str(url, 'ascii')
if ':' in str(url):
host, port = url.split(':')
port = int(port)
else:
host, port = url, 4321
sock.connect((host, port))
return sock
def main():
import argparse
argp = argparse.ArgumentParser(description=__doc__)
argp.add_argument('-key', help='hexadecimal key for this node')
argp.add_argument('-url', help='url of an existing DHT peer')
argp.add_argument('-port', help='listening TCP port',
type=int, default=4321)
args = argp.parse_args()
if args.key is not None:
args.key = int(args.key, 16)
peer = Peer(port=args.port, key=args.key)
if args.url:
peer.connect(args.url)
peer.start()
if __name__ == '__main__':
main()
| mit | 6,198,633,712,688,207,000 | 30.249191 | 79 | 0.545568 | false | 3.760125 | false | false | false |
podhmo/toybox | examples/jwt_server.py | 1 | 1141 | import logging
from toybox.simpleapi import simple_view, run
from pyramid.security import Authenticated
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Allow
# please: pip install pyramid_jwt
"""
python ./jwt_server.py
# 403
$ http GET :8080/secure
# 200 OK
$ http GET :8080/login | tee /tmp/response.json
$ http GET :8080/secure X-Token:`cat /tmp/response.json | jq -r .token`
"""
logger = logging.getLogger(__name__)
@simple_view("/login")
def login_view(request):
return {"token": request.create_jwt_token(1)}
@simple_view("/secure", permission="read")
def secure_view(request):
return "OK"
class Root:
__acl__ = [
(Allow, Authenticated, ('read',)),
]
def __init__(self, request):
self.request = request
def includeme(config):
config.set_authorization_policy(ACLAuthorizationPolicy())
config.include('pyramid_jwt')
config.set_root_factory(Root)
config.set_jwt_authentication_policy('secret', http_header='X-Token')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
run.include(includeme)
run(port=8080)
| mit | -1,611,208,174,127,569,200 | 21.372549 | 73 | 0.689746 | false | 3.489297 | true | false | false |
yarhajile/sven-daemon | Sven/Module/BeagleboneBlack/Adafruit_BBIO-0.0.19/test/test_gpio_output.py | 1 | 1308 | import pytest
import os
import Adafruit_BBIO.GPIO as GPIO
def teardown_module(module):
GPIO.cleanup()
class TestGPIOOutput:
def test_output_high(self):
GPIO.setup("P8_10", GPIO.OUT)
GPIO.output("P8_10", GPIO.HIGH)
value = open('/sys/class/gpio/gpio68/value').read()
assert int(value)
GPIO.cleanup()
def test_output_low(self):
GPIO.setup("P8_10", GPIO.OUT)
GPIO.output("P8_10", GPIO.LOW)
value = open('/sys/class/gpio/gpio68/value').read()
assert not int(value)
GPIO.cleanup()
def test_direction_readback(self):
GPIO.setup("P8_10", GPIO.OUT)
direction = GPIO.gpio_function("P8_10")
assert direction == GPIO.OUT
def test_output_greater_than_one(self):
GPIO.setup("P8_10", GPIO.OUT)
GPIO.output("P8_10", 2)
value = open('/sys/class/gpio/gpio68/value').read()
assert int(value)
GPIO.cleanup()
def test_output_of_pin_not_setup(self):
with pytest.raises(RuntimeError):
GPIO.output("P8_11", GPIO.LOW)
GPIO.cleanup()
def test_output_setup_as_input(self):
GPIO.setup("P8_10", GPIO.IN)
with pytest.raises(RuntimeError):
GPIO.output("P8_10", GPIO.LOW)
GPIO.cleanup()
| gpl-2.0 | 4,022,292,955,603,422,700 | 28.727273 | 59 | 0.591743 | false | 3.362468 | true | false | false |
bobbydurrett/PythonDBAGraphs | onewait.py | 1 | 2938 | """
PythonDBAGraphs: Graphs to help with Oracle Database Tuning
Copyright (C) 2016 Robert Taft Durrett (Bobby Durrett)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact:
[email protected]
onewait.py
Graph of one wait event
"""
import myplot
import util
def onewait(wait_event,minimum_waits,start_time,end_time,instance_number):
q_string = """
select
sn.END_INTERVAL_TIME,
(after.total_waits-before.total_waits) NUMBER_OF_WAITS,
(after.time_waited_micro-before.time_waited_micro)/(after.total_waits-before.total_waits) AVG_MICROSECONDS
from DBA_HIST_SYSTEM_EVENT before, DBA_HIST_SYSTEM_EVENT after,DBA_HIST_SNAPSHOT sn
where before.event_name='"""
q_string += wait_event
q_string += """' and
END_INTERVAL_TIME
between
to_date('"""
q_string += start_time
q_string += """','DD-MON-YYYY HH24:MI:SS')
and
to_date('"""
q_string += end_time
q_string += """','DD-MON-YYYY HH24:MI:SS')
and
after.event_name=before.event_name and
after.snap_id=before.snap_id+1 and
after.instance_number = """
q_string += instance_number
q_string += """ and
before.instance_number=after.instance_number and
after.snap_id=sn.snap_id and
after.instance_number=sn.instance_number and
(after.total_waits-before.total_waits) > """
q_string += str(minimum_waits)
q_string += """
order by after.snap_id
"""
return q_string
database,dbconnection = util.script_startup('One wait event')
# Get user input
wait_event=util.input_with_default('wait event','db file sequential read')
min_waits=int(util.input_with_default('minimum number of waits per hour','0'))
start_time=util.input_with_default('Start date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-1900 12:00:00')
end_time=util.input_with_default('End date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-2200 12:00:00')
instance_number=util.input_with_default('Database Instance (1 if not RAC)','1')
# Build and run query
q = onewait(wait_event,min_waits,start_time,end_time,instance_number);
r = dbconnection.run_return_flipped_results(q)
util.exit_no_results(r)
# plot query
myplot.title = "'"+wait_event+"' waits on "+database+" database, instance "+instance_number+", minimum waits="+str(min_waits)
myplot.ylabel1 = "Number of events"
myplot.ylabel2 = "Averaged Elapsed Microseconds"
myplot.xdatetimes = r[0]
myplot.ylists = r[1:]
myplot.line_2subplots()
 | gpl-3.0 | -2,918,252,863,514,742,300 | 29.298969 | 125 | 0.730088 | false | 3.057232 | false | false | false
nyu-mll/spinn | python/spinn/models/rl_classifier.py | 1 | 15458 | import os
import json
import math
import random
import sys
import time
import gflags
import numpy as np
from spinn.util import afs_safe_logger
from spinn.util.data import SimpleProgressBar
from spinn.util.blocks import to_gpu
from spinn.util.misc import Accumulator, EvalReporter
from spinn.util.logging import stats, train_accumulate, create_log_formatter
from spinn.util.logging import train_rl_accumulate
from spinn.util.logging import eval_stats, eval_accumulate, prettyprint_trees
from spinn.util.loss import auxiliary_loss
from spinn.util.sparks import sparks, dec_str
import spinn.util.evalb as evalb
import spinn.util.logging_pb2 as pb
from spinn.util.trainer import ModelTrainer
# PyTorch
import torch
import torch.nn as nn
from torch.autograd import Variable
from spinn.models.base import get_data_manager, get_flags, get_batch
from spinn.models.base import flag_defaults, init_model, log_path
from spinn.models.base import load_data_and_embeddings
FLAGS = gflags.FLAGS
def evaluate(FLAGS, model, eval_set, log_entry,
logger, trainer, vocabulary=None, show_sample=False, eval_index=0):
filename, dataset = eval_set
A = Accumulator()
eval_log = log_entry.evaluation.add()
reporter = EvalReporter()
tree_strs = None
# Evaluate
total_batches = len(dataset)
progress_bar = SimpleProgressBar(
msg="Run Eval",
bar_length=60,
enabled=FLAGS.show_progress_bar)
progress_bar.step(0, total=total_batches)
total_tokens = 0
cpt = 0
cpt_max = 0
start = time.time()
model.eval()
for i, dataset_batch in enumerate(dataset):
batch = get_batch(dataset_batch)
eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch, eval_ids = batch
# Run model.
np.set_printoptions(threshold=np.inf)
output = model(eval_X_batch, eval_transitions_batch, eval_y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions,
store_parse_masks=show_sample,
example_lengths=eval_num_transitions_batch)
can_sample = (FLAGS.model_type ==
"RLSPINN" and FLAGS.use_internal_parser)
if show_sample and can_sample:
tmp_samples = model.get_samples(
eval_X_batch, vocabulary, only_one=not FLAGS.write_eval_report)
tree_strs = prettyprint_trees(tmp_samples)
if not FLAGS.write_eval_report:
# Only show one sample, regardless of the number of batches.
show_sample = False
# Calculate class accuracy.
target = torch.from_numpy(eval_y_batch).long()
# get the index of the max log-probability
pred = output.data.max(1, keepdim=False)[1].cpu()
eval_accumulate(model, A, batch)
A.add('class_correct', pred.eq(target).sum())
A.add('class_total', target.size(0))
# Update Aggregate Accuracies
total_tokens += sum(
[(nt + 1) / 2 for nt in eval_num_transitions_batch.reshape(-1)])
if FLAGS.write_eval_report:
transitions_per_example, _ = model.spinn.get_transitions_per_example(
style="preds" if FLAGS.eval_report_use_preds else "given") if (
FLAGS.model_type == "RLSPINN" and FLAGS.use_internal_parser) else (
None, None)
if model.use_sentence_pair:
batch_size = pred.size(0)
sent1_transitions = transitions_per_example[:
batch_size] if transitions_per_example is not None else None
sent2_transitions = transitions_per_example[batch_size:
] if transitions_per_example is not None else None
sent1_trees = tree_strs[:batch_size] if tree_strs is not None else None
sent2_trees = tree_strs[batch_size:
] if tree_strs is not None else None
else:
sent1_transitions = transitions_per_example if transitions_per_example is not None else None
sent2_transitions = None
sent1_trees = tree_strs if tree_strs is not None else None
sent2_trees = None
if FLAGS.cp_metric:
cp, cp_max = reporter.save_batch(
pred,
target,
eval_ids,
output.data.cpu().numpy(),
sent1_transitions,
sent2_transitions,
sent1_trees,
sent2_trees,
cp_metric=FLAGS.cp_metric,
mt=False)
cpt += cp
cpt_max += cp_max
else:
reporter.save_batch(
pred,
target,
eval_ids,
output.data.cpu().numpy(),
sent1_transitions,
sent2_transitions,
sent1_trees,
sent2_trees,
mt=False)
# Print Progress
progress_bar.step(i + 1, total=total_batches)
progress_bar.finish()
    cp_metric_value = cpt / cpt_max if cpt_max else 0.0
if tree_strs is not None:
logger.Log('Sample: ' + tree_strs[0])
end = time.time()
total_time = end - start
A.add('total_tokens', total_tokens)
A.add('total_time', total_time)
logger.Log("Eval cp_acc: " + str(cp_metric_value))
eval_stats(model, A, eval_log)
eval_log.filename = filename
if FLAGS.write_eval_report:
eval_report_path = os.path.join(
FLAGS.log_path,
FLAGS.experiment_name +
".eval_set_" +
str(eval_index) +
".report")
reporter.write_report(eval_report_path)
eval_class_acc = eval_log.eval_class_accuracy
eval_trans_acc = eval_log.eval_transition_accuracy
return eval_class_acc, eval_trans_acc
def train_loop(
FLAGS,
model,
trainer,
training_data_iter,
eval_iterators,
logger,
vocabulary):
# Accumulate useful statistics.
A = Accumulator(maxlen=FLAGS.deque_length)
# Train.
logger.Log("Training.")
# New Training Loop
progress_bar = SimpleProgressBar(
msg="Training", bar_length=60, enabled=FLAGS.show_progress_bar)
progress_bar.step(i=0, total=FLAGS.statistics_interval_steps)
log_entry = pb.SpinnEntry()
for _ in range(trainer.step, FLAGS.training_steps):
if (trainer.step - trainer.best_dev_step) > FLAGS.early_stopping_steps_to_wait:
logger.Log('No improvement after ' +
str(FLAGS.early_stopping_steps_to_wait) +
' steps. Stopping training.')
break
model.train()
log_entry.Clear()
log_entry.step = trainer.step
should_log = False
start = time.time()
batch = get_batch(next(training_data_iter))
X_batch, transitions_batch, y_batch, num_transitions_batch, train_ids = batch
total_tokens = sum(
[(nt + 1) / 2 for nt in num_transitions_batch.reshape(-1)])
# Reset cached gradients.
trainer.optimizer_zero_grad()
temperature = math.sin(
math.pi /
2 +
trainer.step /
float(
FLAGS.rl_confidence_interval) *
2 *
math.pi)
temperature = (temperature + 1) / 2
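        # Note (added for clarity): the sine above makes `temperature` oscillate in
        # [0, 1] with a period of FLAGS.rl_confidence_interval steps -- 1.0 at step 0,
        # 0.0 half an interval later, and back to 1.0 at the end of each interval. It
        # scales the confidence penalty and the soft wake/sleep RL weight below.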
# Confidence Penalty for Transition Predictions.
if FLAGS.rl_confidence_penalty:
epsilon = FLAGS.rl_epsilon * \
math.exp(-trainer.step / float(FLAGS.rl_epsilon_decay))
temp = 1 + \
(temperature - .5) * FLAGS.rl_confidence_penalty * epsilon
model.spinn.temperature = max(1e-3, temp)
# Soft Wake/Sleep based on temperature.
if FLAGS.rl_wake_sleep:
model.rl_weight = temperature * FLAGS.rl_weight
# Run model.
output = model(X_batch, transitions_batch, y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions
)
# Calculate class accuracy.
target = torch.from_numpy(y_batch).long()
# get the index of the max log-probability
pred = output.data.max(1, keepdim=False)[1].cpu()
class_acc = pred.eq(target).sum().float() / float(target.size(0))
# Calculate class loss.
xent_loss = nn.CrossEntropyLoss()(output, to_gpu(Variable(target)))
# Optionally calculate transition loss.
transition_loss = model.transition_loss if hasattr(
model, 'transition_loss') else None
# Accumulate Total Loss Variable
total_loss = 0.0
total_loss += xent_loss
if transition_loss is not None and model.optimize_transition_loss:
total_loss += transition_loss
aux_loss = auxiliary_loss(model)
total_loss += aux_loss[0]
# Backward pass.
total_loss.backward()
# Hard Gradient Clipping
nn.utils.clip_grad_norm_([param for name, param in model.named_parameters() if name not in ["embed.embed.weight"]], FLAGS.clipping_max_value)
# Gradient descent step.
trainer.optimizer_step()
end = time.time()
total_time = end - start
train_accumulate(model, A, batch)
A.add('class_acc', class_acc)
A.add('total_tokens', total_tokens)
A.add('total_time', total_time)
train_rl_accumulate(model, A, batch)
if trainer.step % FLAGS.statistics_interval_steps == 0:
progress_bar.step(i=FLAGS.statistics_interval_steps,
total=FLAGS.statistics_interval_steps)
progress_bar.finish()
A.add('xent_cost', xent_loss.data.item())
stats(model, trainer, A, log_entry)
should_log = True
if trainer.step % FLAGS.sample_interval_steps == 0 and FLAGS.num_samples > 0:
should_log = True
model.train()
model(X_batch, transitions_batch, y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions
)
tr_transitions_per_example, tr_strength = model.spinn.get_transitions_per_example(
)
model.eval()
model(X_batch, transitions_batch, y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions
)
ev_transitions_per_example, ev_strength = model.spinn.get_transitions_per_example(
)
if model.use_sentence_pair and len(transitions_batch.shape) == 3:
transitions_batch = np.concatenate([
transitions_batch[:, :, 0], transitions_batch[:, :, 1]], axis=0)
# This could be done prior to running the batch for a tiny speed
# boost.
t_idxs = list(range(FLAGS.num_samples))
random.shuffle(t_idxs)
t_idxs = sorted(t_idxs[:FLAGS.num_samples])
for t_idx in t_idxs:
log = log_entry.rl_sampling.add()
gold = transitions_batch[t_idx]
pred_tr = tr_transitions_per_example[t_idx]
pred_ev = ev_transitions_per_example[t_idx]
strength_tr = sparks(
[1] + tr_strength[t_idx].tolist(), dec_str)
strength_ev = sparks(
[1] + ev_strength[t_idx].tolist(), dec_str)
_, crossing = evalb.crossing(gold, pred)
log.t_idx = t_idx
log.crossing = crossing
log.gold_lb = "".join(map(str, gold))
log.pred_tr = "".join(map(str, pred_tr))
log.pred_ev = "".join(map(str, pred_ev))
log.strg_tr = strength_tr[1:]
log.strg_ev = strength_ev[1:]
if trainer.step > 0 and trainer.step % FLAGS.eval_interval_steps == 0:
should_log = True
for index, eval_set in enumerate(eval_iterators):
acc, _ = evaluate(
FLAGS, model, eval_set, log_entry, logger, trainer, eval_index=index, vocabulary=vocabulary, show_sample=True)
if index == 0:
trainer.new_dev_accuracy(acc)
progress_bar.reset()
if trainer.step > FLAGS.ckpt_step and trainer.step % FLAGS.ckpt_interval_steps == 0:
should_log = True
trainer.checkpoint()
if should_log:
logger.LogEntry(log_entry)
progress_bar.step(i=(trainer.step % FLAGS.statistics_interval_steps) + 1,
total=FLAGS.statistics_interval_steps)
def run(only_forward=False):
logger = afs_safe_logger.ProtoLogger(log_path(FLAGS),
print_formatter=create_log_formatter(
True, True),
write_proto=FLAGS.write_proto_to_log)
header = pb.SpinnHeader()
data_manager = get_data_manager(FLAGS.data_type)
logger.Log("Flag Values:\n" +
json.dumps(FLAGS.FlagValuesDict(), indent=4, sort_keys=True))
# Get Data and Embeddings
vocabulary, initial_embeddings, training_data_iter, eval_iterators, training_data_length = \
load_data_and_embeddings(FLAGS, data_manager, logger,
FLAGS.training_data_path, FLAGS.eval_data_path)
# Build model.
vocab_size = len(vocabulary)
num_classes = len(set(data_manager.LABEL_MAP.values()))
model = init_model(
FLAGS,
logger,
initial_embeddings,
vocab_size,
num_classes,
data_manager,
header)
time_to_wait_to_lower_lr = min(10000, int(training_data_length / FLAGS.batch_size))
trainer = ModelTrainer(model, logger, time_to_wait_to_lower_lr, vocabulary, FLAGS)
header.start_step = trainer.step
header.start_time = int(time.time())
# Do an evaluation-only run.
logger.LogHeader(header) # Start log_entry logging.
if only_forward:
log_entry = pb.SpinnEntry()
for index, eval_set in enumerate(eval_iterators):
log_entry.Clear()
acc = evaluate(
FLAGS,
model,
eval_set,
log_entry,
logger,
trainer,
vocabulary,
show_sample=True,
eval_index=index)
print(log_entry)
logger.LogEntry(log_entry)
else:
train_loop(
FLAGS,
model,
trainer,
training_data_iter,
eval_iterators,
logger,
vocabulary)
if __name__ == '__main__':
get_flags()
# Parse command line flags.
FLAGS(sys.argv)
flag_defaults(FLAGS)
if FLAGS.model_type != "RLSPINN":
raise Exception("Reinforce is only implemented for RLSPINN.")
run(only_forward=FLAGS.expanded_eval_only_mode)
| mit | -1,203,337,465,858,312,400 | 33.815315 | 149 | 0.566373 | false | 3.975823 | false | false | false |
SelvorWhim/competitive | LeetCode/MinimumSubsequenceInNonIncreasingOrder.py | 1 | 1351 | # idea: order by size and take largest elements until the sum becomes > sum of remaining
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
nums = sorted(nums, reverse=True)
total_sum = sum(nums)
running_sum = 0
subseq_len = 0 # how many biggest members we'll need to take before sum is greater than the rest
for x in nums:
running_sum += x
subseq_len += 1
if running_sum > (total_sum - running_sum):
break
return nums[:subseq_len]
# in this variant (not relevant for the problem as described) we keep track of original order so subsequence can be returned in original order
def minSubsequenceInOriginalOrder(self, nums: List[int]) -> List[int]:
total_sum = sum(nums)
sorted_nums = sorted(enumerate(nums), key=lambda x: x[1], reverse=True) # preserving original order in 1st index
running_sum = 0
subseq_len = 0 # how many biggest members we'll need to take before sum is greater than the rest
for t in sorted_nums:
running_sum += t[1]
subseq_len += 1
if running_sum > (total_sum - running_sum):
break
subseq_indexes = sorted([t[0] for t in sorted_nums[:subseq_len]])
return [nums[i] for i in subseq_indexes]
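# Hedged usage sketch (not part of the original solution file; values follow the
# well-known LeetCode example, and the harness assumes the class is importable):
#   s = Solution()
#   s.minSubsequence([4, 3, 10, 9, 8])                   # -> [10, 9]
#   s.minSubsequenceInOriginalOrder([4, 3, 10, 9, 8])    # -> [10, 9] (original order kept)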
| unlicense | 6,513,663,138,112,198,000 | 49.037037 | 146 | 0.617321 | false | 3.950292 | false | false | false |
Ircam-Web/mezzanine-organization | organization/core/related.py | 1 | 2036 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import exceptions
from django.db.models.fields.related import ForeignKey
from django.db.utils import ConnectionHandler, ConnectionRouter
connections = ConnectionHandler()
router = ConnectionRouter()
class SpanningForeignKey(ForeignKey):
def validate(self, value, model_instance):
if self.rel.parent_link:
return
# Call the grandparent rather than the parent to skip validation
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(self.rel.to, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.rel.to._meta.verbose_name, 'pk': value,
'field': self.rel.field_name, 'value': value,
}, # 'pk' is included for backwards compatibility
)
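# Hedged usage sketch (not part of the original module): a SpanningForeignKey is meant
# for targets stored in a different database, where the stock ForeignKey validation
# would query the wrong connection. The model and app names below are hypothetical:
#
#   class Reference(models.Model):                          # lives in the 'default' database
#       media_item = SpanningForeignKey('media.MediaItem')  # target model lives in another database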
| agpl-3.0 | -7,799,400,437,900,264,000 | 37.415094 | 77 | 0.672888 | false | 4.121457 | false | false | false |
virtualnobi/MediaFiler | Model/MediaClassHandler.py | 1 | 18503 | """Provides class and element handling functionality.
(c) by nobisoft 2016-
"""
# Imports
## Standard
import sys
import copy
import re
import codecs
import logging
## Contributed
## nobi
## Project
#from .MediaOrganization import MediaOrganization
# Package Variables
Logger = logging.getLogger(__name__)
class MediaClassHandler(object):
"""
"""
# Constants
KeyName = u'class' # key in class dictionary mapping to class name
KeyMultiple = u'multiple' # key in class dictionary mapping to Boolean indicating whether multiple elements can be selected
KeyElements = u'elements' # key in class dictionary mapping to list of elements
KeyRequired = u'required' # key in class dictionary mapping to list of required elements
KeyRequiredClasses = u'requiredClass' # key in class dictionary mapping to list of required classes
KeyProhibited = u'prohibited' # key in class dictionary mapping to list of prohibited elements
TagSeparator = u'.' # character to introduce a tag/element
RETagSeparatorsRecognized = ('[, _' + TagSeparator + '-]')
ElementIllegal = u'illegal' # special element signalling that a combination of elements is not legal
ElementNew = u'new' # special element signalling that the entry is new, i.e., just imported
InitialFileContent = (u'# Classname Element+ # for classes with single-choice elements\n' +
u'# Classname [] Element+ # for classes with multiple-choice elements\n' +
u'# Classname +Element1 Element2+ # for a class which applies only if Element1 has been assigned')
# Class Variables
# Class Methods
# Lifecycle
def __init__(self, pathname):
"""Create a MediaClassHandler instance from the definitions in pathname.
"""
# inheritance
# internal state
self.classes = []
self.knownElements = []
self.readClassesFromFile(pathname)
return(None)
# Setters
# Getters
def getClasses(self):
"""Return a list of all classes.
"""
return(copy.copy(self.classes))
def getClassNames(self):
"""Return a list of all class names.
"""
return([aClass[self.__class__.KeyName] for aClass in self.classes])
def isMultipleClass(self, aClass):
"""Return True if multiple elements of CLASSNAME may be selected.
Return False if at most one element of CLASSNAME may be selected.
"""
return((aClass != None)
and (self.KeyMultiple in aClass)
and (aClass[self.KeyMultiple]))
def isMultipleClassByName(self, className):
"""Return True if multiple elements of CLASSNAME may be selected.
Return False if at most one element of CLASSNAME may be selected.
"""
return(self.isMultipleClass(self.getClassByName(className)))
def getElementsOfClass(self, aClass):
"""
dict aClass
Return list of all tags in aClass, ordered as in definition.
"""
return(list(aClass[self.KeyElements]))
def getElementsOfClassByName(self, className):
"""
String className
Raises KeyError if no class exists with name className
Return list of all tags in className, ordered as in definition.
"""
aClass = self.getClassByName(className)
if (aClass == None):
raise KeyError('No class named "%s" exists!' % className)
else:
return(self.getElementsOfClass(aClass))
def getKnownElements(self):
"""Return a list of all known elements.
"""
return(copy.copy(self.knownElements))
def isLegalElement(self, element):
"""Return True if element is a legal class element, False otherwise.
String element
Return Boolean
"""
return(self.normalizeTag(element) in self.getKnownElements())
# Other API
def normalizeTag(self, tag):
"""Normalize a tag (element), for example, when importing.
This will compare the tag with all known tags in a case-insensitive way,
and return the defined spelling if found in the known tags.
If not found in the known tags, it will be returned without changes.
String tag
Return Boolean
"""
for knownTag in self.getKnownElements():
if (knownTag.lower() == tag.lower()):
return(knownTag)
return(tag)
def combineTagsWithPriority(self, tagSet, priorityTagSet):
"""Return the union of the two tag sets, except for single-selection tag classes where the second set has priority.
Set of String tagSet
Set of String priorityTagSet
Return Set of String
"""
result = set(tagSet)
singleSelectionClasses = filter(lambda c: (not self.isMultipleClass(c)), self.getClasses())
for priorityTag in priorityTagSet:
priorityClass = self.getClassOfTag(priorityTag)
if (priorityClass in singleSelectionClasses):
result.difference_update(set(self.getElementsOfClass(priorityClass)))
result.add(priorityTag)
return(result)
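    # Hedged illustration (assumes a hypothetical single-choice class 'Rating' with
    # elements 'good' and 'bad' in the loaded definition file):
    #   combineTagsWithPriority({u'good', u'beach'}, {u'bad'})  ->  {u'beach', u'bad'}
    # i.e. 'bad' displaces 'good' because only one 'Rating' tag may be selected.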
def getTagsOnChange(self, tagSet, addedTag, removedTags):
"""Determine new set of tags based on tags added and removed.
Set of String tagSet
String or None addedTag
Set of String removedTags
Return Set of String containing the tags after addition and removal
"""
Logger.debug('MediaClassHandler.getTagsOnChange(%s +%s -%s)' % (tagSet, addedTag, removedTags))
result = copy.copy(tagSet)
if (addedTag):
result.update(set([addedTag]))
result = self.includeRequiredElements(result)
Logger.debug('MediaClassHandler.getTagsOnChange(): Adding %s yields %s' % (addedTag, result.difference(tagSet)))
for tag in removedTags:
result.discard(tag)
for aClass in self.getClasses():
if (tag in self.getRequiredElementsOfClass(aClass)):
result.difference_update(set(self.getElementsOfClass(aClass)))
if (((addedTag == None) or
(self.getClassOfTag(tag) != self.getClassOfTag(addedTag)))
and (self.getClassOfTag(tag)[MediaClassHandler.KeyName] in self.getRequiredClassesOfClass(aClass))):
result.difference_update(set(self.getElementsOfClass(aClass)))
Logger.debug('MediaClassHandler.getTagsOnChange(): Removed %s' % tagSet.difference(result))
return(result)
def includeRequiredElements(self, elements):
"""Add all required tags to a tagset.
Set elements contains tags as String
Return Set containing all tags as well as additional tags required by them
"""
result = set(elements)
for aClass in self.getClasses():
for anElement in self.getElementsOfClass(aClass):
if (anElement in elements):
for requiredElement in self.getRequiredElementsOfClass(aClass):
result.add(requiredElement)
for requiredClassName in self.getRequiredClassesOfClass(aClass):
requiredTags = set(self.getElementsOfClassByName(requiredClassName))
if (len(requiredTags.intersection(elements)) == 0):
result.add(self.getElementsOfClassByName(requiredClassName)[0]) # requiredTags.pop()) # choose first tag from class definition
for prohibitedElement in self.getProhibitedElementsOfClass(aClass):
if (prohibitedElement in elements):
result.add(self.ElementIllegal)
Logger.debug('MediaClassHandler.includeRequiredElements(%s): Added %s' % (elements, (result - elements)))
return(result)
def orderElements(self, elementSet):
"""Order the elements specified according to class definition.
Returns a List of String.
"""
result = []
elements = copy.copy(elementSet)
for aClass in self.getClasses():
for element in self.getElementsOfClass(aClass):
if (element in elements):
result.append(element)
elements.remove(element)
for element in sorted(elements):
result.append(element)
return (result)
def elementsToString(self, elementSet):
"""Return a String containing all elements in ELEMENTSET in canonical order.
Elements are introduced by TagSeparator (meaning the result is either empty or starts with a TagSeparator).
"""
elements = self.orderElements(elementSet)
result = (MediaClassHandler.TagSeparator.join(elements))
if (not (result == '')):
result = (MediaClassHandler.TagSeparator + result)
return (result)
def stringToElements(self, elementString):
"""Turn a (unicode) string into a set of (unicode) tags.
String elementString contains a string of words
Return a Set with all elements from ELEMENTSTRING
"""
elements = set(re.split(MediaClassHandler.RETagSeparatorsRecognized, elementString))
if (u'' in elements):
elements.remove(u'')
return(elements)
def stringToKnownAndUnknownElements(self, elementString):
"""Turn a (unicode) string into (unicode) tags.
Return (known, unknown) where
Dictionary known maps class names to (unicode) tags
Set unknown contains all remaining tags from elementString
"""
remainingElements = self.stringToElements(elementString)
knownElements = {}
# sort elements into class sequence
for aClass in self.getClasses():
className = aClass[self.KeyName]
for classElement in self.getElementsOfClass(aClass):
if (classElement in remainingElements):
remainingElements.remove(classElement)
if (className in knownElements.keys()): # add known element...
knownElements[className].append(classElement) # ...to an existing list
else:
knownElements[className] = [classElement] # ...as a single-entry list
return(knownElements, remainingElements)
# Event Handlers
# Internal - to change without notice
def getClassByName(self, className):
"""Return a Dictionary defining the named class.
Return None if className does not exist.
"""
for aClass in self.classes:
if (aClass[self.KeyName] == className):
return(aClass)
return(None)
def getClassOfTag(self, tagName):
"""Return the class to which the given tag belongs.
String tagName
Return Dictionary describing the class
or None if tagName belongs to no class
"""
for aClass in self.classes:
if (tagName in self.getElementsOfClass(aClass)):
return(aClass)
return(None)
def getRequiredElementsOfClass(self, aClass):
"""Return a list of all elements which must apply for aClass to be applicable.
"""
return(aClass[self.KeyRequired])
def getRequiredClassesOfClass(self, aClass):
"""Return a list of all class names which must apply for aClass to be applicable.
At least one tag from the resulting classes must be applied for aClass to be applicable.
Return List of String
"""
return(aClass[self.KeyRequiredClasses])
def getProhibitedElementsOfClass(self, aClass):
"""Return a list of all elements which may not apply for className to be applicable.
Return None if className does not exist.
"""
return(aClass[self.KeyProhibited])
def readClassesFromFile(self, pathname):
"""Set self's internal state from the class definition in the given file.
String pathname contains the file name
"""
self.classes = []
self.knownElements = []
try:
# classFile = codecs.open(pathname, encoding=sys.getfilesystemencoding())
classFile = open(pathname, mode='rt') # Python 3
except:
raise IOError('Cannot open "%s" to read tag classes!' % pathname)
for line in classFile:
#print ("Read line >%s<" % line)
line = line.strip() # trim white space
if ((len (line) == 0) or (line[0] == '#')): # empty or comment line, ignore
#print ("Ignored empty or comment line")
pass
else: # non-comment, interpret
tokens = line.split()
className = tokens.pop(0)
Logger.debug('MediaClassHandler.readClassesFromFile(): Definition of "%s" is "%s"' % (className, tokens))
multiple = False
required = []
requiredClasses = []
prohibited = []
elements = []
while (len(tokens) > 0):
token = tokens.pop(0)
if (token == '[]'): # this is a multiple-selection class
multiple = True
elif (token[0] == '+'):
name = token[1:]
if (self.isLegalElement(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required tag "%s"' % name)
required.append(name)
elif (self.getClassByName(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required class "%s"' % name)
requiredClasses.append(name)
else:
Logger.debug('MediaClassHandler.readClassesFromFile(): Requiring unknown tag "%s"' % name)
required.append(name)
elif (token[0] == '-'):
prohibited.append(token[1:])
else:
#print ("Adding element %s" % token)
elements.append(token)
aClass = {self.KeyName:className,
self.KeyRequired:required,
self.KeyRequiredClasses:requiredClasses,
self.KeyProhibited:prohibited,
self.KeyMultiple:multiple,
self.KeyElements:elements}
#print ("Found definition of %s" % aClass)
self.classes.append(aClass)
self.knownElements.extend(elements) # extend list of all known elements for filtering
classFile.close()
def readClassesFromFile3(self, pathname):
"""Set self's internal state from the class definition in the given file.
String pathname contains the file name
"""
self.classes = []
self.knownElements = []
try:
classFile = codecs.open(pathname, encoding=sys.getfilesystemencoding())
except:
raise IOError('Cannot open "%s" to read tag classes!' % pathname)
for line in classFile:
#print ("Read line >%s<" % line)
line = line.strip() # trim white space
if ((len (line) == 0) or (line[0] == '#')): # empty or comment line, ignore
#print ("Ignored empty or comment line")
pass
else: # non-comment, interpret
tokens = line.split()
className = tokens.pop(0)
Logger.debug('MediaClassHandler.readClassesFromFile(): Definition of "%s" is "%s"' % (className, tokens))
multiple = False
required = []
requiredClasses = []
prohibited = []
elements = []
while (len(tokens) > 0):
token = tokens.pop(0)
if (token == '[]'): # this is a multiple-selection class
multiple = True
elif (token[0] == '+'):
name = token[1:]
if (self.isLegalElement(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required tag "%s"' % name)
required.append(name)
elif (self.getClassByName(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required class "%s"' % name)
requiredClasses.append(name)
else:
Logger.debug('MediaClassHandler.readClassesFromFile(): Requiring unknown tag "%s"' % name)
required.append(name)
elif (token[0] == '-'):
prohibited.append(token[1:])
else:
#print ("Adding element %s" % token)
elements.append(token)
aClass = {self.KeyName:className,
self.KeyRequired:required,
self.KeyRequiredClasses:requiredClasses,
self.KeyProhibited:prohibited,
self.KeyMultiple:multiple,
self.KeyElements:elements}
#print ("Found definition of %s" % aClass)
self.classes.append(aClass)
self.knownElements.extend(elements) # extend list of all known elements for filtering
classFile.close()
# Class Initialization
pass
# Executable Script
if __name__ == "__main__":
pass
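# Hedged usage sketch (not part of the original module); the definition file path and
# the tag names are hypothetical:
#   handler = MediaClassHandler('/path/to/classes.txt')
#   known, unknown = handler.stringToKnownAndUnknownElements(u'beach.sunset.oddtag')
#   # 'known' maps class names to recognized tags, 'unknown' holds the rest
#   print(handler.elementsToString(handler.includeRequiredElements({u'beach'})))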
| gpl-3.0 | -4,694,349,200,343,272,000 | 38.755507 | 156 | 0.565908 | false | 4.813476 | false | false | false |
aurex-linux/virt-manager | virtManager/mediacombo.py | 1 | 6975 | #
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
# pylint: disable=E0611
from gi.repository import Gtk
# pylint: enable=E0611
from virtManager import uiutil
from virtManager.baseclass import vmmGObjectUI
class vmmMediaCombo(vmmGObjectUI):
OPTICAL_FIELDS = 4
(OPTICAL_DEV_PATH,
OPTICAL_LABEL,
OPTICAL_HAS_MEDIA,
OPTICAL_DEV_KEY) = range(OPTICAL_FIELDS)
def __init__(self, conn, builder, topwin, media_type):
vmmGObjectUI.__init__(self, None, None, builder=builder, topwin=topwin)
self.conn = conn
self.media_type = media_type
self.top_box = None
self.combo = None
self._warn_icon = None
self._populated = False
self._init_ui()
def _cleanup(self):
try:
self.conn.disconnect_by_func(self._mediadev_added)
self.conn.disconnect_by_func(self._mediadev_removed)
except:
pass
self.conn = None
self.top_box.destroy()
self.top_box = None
##########################
# Initialization methods #
##########################
def _init_ui(self):
self.top_box = Gtk.Box()
self.top_box.set_spacing(6)
self.top_box.set_orientation(Gtk.Orientation.HORIZONTAL)
self._warn_icon = Gtk.Image()
self._warn_icon.set_from_stock(
Gtk.STOCK_DIALOG_WARNING, Gtk.IconSize.MENU)
self.combo = Gtk.ComboBox()
self.top_box.add(self.combo)
self.top_box.add(self._warn_icon)
self.top_box.show_all()
# [Device path, pretty label, has_media?, device key, media key,
# vmmMediaDevice, is valid device]
fields = []
fields.insert(self.OPTICAL_DEV_PATH, str)
fields.insert(self.OPTICAL_LABEL, str)
fields.insert(self.OPTICAL_HAS_MEDIA, bool)
fields.insert(self.OPTICAL_DEV_KEY, str)
self.combo.set_model(Gtk.ListStore(*fields))
text = Gtk.CellRendererText()
self.combo.pack_start(text, True)
self.combo.add_attribute(text, 'text', self.OPTICAL_LABEL)
error = self.conn.mediadev_error
self._warn_icon.set_visible(bool(error))
self._warn_icon.set_tooltip_text(error)
def _set_mediadev_default(self):
model = self.combo.get_model()
if len(model) != 0:
return
row = [None] * self.OPTICAL_FIELDS
row[self.OPTICAL_DEV_PATH] = None
row[self.OPTICAL_LABEL] = _("No device present")
row[self.OPTICAL_HAS_MEDIA] = False
row[self.OPTICAL_DEV_KEY] = None
model.append(row)
def _set_mediadev_row_from_object(self, row, obj):
row[self.OPTICAL_DEV_PATH] = obj.get_path()
row[self.OPTICAL_LABEL] = obj.pretty_label()
row[self.OPTICAL_HAS_MEDIA] = obj.has_media()
row[self.OPTICAL_DEV_KEY] = obj.get_key()
def _mediadev_set_default_selection(self):
# Set the first active cdrom device as selected, otherwise none
widget = self.combo
model = widget.get_model()
idx = 0
active = widget.get_active()
if active != -1:
# already a selection, don't change it
return
for row in model:
if row[self.OPTICAL_HAS_MEDIA] is True:
widget.set_active(idx)
return
idx += 1
widget.set_active(0)
def _mediadev_media_changed(self, newobj):
widget = self.combo
model = widget.get_model()
active = widget.get_active()
idx = 0
# Search for the row with matching device node and
# fill in info about inserted media. If model has no current
# selection, select the new media.
for row in model:
if row[self.OPTICAL_DEV_PATH] == newobj.get_path():
self._set_mediadev_row_from_object(row, newobj)
has_media = row[self.OPTICAL_HAS_MEDIA]
if has_media and active == -1:
widget.set_active(idx)
elif not has_media and active == idx:
widget.set_active(-1)
idx = idx + 1
self._mediadev_set_default_selection()
def _mediadev_added(self, ignore, newobj):
widget = self.combo
model = widget.get_model()
if newobj.get_media_type() != self.media_type:
return
if model is None:
return
if len(model) == 1 and model[0][self.OPTICAL_DEV_PATH] is None:
# Only entry is the 'No device' entry
model.clear()
newobj.connect("media-added", self._mediadev_media_changed)
newobj.connect("media-removed", self._mediadev_media_changed)
# Brand new device
row = [None] * self.OPTICAL_FIELDS
self._set_mediadev_row_from_object(row, newobj)
model.append(row)
self._mediadev_set_default_selection()
def _mediadev_removed(self, ignore, key):
widget = self.combo
model = widget.get_model()
active = widget.get_active()
idx = 0
for row in model:
if row[self.OPTICAL_DEV_KEY] == key:
# Whole device removed
del(model[idx])
if idx > active and active != -1:
widget.set_active(active - 1)
elif idx == active:
widget.set_active(-1)
idx += 1
self._set_mediadev_default()
self._mediadev_set_default_selection()
def _populate_media(self):
if self._populated:
return
widget = self.combo
model = widget.get_model()
model.clear()
self._set_mediadev_default()
self.conn.connect("mediadev-added", self._mediadev_added)
self.conn.connect("mediadev-removed", self._mediadev_removed)
widget.set_active(-1)
self._mediadev_set_default_selection()
self._populated = True
##############
# Public API #
##############
def reset_state(self):
self._populate_media()
def get_path(self):
return uiutil.get_list_selection(self.combo, self.OPTICAL_DEV_PATH)
def has_media(self):
return uiutil.get_list_selection(self.combo, self.OPTICAL_HAS_MEDIA)
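# Hedged usage sketch (not part of the original module): the widget is normally built
# by other virt-manager dialogs that already hold a connection and a Gtk.Builder; the
# 'media_type' value below is an assumption:
#   combo = vmmMediaCombo(conn, builder, topwin, media_type="cdrom")
#   parent_box.pack_start(combo.top_box, False, False, 0)
#   combo.reset_state()            # populates the device list on first use
#   path = combo.get_path()        # currently selected device node, or None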
| gpl-2.0 | 7,790,347,630,391,545,000 | 30.278027 | 79 | 0.590108 | false | 3.739946 | false | false | false |
AKAMobi/ibot | ibot-kernel/ibot/extractor/startup.py | 1 | 4162 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from common import get_regs, get_compile_regs
import re
REGS = get_compile_regs(get_regs('startup'))
def get_startup(document):
""" 抽取项目名
@document: Document对象 chunk
@rtype: str 项目名
"""
global REGS
startup = ''
max_length = 20
    # List of regular expressions used to screen the project name after the file-format suffix is stripped
for sentence in document.sentences:
text = sentence.raw
        # The part before the file-format extension contains the project name
searchObj = re.search(
r'附件[内容]*[::]?(.*)(\.pp|\.do|\.pdf|\.wps)', text)
        # If the match is not empty
if searchObj:
            # Take the part containing the project name and match it against the rules in REGS
new_text = searchObj.group(1)
startup = new_text
for every_re in REGS:
new_searchObj = re.search(every_re, new_text)
if new_searchObj and startup == new_text:
                    # If the field before the keyword is at least 2 characters long, it is the project name
if len(new_searchObj.group(1)) >= 2:
startup = new_searchObj.group(1)
break
                    # Otherwise, the part after the keyword is the project name
else:
startup = new_searchObj.group(2)
break
    # Strip various symbols and markers from the project name
    # Remove 'BP'
startup = startup.replace('BP', '')
    # Remove a leading '-'
matchObj = re.match('^-*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
    # Remove a leading '_'
matchObj = re.match(u'^_*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
    # Remove a leading '——'
matchObj = re.match(u'^——*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
    # Remove a leading '.'
matchObj = re.match(r'^\.*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
    # Remove a version suffix
    matchObj = re.match(r'(.*)v[0-9]*', startup)
    if matchObj:
        startup = matchObj.group(1)
    matchObj = re.match(r'(.*)V[0-9]*', startup)
if matchObj:
startup = matchObj.group(1)
    # Remove trailing '-', '_', '.' and similar symbols
matchObj = re.match(r'(.*)_$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)_$', startup)
matchObj = re.match(r'(.*)-$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)-$', startup)
matchObj = re.match(r'(.*)\.$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)\.$', startup)
matchObj = re.match(r'(.*)―$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)―$', startup)
    # Remove trailing funding-round markers such as 'PreA轮' or 'B轮'
startup = re.sub(u'PreA轮.*', '', startup)
startup = re.sub(u'Pre-A轮.*', '', startup)
startup = re.sub(u'A轮.*', '', startup)
startup = re.sub(u'B轮.*', '', startup)
startup = re.sub(u'C轮.*', '', startup)
startup = re.sub(u'D轮.*', '', startup)
startup = re.sub(u'天使轮.*', '', startup)
    # Remove '《' and '》'
startup = startup.replace(u'《', '')
startup = startup.replace(u'》', '')
    # Remove 'APP'
startup = startup.replace(u'APP', '')
    # Remove a trailing '项目' ("project")
startup = startup.replace(u'项目', '')
    # Remove a trailing '网站' ("website")
    startup = startup.replace(u'网站', '')
    startup = re.sub(r'\s*阅读版', '', startup)
startup = re.sub(r'\d{4,11}[-_.\d]*', '', startup)
    # If the text contains the keyword '项目名称' ("project name"), take what follows it as the project name
searchObj = re.search(u'项目名称:(.{2,})', text)
if searchObj:
startup = searchObj.group(1)
if len(startup) > max_length:
        startup = ''
return startup
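# Hedged usage sketch (not part of the original module): 'document' is expected to
# expose a .sentences iterable whose items carry raw text in .raw:
#   name = get_startup(document)   # returns the extracted project name, or '' if none was found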
| apache-2.0 | -2,418,756,691,669,638,000 | 25.984496 | 61 | 0.508587 | false | 2.625455 | false | false | false |
vinu76jsr/django-memoize | setup.py | 1 | 1282 | #!/usr/bin/env python
"""
django-memoize
--------------
**django-memoize** is an implementation
of `memoization <http://en.wikipedia.org/wiki/Memoization>`_ technique
for Django. You can think of it as a cache for function or method results.
"""
from setuptools import setup, find_packages
setup(
name='django-memoize',
version='1.1.2',
url='https://github.com/tvavrys/django-memoize',
license='BSD',
author='Thomas Vavrys',
author_email='[email protected]',
description='An implementation of memoization technique for Django.',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
install_requires=[
'django >= 1.4'
],
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='setuptest.setuptest.SetupTestSuite',
tests_require=(
'django-setuptest',
'argparse', # Required by django-setuptools on Python 2.6
),
)
| bsd-3-clause | -3,856,313,471,149,989,000 | 28.813953 | 74 | 0.634165 | false | 3.981366 | true | false | false |
kinsney/sport | sport/settings.py | 1 | 4682 | #coding:utf-8
# -*- coding: utf-8 -*-
"""
Django settings for sport project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
#coding: utf-8
# -*- coding: utf-8 -*-
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0#92e&xud-w5ry-6k6c^n#5s8hj+6@(8kmwz5=aj%aplilu3k1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["101.200.145.32",'localhost']
SITE_URL = "http://101.200.145.32"
# Application definition
INSTALLED_APPS = [
'constance',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'constance.backends.database',
'bike',
'order',
'participator',
'advertisement',
'message'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'participator.hashers.DoubleMD5PasswordHasher',
]
ROOT_URLCONF = 'sport.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'sport/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sport.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sport',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "sport/static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
OSS_MEDIA_URL = ''
TEST_OSS_MEDIA_URL = ''
# django
# http://django-constance.readthedocs.org/
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_CONFIG = {
'Brokerage':(0.1,u'每单所收佣金'),
'VerificationCodeLength':(6,u'验证码长度'),
'VerificationAvailableMinutes':(5,u'验证码有效时间'),
'IPMessageLimit':(100,u'每个IP每日允许发送的信息最大值'),
'VerificationCodeTemplate': (u'【%s】',
u'用“%s”来替换要发送的验证码。'),
'bikeNumberLength':(13,u'单车编号长度'),
'orderNumberLength':(13,u'订单编号长度'),
'withdrawRate':(0.1,'撤单利率'),
'numbersPerRequest':(12,'每次请求得到的数目')
}
CONSTANCE_SUPERUSER_ONLY = True
| mit | -3,694,405,221,567,587,000 | 25.526316 | 91 | 0.676808 | false | 3.130435 | false | false | false |
demitri/cornish | source/cornish/region/region.py | 1 | 28830 |
from __future__ import annotations # remove in Python 3.10
# Needed for forward references, see:
# https://stackoverflow.com/a/33533514/2712652
import logging
from abc import ABCMeta, abstractproperty, abstractmethod
from typing import Union, Iterable, Tuple
import math
from math import radians as deg2rad
from math import degrees as rad2deg
import numpy as np
import astropy
import astropy.units as u
import starlink
import starlink.Ast as Ast
import cornish.region # to avoid circular imports below - better way?
from ..mapping import ASTFrame, ASTFrameSet, ASTMapping
from ..exc import NotA2DRegion, CoordinateSystemsCouldNotBeMapped
from ..constants import AST_SURFACE_MESH, AST_BOUNDARY_MESH
__all__ = ['ASTRegion']
logger = logging.getLogger("cornish")
'''
Copied from documentation, to be implemented.
Properties of ASTRegion over those from ASTFrame
* Adaptive: Should the area adapt to changes in the coordinate system?
* Negated: Has the original region been negated?
* Closed: Should the boundary be considered to be inside the region?
* MeshSize: Number of points used to create a mesh covering the Region
* FillFactor: Fraction of the Region which is of interest
* Bounded: Is the Region bounded?
Region-specific methods:
* astGetRegionBounds: Get the bounds of a Region
* astGetRegionFrame: Get a copy of the Frame represent by a Region
* astGetRegionFrameSet: Get a copy of the Frameset encapsulated by a Region
* astGetRegionMesh: Get a mesh of points covering a Region
* astGetRegionPoints: Get the positions that define a Region
* astGetUnc: Obtain uncertainty information from a Region
* astMapRegion: Transform a Region into a new coordinate system
* astNegate: Toggle the value of the Negated attribute
* astOverlap: Determines the nature of the overlap between two Regions
* astMask<X>: Mask a region of a data grid
* astSetUnc: Associate a new uncertainty with a Region
* astShowMesh: Display a mesh of points on the surface of a Region
'''
class ASTRegion(ASTFrame, metaclass=ABCMeta):
'''
Represents a region within a coordinate system.
This is an abstract superclass - there is no supported means to create an ASTRegion object directly
(see :py:class:`ASTBox`, :py:class:`ASTPolygon`, etc.).
Accepted signatures for creating an ASTRegion:
.. code-block:: python
r = ASTRegion(ast_object)
	:param ast_object: an existing :py:class:`starlink.Ast.Region` object to wrap
	:param uncertainty: optional region describing the uncertainty of the boundary
'''
def __init__(self, ast_object:starlink.Ast.Region=None, uncertainty=None):
super().__init__(ast_object=ast_object)
self._uncertainty = uncertainty
def __add__(self, other_region):
# TODO: check data type, handle both ASTRegion and the ast_object region?
from .compound_region import ASTCompoundRegion # forward import to avoid circular import error
return ASTCompoundRegion(regions=[self, other_region], operation=Ast.AND)
@classmethod
def fromFITSHeader(cls, fits_header=None, uncertainty:float=4.848e-6):
'''
Factory method to create a region from the provided FITS header; the returned object will be as specific as possible (but probably an :py:class:`ASTPolygon`).
The frame is determined from the FITS header.
:param fits_header: a FITS header (Astropy, fitsio, an array of cards)
:param uncertainty: defaults to 4.848e-6, an uncertainty of 1 arcsec
'''
if fits_header is None:
raise ValueError("This method requires a 'fits_header' to be set.")
# import here to avoid circular imports
from .box import ASTBox
from .circle import ASTCircle
from .polygon import ASTPolygon
from ..channel import ASTFITSChannel
# get frame from header
fits_channel = ASTFITSChannel(header=fits_header)
# does this channel contain a frame set?
frame_set = fits_channel.frameSet
if frame_set is None:
raise ValueError("The provided FITS header does not describe a region (e.g. not an image, does not contain a WCS that AST can read).")
frame = frame_set.baseFrame
# support n-dimensional regions
# define needed parameters for region creation below
dimensions = fits_channel.dimensions
n_dim = len(dimensions)
cornerPoint = [0.5 for x in range(n_dim)]
cornerPoint2 = [dimensions[x] + 0.5 for x in range(n_dim)]
#cornerPoint=[0.5,0.5], # center of lower left pixel
#cornerPoint2=[dimensions[0]+0.5, dimensions[1]+0.5])
if n_dim > 2:
raise NotImplementedError("HDUs describing dimensions greater than 2 not currently supported.")
#if isinstance(frame, ASTFrame):
# self.frame = frame
#elif isinstance(frame, starlink.Ast.Frame):
# self.frame = ASTFrame(frame=frame)
#else:
# raise Exception("ASTBox: unexpected frame type specified ('{0}').".format(type(frame)))
#if all([cornerPoint,centerPoint]) or all([cornerPoint,cornerPoint2]) or dimensions is not None:
# if dimensions is not None:
# input_form = CORNER_CORNER
# p1 = [0.5,0.5] # use 0.5 to specify the center of each pixel
# p2 = [dimensions[0]+0.5, dimensions[1]+0.5]
# elif centerPoint is None:
# input_form = CORNER_CORNER
# p1 = [cornerPoint[0], cornerPoint[1]]
# p2 = [cornerPoint2[0], cornerPoint2[1]]
# dimensions = [math.fabs(cornerPoint[0] - cornerPoint2[0]),
# math.fabs(cornerPoint[1] - cornerPoint2[1])]
# else:
# input_form = CENTER_CORNER
# p1 = [centerPoint[0], centerPoint[1]]
# p2 = [cornerPoint[0], cornerPoint[1]]
# dimensions = [2.0 * math.fabs(centerPoint[0] - cornerPoint[0]),
# 2.0 * math.fabs(centerPoint[1] - cornerPoint[1])]
# input_form constants (define properly elsewhere?)
CENTER_CORNER = 0
CORNER_CORNER = 1
input_form = CORNER_CORNER
p1 = [cornerPoint[0], cornerPoint[1]]
p2 = [cornerPoint2[0], cornerPoint2[1]]
dimensions = [math.fabs(cornerPoint[0] - cornerPoint2[0]),
math.fabs(cornerPoint[1] - cornerPoint2[1])]
#dimensions = [dimensions[0], dimensions[1]]
#logger.debug("Setting dims: {0}".format(self.dimensions))
ast_object = Ast.Box( frame.astObject, input_form, p1, p2, unc=uncertainty )
# create the mapping from pixel to sky (or whatever is there) if available
mapping = frame_set.astObject.getmapping() # defaults are good
current_frame = frame_set.astObject.getframe(starlink.Ast.CURRENT)
# create a new region with new mapping
ast_object = ast_object.mapregion(mapping, current_frame)
if isinstance(ast_object, Ast.Box):
from .box import ASTBox # avoid circular imports
return ASTBox(ast_object=ast_object)
elif isinstance(ast_object, Ast.Circle):
from .circle import ASTCircle # avoid circular imports
return ASTCircle(ast_object=ast_object)
elif isinstance(ast_object, Ast.Polygon):
return ASTPolygon(ast_object=ast_object)
else:
raise Exception(f"Unexpected region type encountered: {type(ast_object)}.")
@property
def points(self) -> np.ndarray:
'''
The array of points that define the region. The interpretation of the points is dependent on the type of shape in question.
Box: returns two points; the center and a box corner.
Circle: returns two points; the center and a point on the circumference.
CmpRegion: no points returned; to get points that define a compound region, call this method on each of the component regions via the method "decompose".
Ellipse: three points: 1) center, 2) end of one axis, 3) end of the other axis
Interval: two points: 1) lower bounds position, 2) upper bounds position (reversed when interval is an excluded interval)
NullRegion: no points
PointList: positions that the list was constructed with
Polygon: vertex positions used to construct the polygon
Prism: no points (see CmpRegion)
NOTE: points returned reflect the current coordinate system and may be different from the initial construction
:returns: NumPy array of coordinate points in degrees, shape (ncoord,2), e.g. [[ra1,dec1], [ra2, dec2], ..., [ra_n, dec_n]]
'''
# getregionpoints returns data as [[x1, x2, ..., xn], [y1, y2, ..., yn]]
# transpose the points before returning
# also, normalize points to expected bounds
region_points = self.astObject.getregionpoints()
if self.isNegated:
# reverse order to reflect the definition of the polygon
region_points = np.fliplr(region_points)
if self.frame().isSkyFrame:
#return np.rad2deg(self.astObject.norm(self.astObject.getregionpoints())).T
return np.rad2deg(self.astObject.norm(region_points)).T
else:
#return self.astObject.getregionpoints().T
return region_points.T
@property
def uncertainty(self):
'''
		Uncertainties associated with the boundary of the region.
		The uncertainty in any point on the boundary of the region is found by
shifting the supplied "uncertainty" Region so that it is centered at
the boundary point being considered. The area covered by the shifted
uncertainty Region then represents the uncertainty in the boundary
position. The uncertainty is assumed to be the same for all points.
'''
return self._uncertainty
@uncertainty.setter
def uncertainty(self, unc):
raise Exception("Setting the uncertainty currently doesn't do anything.")
self._uncertainty = unc
@property
def isAdaptive(self):
'''
		Boolean attribute that indicates whether the area adapts to changes in the coordinate system.
'''
return self.astObject.get("Adaptive") == "1"
@isAdaptive.setter
def isAdaptive(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Adaptive=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Adaptive=0")
else:
raise Exception("ASTRegion.adaptive property must be one of [True, False, 1, 0].")
@property
def isNegated(self):
''' Boolean attribute that indicates whether the original region has been negated. '''
return self.astObject.get("Negated") == "1"
@isNegated.setter
def isNegated(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Negated=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Negated=0")
else:
raise Exception("ASTRegion.isNegated property must be one of [True, False, 1, 0].")
def negate(self):
''' Negate the region, i.e. points inside the region will be outside, and vice versa. '''
self.astObject.negate()
@property
def isClosed(self) -> bool:
'''
		Boolean attribute that indicates whether the boundary should be considered to be inside the region.
'''
return self.astObject.get("Closed") == "1"
@isClosed.setter
def isClosed(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Closed=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Closed=0")
else:
raise Exception("ASTRegion.isClosed property must be one of [True, False, 1, 0].")
@property
def isBounded(self) -> bool:
''' Boolean attribute that indicates whether the region is bounded. '''
return self.astObject.get("Bounded") == "1"
@isBounded.setter
def isBounded(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Bounded=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Bounded=0")
else:
raise Exception("ASTRegion.isBounded property must be one of [True, False, 1, 0].")
def frame(self) -> ASTFrame:
'''
Returns a copy of the frame encapsulated by this region.
Note that the frame is not directly accessible; both this method and the underlying ``starlink-pyast`` function returns a copy.
'''
# this is not a property since a new object is being returned.
ast_frame = self.astObject.getregionframe() # "A pointer to a deep copy of the Frame represented by the Region."
return ASTFrame.frameFromAstObject(ast_frame)
def frameSet(self) -> ASTFrameSet:
'''
Returns a copy of the frameset encapsulated by this region.
From AST docs:
::
The base Frame is the Frame in which the box was originally
defined, and the current Frame is the Frame into which the
Region is currently mapped (i.e. it will be the same as the
Frame returned by astGetRegionFrame).
'''
raise NotImplementedError("getregionframeset() has not yet been exposed to the Python interface.")
return ASTFrameSet(ast_object=self.astObject.getregionframeset())
@property
def meshSize(self) -> int:
''' Number of points used to create a mesh covering the region. '''
#return int(self.astObject.get("MeshSize"))
return int(self.astObject.MeshSize)
@meshSize.setter
def meshSize(self, newValue:int):
if isinstance(newValue, int):
if newValue < 5:
newValue = 5
self.astObject.set("MeshSize={0}".format(newValue))
else:
raise Exception("ASTRegion.meshSize: an integer value of at least 5 is required.")
@property
def fillFactor(self):
''' <Fraction of the Region which is of interest> '''
# TODO: properly document, see p. 812 of documentation
return self.astObject.get("FillFactor")
@fillFactor.setter
def fillFactor(self, newValue):
raise Exception("TODO: document and implement")
@property
def bounds(self) -> Tuple:
'''
Returns lower and upper coordinate points that bound this region.
'''
lower_bounds, upper_bounds = self.astObject.getregionbounds()
# lower_bounds and upper_bounds are n-dimensional arrays
# e.g. for a 2D image,
# [-10,5], [10,20] <- (ra, dec) or pixel bounds
if self.frame().isSkyFrame:
lower_bounds = np.rad2deg(self.astObject.norm(lower_bounds))
upper_bounds = np.rad2deg(self.astObject.norm(upper_bounds))
return (lower_bounds, upper_bounds)
def boundingBox(self) -> ASTBox:
'''
Returns an ASTBox region that bounds this region where the box sides align with RA, dec.
'''
from cornish import ASTBox # import here to avoid circular import
return ASTBox.fromCorners(frame=self.frame(), corners=self.bounds)
#raise NotImplementedError()
# use the "bounds" method above to create a bounding box
def boundingCircle(self) -> ASTCircle:
'''
Returns the smallest circle (:py:class:`ASTCircle`) that bounds this region.
It is up to the caller to know that this is a 2D region (only minimal checks are made).
:raises cornish.exc.NotA2DRegion: raised when attempting to get a bounding circle for a region that is not 2D
'''
if self.naxes != 2:
raise NotA2DRegion(f"A bounding circle can only be computed on a 2D region; this region has {self.naxes} axes.")
from .circle import ASTCircle
centre, radius = self.astObject.getregiondisc() # returns radians
return ASTCircle(frame=self, center=np.rad2deg(centre), radius=rad2deg(radius))
def overlaps(self, region) -> bool:
'''
Return ``True`` if this region overlaps with the provided one.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
elif return_value == 1:
return False # no overlap
elif return_value == 2:
			return True # this region is completely inside the provided region
elif return_value == 3:
			return True # the provided region is completely inside this region
elif return_value == 4:
return True # there is partial overlap
elif return_value == 5:
			return True # the regions are identical to within their uncertainties
elif return_value == 6:
return False # the second region is the exact negation of this region to within their uncertainties
def isIdenticalTo(self, region:ASTRegion) -> bool:
'''
Returns 'True' if this region is identical (to within their uncertainties) to the provided region, 'False' otherwise.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 5
def isFullyWithin(self, region:ASTRegion) -> bool:
'''
Returns 'True' if this region is fully within the provided region.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 2
def fullyEncloses(self, region:ASTRegion) -> bool:
'''
Returns 'True' if this region fully encloses the provided region.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 3
def isNegationOf(self, region):
'''
Returns 'True' if this region is the exact negation of the provided region.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 6
def maskOnto(self, image=None, mapping=None, fits_coordinates:bool=True, lower_bounds=None, mask_inside=True, mask_value=float("NaN")):
'''
Apply this region as a mask on top of the provided image; note: the image values are overwritten!
:param image: numpy.ndarray of pixel values (or other array of values)
:param mapping: mapping from this region to the pixel coordinates of the provided image
:param fits_coordinates: use the pixel coordinates of a FITS file (i.e. origin = [0.5, 0.5] for 2D)
:param lower_bounds: lower bounds of provided image, only specify if not using FITS coordinates
:param mask_inside: True: mask the inside of this region; False: mask outside of this region
:param mask_value: the value to set the masked image pixels to
:returns: number of pixels in image masked
'''
# coded now for numpy arrays, but set ndim,shape for anything else
ndim = len(image.shape)
shape = image.shape # <-- unused variable!
# assert number of axes in image == # of outputs in the mapping
if ndim != mapping.number_of_output_coordinates:
raise Exception(f"The number of dimensions in the provided image ({ndim}) does not match the number of output dimensions of the provided mapping ({mapping.number_of_output_coordinates}).")
if fits_coordinates:
# use the pixel coordinates for FITS files -> origin at [0.5, 0.5]
lower_bounds = [0.5 for x in range(ndim)]
else:
# must use provided lower bounds
if lower_bounds is None:
raise ValueError("'lower_bounds' must be provided (or specify 'fits_coordinates=True' to use FITS coordinates.")
# upper_bounds = list()
# for idx, n in enumerate(shape):
# upper_bounds.append(lower_bounds[idx] + n)
npix_masked = self.astObject.mask(mapping.astObject, mask_inside, lower_bounds, image, mask_value)
return npix_masked
def regionWithMapping(self, map=None, frame=None) -> ASTRegion:
'''
Returns a new ASTRegion with the coordinate system from the supplied frame.
Corresponds to the ``astMapRegion`` C function (``starlink.Ast.mapregion``).
:param map: A mapping that can convert coordinates from the system of the current region to that of the supplied frame.
:param frame: A frame containing the coordinate system for the new region.
:returns: new ASTRegion with a new coordinate system
'''
if frame is None:
raise Exception("A frame must be specified.")
if map is None:
map = frame # attempt to use the frame as a mapper (an ASTFrame is a subclass of ASTMapper)
# Would be nice to be able to create an instance of the same subclass of ASTRegion
# - how to inspect the object for this information?
if isinstance(map, starlink.Ast.Mapping):
ast_map = map
elif isinstance(map, (ASTMapping, ASTFrameSet)): # frame sets contain mappings
ast_map = map.astObject
else:
raise Exception("Expected 'map' to be one of these two types: starlink.Ast.Mapping, ASTMap.")
if isinstance(frame, starlink.Ast.Frame):
ast_frame = frame
		elif isinstance(frame, (ASTFrame, ASTFrameSet)):
ast_frame = frame.astObject
else:
raise Exception("Expected 'frame' to be one of these two types: starlink.Ast.Frame, ASTFrame.")
new_ast_region = self.astObject.mapregion(ast_map, ast_frame)
# This is temporary and probably fragile. Find a replacement for this ASAP.
# get the returned region type to create the correct wrapper
#
# -> make a deep copy, replace obj.astObject with new one (check any properties)
#
if new_ast_region.Class == 'Polygon' or isinstance(new_ast_region, starlink.Ast.Polygon):
return cornish.region.ASTPolygon(ast_object=new_ast_region)
elif new_ast_region.Class == 'Box' or isinstance(new_ast_region, starlink.Ast.Box):
return cornish.region.ASTBox(ast_object=new_ast_region)
else:
raise Exception("ASTRegion.regionWithMapping: unhandled region type (easy fix).")
def mapRegionMesh(self, mapping=None, frame=None):
'''
Returns a new ASTRegion that is the same as this one but with the specified coordinate system.
Parameters
----------
mapping : `~cornish.mapping.ASTMapping` class
The mapping to transform positions from the current ASTRegion to those specified by the given frame.
frame : `~cornish.frame.ASTFrame` class
Coordinate system frame to convert the current ASTRegion to.
Returns
-------
region : `ASTRegion`
A new region object covering the same area but in the frame specified in `frame`.
Raises
------
Exception
An exception is raised for missing parameters.
'''
if mapping is None or frame is None:
raise Exception("A mapping and frame is required to be passed to 'mapRegion'.")
# check it's the correct type
if not isinstance(mapping, ASTMapping):
raise Exception("The object passed to 'mapping' needs to be an ASTMapping object.")
if not isinstance(frame, ASTFrame):
raise Exception("The object passed to 'frame' needs to be an ASTFrame object.")
self.astObject.mapregionmesh( mapping, frame )
def boundaryPointMesh(self, npoints:int=None) -> np.ndarray:
'''
Returns an array of evenly distributed points that cover the boundary of the region.
For example, if the region is a box, it will generate a list of points that trace the edges of the box.
The default value of 'npoints' is 200 for 2D regions and 2000 for three or more dimensions.
:param npoints: the approximate number of points to generate in the mesh
:returns: list of points in degrees
'''
# The starlink.AST object uses the attribute "MeshSize" to determine the number of points to
# use. This should be specified when building the mesh - the attribute doesn't seem to be used
		# anywhere else. This method shouldn't change the value in case that's not true, so the
		# original "MeshSize" is saved here and restored after the mesh is built.
#if npoints is not None and not isinstance(npoints, int):
# raise Exception("The parameter 'npoints' must be an integer ('{1}' provided).".format(npoints))
if npoints is None:
pass # use default meshSize value
else:
# use provided value
#old_mesh_size = self.astObject.get("MeshSize")
#self.astObject.set("MeshSize={0}".format(npoints))
old_mesh_size = self.meshSize
self.meshSize = npoints
try:
mesh = self.astObject.getregionmesh(AST_BOUNDARY_MESH) # here "surface" means the boundary
# if a basic frame is used instead of a sky frame, the points need to be normalized on [0,360)
mesh = self.astObject.norm(mesh)
except Ast.MBBNF as e:
print(f"AST error: Mapping bounding box not found. ({e})")
raise e
if npoints is not None:
# restore original value
self.meshSize = old_mesh_size #self.astObject.set("MeshSize={0}".format(old_mesh_size))
if self.frame().isSkyFrame:
return np.rad2deg(mesh).T # returns as a list of pairs of points, not two parallel arrays
else:
return mesh.T
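	# Hedged usage sketch: sampling roughly 100 evenly spaced boundary points (e.g. for
	# plotting), assuming 'region' is a concrete 2D sky-frame subclass instance:
	#   points = region.boundaryPointMesh(npoints=100)   # ndarray of [ra, dec] pairs in degrees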
def interiorPointMesh(self, npoints:int=None):
'''
Returns an array of evenly distributed points that cover the surface of the region.
For example, if the region is a box, it will generate a list of points that fill the interior area of the box.
The default value of 'npoints' is 200 for 2D regions and 2000 for three or more dimensions.
:param npoints: the approximate number of points to generate in the mesh
:returns: array of points in degrees
'''
# See discussion of "MeshSize" in method "boundaryPointMesh".
if npoints is not None and not isinstance(npoints, int):
raise Exception(f"The parameter 'npoints' must be an integer ('{type(npoints)}' provided).")
if npoints is None:
pass # use default value
else:
# use provided value
old_mesh_size = self.astObject.get("MeshSize")
self.astObject.set("MeshSize={0}".format(npoints))
# The returned "points" array from getregionmesh() will be a 2-dimensional array with shape (ncoord,npoint),
# where "ncoord" is the number of axes within the Frame represented by the Region,
# and "npoint" is the number of points in the mesh (see attribute "MeshSize").
mesh = self.astObject.getregionmesh(AST_SURFACE_MESH) # here "surface" means the interior
mesh = self.astObject.norm(mesh)
if npoints is not None:
# restore original value
self.astObject.set("MeshSize={0}".format(old_mesh_size))
if self.frame().isSkyFrame:
mesh = np.rad2deg(mesh)
return mesh.T
def containsPoint(self, point:Union[Iterable, astropy.coordinates.SkyCoord]=None) -> bool:
'''
Returns ``True`` if the provided point lies inside this region, ``False`` otherwise.
This method is a direct synonym for :meth:`pointInRegion`.
The name "containsPoint" is more appropriate from an object perspective,
but the ``pointInRegion`` method is present for consistency with the AST library.
:param point: a coordinate point in the same frame as this region
'''
return self.pointInRegion(point=point)
def pointInRegion(self, point:Union[Iterable, astropy.coordinates.SkyCoord,np.ndarray]=None) -> bool:
'''
Returns ``True`` if the provided point lies inside this region, ``False`` otherwise.
        If no units are specified, degrees are assumed.
:param point: a coordinate point in the same frame as this region
'''
if point is None:
raise ValueError("A test point must be specified.")
if isinstance(point, astropy.coordinates.SkyCoord):
point = [point.ra.to(u.rad).value, point.dec.to(u.rad).value]
else:
point = np.deg2rad(point)
return self.astObject.pointinregion(point)
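    # Illustrative usage of pointInRegion()/containsPoint() (not from the original
    # module; the 'region' object and coordinate values are assumed):
    #
    #   import astropy.units as u
    #   from astropy.coordinates import SkyCoord
    #   region.containsPoint(SkyCoord(ra=12.5 * u.deg, dec=-45.0 * u.deg))
    #   region.pointInRegion([12.5, -45.0])  # bare values are taken to be degrees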
@abstractproperty
def area(self) -> astropy.units.quantity.Quantity:
# subclasses must implement
raise NotImplementedError()
@abstractmethod
def toPolygon(self, npoints=200, maxerr:astropy.units.Quantity=1.0*u.arcsec) -> ASTPolygon:
'''
Method that guarantees returning a polygon that describes or approximates this region.
This method provides a common interface to create polygons from different region types.
Calling this on an ASTPolygon will return itself; calling it on an ASTCircle
will return a polygon that approximates the circle. The parameters 'npoints' and
'maxerr' will be used only when appropriate.
:param npoints: number of points to sample from the region's boundary for the resulting polygon
        :param maxerr: maximum deviation allowed when approximating this region as a polygon (used only where applicable, e.g. for circles)
'''
pass
| mit | -7,151,365,176,470,967,000 | 37.906883 | 191 | 0.728755 | false | 3.47475 | false | false | false |
Coderhypo/UIAMS | manage.py | 1 | 1085 | #-*- coding: UTF-8 -*-
import os
from app import app, db
from app.models import User, Role
from flask.ext.script import Manager, Server, Shell
from flask.ext.migrate import Migrate, MigrateCommand
manager = Manager(app)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("runserver", Server(host="0.0.0.0", port=2000))
manager.add_command("shell", Shell(make_context=make_shell_context))
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def deploy():
from flask.ext.migrate import upgrade
from app.models import Role, User
# migrate database to latest revision
# upgrade()
db.drop_all()
db.create_all()
try:
Role.insert_roles()
r = Role.query.filter_by(role_name = u'管理员').first()
u = User('admin', 'admin')
u.role = r
u.password = '123'
db.session.add(u)
db.session.commit()
    except Exception as e:
print e
db.session.rollback()
if __name__ == '__main__':
manager.run()
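# Illustrative command-line usage (not part of the original file; the script
# name is assumed to be manage.py):
#   python manage.py runserver   # serve on 0.0.0.0:2000
#   python manage.py deploy      # recreate tables and seed the admin user
#   python manage.py db migrate  # Flask-Migrate commands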
| lgpl-3.0 | -1,176,110,634,197,252,400 | 24.690476 | 68 | 0.639481 | false | 3.340557 | false | false | false |
maestromusic/maestro | maestro/core/domains.py | 1 | 5562 | # -*- coding: utf-8 -*-
# Maestro Music Manager - https://github.com/maestromusic/maestro
# Copyright (C) 2014-2015 Martin Altmayer, Michael Helmling
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
from PyQt5 import QtCore, QtGui, QtWidgets
translate = QtCore.QCoreApplication.translate
from .. import application, database as db, logging, stack
from ..application import ChangeEvent, ChangeType
domains = []
# Maximum length of encoded domain names.
MAX_NAME_LENGTH = 63
def default():
return domains[0]
def isValidName(name):
return name == name.strip() and 0 < len(name.encode()) <= MAX_NAME_LENGTH
def exists(name):
return any(domain.name == name for domain in domains)
def domainById(id: int):
for domain in domains:
if domain.id == id:
return domain
else: return None
def domainByName(name):
for domain in domains:
if domain.name == name:
return domain
else: return None
def init():
if db.prefix+'domains' not in db.listTables():
logging.error(__name__, "domains-table is missing")
raise RuntimeError()
result = db.query("SELECT id, name FROM {p}domains ORDER BY name")
for row in result:
domains.append(Domain(*row))
class Domain:
def __init__(self, id, name):
self.id = id
self.name = name
def __repr__(self):
return "<Domain {}>".format(self.name)
def addDomain(name):
"""Add a new domain with the given name to the database. Return the new domain."""
if exists(name):
raise ValueError("There is already a domain with name '{}'.".format(name))
if not isValidName(name):
raise ValueError("'{}' is not a valid domain name.".format(name))
domain = Domain(None, name)
stack.push(translate("Domains", "Add domain"),
stack.Call(_addDomain, domain),
stack.Call(_deleteDomain, domain))
return domain
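# Illustrative usage (not part of the original module; the domain names are
# assumed for the example):
#   new_domain = addDomain("Audiobooks")   # pushes an undoable command on the stack
#   changeDomain(new_domain, name="Podcasts")
#   deleteDomain(new_domain)               # only allowed once no elements use it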
def _addDomain(domain):
"""Add a domain to database and some internal lists and emit a DomainChanged-event. If *domain*
doesn't have an id, choose an unused one.
"""
if domain.id is None:
domain.id = db.query("INSERT INTO {p}domains (name) VALUES (?)", domain.name).insertId()
else: db.query("INSERT INTO {p}domains (id, name) VALUES (?,?)", domain.id, domain.name)
logging.info(__name__, "Added new domain '{}'".format(domain.name))
domains.append(domain)
application.dispatcher.emit(DomainChangeEvent(ChangeType.added, domain))
def deleteDomain(domain):
"""Delete a domain from all elements and the database."""
stack.push(translate("Domains", "Delete domain"),
stack.Call(_deleteDomain, domain),
stack.Call(_addDomain, domain))
def _deleteDomain(domain):
"""Like deleteDomain but not undoable."""
assert db.query("SELECT COUNT(*) FROM {p}elements WHERE domain=?", domain.id).getSingle() == 0
if domains == [domain]:
raise RuntimeError("Cannot delete last domain.")
logging.info(__name__, "Deleting domain '{}'.".format(domain))
db.query("DELETE FROM {p}domains WHERE id = ?", domain.id)
domains.remove(domain)
application.dispatcher.emit(DomainChangeEvent(ChangeType.deleted, domain))
def changeDomain(domain, **data):
"""Change a domain. The attributes that should be changed must be specified by keyword arguments.
Currently only 'name' is supported.
"""
oldData = {'name': domain.name}
stack.push(translate("Domains ", "Change domain"),
stack.Call(_changeDomain, domain, **data),
stack.Call(_changeDomain, domain, **oldData))
def _changeDomain(domain, **data):
"""Like changeDomain but not undoable."""
    # Below we will build a query like "UPDATE domains SET ..." from the list of assignments
    # (e.g. "name = ?"). The parameters will be sent with the query to replace the question marks.
assignments = []
params = []
if 'name' in data:
name = data['name']
if name != domain.name:
if exists(name):
raise ValueError("There is already a domain named '{}'.".format(name))
logging.info(__name__, "Changing domain name '{}' to '{}'.".format(domain.name, name))
assignments.append('name = ?')
params.append(name)
domain.name = name
if len(assignments) > 0:
params.append(domain.id) # for the where clause
db.query("UPDATE {p}domains SET "+','.join(assignments)+" WHERE id = ?", *params)
application.dispatcher.emit(DomainChangeEvent(ChangeType.changed, domain))
class DomainChangeEvent(ChangeEvent):
"""DomainChangeEvents are used when a domain is added, changed or deleted."""
def __init__(self, action, domain):
assert isinstance(action, ChangeType)
self.action = action
self.domain = domain
| gpl-3.0 | -4,220,954,320,742,945,000 | 33.546584 | 107 | 0.647609 | false | 4.050983 | false | false | false |
PaloAltoNetworks/minemeld-core | minemeld/flask/config.py | 1 | 5920 | # Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gevent
import yaml
import filelock
import passlib.apache
from . import utils
from .logger import LOG
CONFIG = {}
API_CONFIG_PATH = None
API_CONFIG_LOCK = None
CONFIG_FILES_RE = '^(?:(?:[0-9]+.*\.yml)|(?:.*\.htpasswd))$'
# if you change things here change also backup/import API
_AUTH_DBS = {
'USERS_DB': 'wsgi.htpasswd',
'FEEDS_USERS_DB': 'feeds.htpasswd'
}
def get(key, default=None):
try:
result = CONFIG[key]
except KeyError:
pass
else:
return result
try:
result = os.environ[key]
except KeyError:
pass
else:
if result == 'False':
result = False
if result == 'True':
result = True
return result
return default
def store(file, value):
with API_CONFIG_LOCK.acquire():
with open(os.path.join(API_CONFIG_PATH, file), 'w+') as f:
yaml.safe_dump(value, stream=f)
def lock():
return API_CONFIG_LOCK.acquire()
class APIConfigDict(object):
def __init__(self, attribute, level=50):
self.attribute = attribute
self.filename = '%d-%s.yml' % (level, attribute.lower().replace('_', '-'))
def set(self, key, value):
curvalues = get(self.attribute, {})
curvalues[key] = value
store(self.filename, {self.attribute: curvalues})
def delete(self, key):
curvalues = get(self.attribute, {})
curvalues.pop(key, None)
store(self.filename, {self.attribute: curvalues})
def value(self):
return get(self.attribute, {})
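# Illustrative usage of APIConfigDict (not part of the original module; the
# attribute name is assumed for the example):
#   fo_config = APIConfigDict('FEED_OUTPUT', level=50)
#   fo_config.set('hc1', {'enabled': True})   # persists to 50-feed-output.yml
#   current = fo_config.value()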
def _load_config(config_path):
global CONFIG
new_config = {}
    # compatibility with early releases where all the config
    # was stored in a single file
old_config_file = os.path.join(config_path, 'wsgi.yml')
if os.path.exists(old_config_file):
try:
with open(old_config_file, 'r') as f:
add_config = yaml.safe_load(f)
if add_config is not None:
new_config.update(add_config)
except OSError:
pass
with API_CONFIG_LOCK.acquire():
api_config_path = os.path.join(config_path, 'api')
if os.path.exists(api_config_path):
config_files = sorted(os.listdir(api_config_path))
for cf in config_files:
if not cf.endswith('.yml'):
continue
try:
with open(os.path.join(api_config_path, cf), 'r') as f:
add_config = yaml.safe_load(f)
if add_config is not None:
new_config.update(add_config)
except (OSError, IOError, ValueError):
LOG.exception('Error loading config file %s' % cf)
CONFIG = new_config
LOG.info('Config loaded: %r', new_config)
def _load_auth_dbs(config_path):
with API_CONFIG_LOCK.acquire():
api_config_path = os.path.join(config_path, 'api')
for env, default in _AUTH_DBS.iteritems():
dbname = get(env, default)
new_db = False
dbpath = os.path.join(
api_config_path,
dbname
)
# for compatibility with old releases
if not os.path.exists(dbpath):
old_dbpath = os.path.join(
config_path,
dbname
)
if os.path.exists(old_dbpath):
dbpath = old_dbpath
else:
new_db = True
CONFIG[env] = passlib.apache.HtpasswdFile(
path=dbpath,
new=new_db
)
LOG.info('%s loaded from %s', env, dbpath)
def _config_monitor(config_path):
api_config_path = os.path.join(config_path, 'api')
dirsnapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
while True:
try:
with API_CONFIG_LOCK.acquire(timeout=600):
new_snapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
if new_snapshot != dirsnapshot:
try:
_load_config(config_path)
_load_auth_dbs(config_path)
except gevent.GreenletExit:
break
except:
LOG.exception('Error loading config')
dirsnapshot = new_snapshot
except filelock.Timeout:
LOG.error('Timeout locking config in config monitor')
gevent.sleep(1)
# initialization
def init():
global API_CONFIG_PATH
global API_CONFIG_LOCK
config_path = os.environ.get('MM_CONFIG', None)
if config_path is None:
LOG.critical('MM_CONFIG environment variable not set')
raise RuntimeError('MM_CONFIG environment variable not set')
if not os.path.isdir(config_path):
config_path = os.path.dirname(config_path)
# init global vars
API_CONFIG_PATH = os.path.join(config_path, 'api')
API_CONFIG_LOCK = filelock.FileLock(
os.environ.get('API_CONFIG_LOCK', '/var/run/minemeld/api-config.lock')
)
_load_config(config_path)
_load_auth_dbs(config_path)
if config_path is not None:
gevent.spawn(_config_monitor, config_path)
| apache-2.0 | 661,309,615,503,957,100 | 26.793427 | 82 | 0.572635 | false | 3.897301 | true | false | false |
jopohl/urh | src/urh/dev/native/SoundCard.py | 1 | 4702 | from collections import OrderedDict
from multiprocessing import Array
from multiprocessing.connection import Connection
import numpy as np
import pyaudio
from urh.dev.native.Device import Device
from urh.util.Logger import logger
class SoundCard(Device):
DEVICE_LIB = pyaudio
ASYNCHRONOUS = False
DEVICE_METHODS = dict()
CHUNK_SIZE = 1024
SYNC_TX_CHUNK_SIZE = 2 * CHUNK_SIZE
CONTINUOUS_TX_CHUNK_SIZE = SYNC_TX_CHUNK_SIZE
SAMPLE_RATE = 48000
pyaudio_handle = None
pyaudio_stream = None
DATA_TYPE = np.float32
@classmethod
def init_device(cls, ctrl_connection: Connection, is_tx: bool, parameters: OrderedDict) -> bool:
try:
cls.SAMPLE_RATE = int(parameters[cls.Command.SET_SAMPLE_RATE.name])
except (KeyError, ValueError):
pass
return super().init_device(ctrl_connection, is_tx, parameters)
@classmethod
def setup_device(cls, ctrl_connection: Connection, device_identifier):
ctrl_connection.send("Initializing pyaudio...")
try:
cls.pyaudio_handle = pyaudio.PyAudio()
ctrl_connection.send("Initialized pyaudio")
return True
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to initialize pyaudio")
@classmethod
def prepare_sync_receive(cls, ctrl_connection: Connection):
try:
cls.pyaudio_stream = cls.pyaudio_handle.open(format=pyaudio.paFloat32,
channels=2,
rate=cls.SAMPLE_RATE,
input=True,
frames_per_buffer=cls.CHUNK_SIZE)
ctrl_connection.send("Successfully started pyaudio stream")
return 0
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to start pyaudio stream")
@classmethod
def prepare_sync_send(cls, ctrl_connection: Connection):
try:
cls.pyaudio_stream = cls.pyaudio_handle.open(format=pyaudio.paFloat32,
channels=2,
rate=cls.SAMPLE_RATE,
frames_per_buffer=cls.CHUNK_SIZE,
output=True)
ctrl_connection.send("Successfully started pyaudio stream")
return 0
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to start pyaudio stream")
@classmethod
def receive_sync(cls, data_conn: Connection):
if cls.pyaudio_stream:
data_conn.send_bytes(cls.pyaudio_stream.read(cls.CHUNK_SIZE, exception_on_overflow=False))
@classmethod
def send_sync(cls, data):
if cls.pyaudio_stream:
data_bytes = data.tostring() if isinstance(data, np.ndarray) else bytes(data)
# pad with zeros if smaller than chunk size
cls.pyaudio_stream.write(data_bytes.ljust(cls.CHUNK_SIZE*8, b'\0'))
@classmethod
def shutdown_device(cls, ctrl_connection, is_tx: bool):
logger.debug("shutting down pyaudio...")
try:
if cls.pyaudio_stream:
cls.pyaudio_stream.stop_stream()
cls.pyaudio_stream.close()
if cls.pyaudio_handle:
cls.pyaudio_handle.terminate()
ctrl_connection.send("CLOSE:0")
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to shut down pyaudio")
def __init__(self, sample_rate, resume_on_full_receive_buffer=False):
super().__init__(center_freq=0, sample_rate=sample_rate, bandwidth=0,
gain=1, if_gain=1, baseband_gain=1,
resume_on_full_receive_buffer=resume_on_full_receive_buffer)
self.success = 0
self.bandwidth_is_adjustable = False
@property
def device_parameters(self) -> OrderedDict:
return OrderedDict([(self.Command.SET_SAMPLE_RATE.name, self.sample_rate),
("identifier", None)])
@staticmethod
def bytes_to_iq(buffer):
return np.frombuffer(buffer, dtype=np.float32).reshape((-1, 2), order="C")
@staticmethod
def iq_to_bytes(samples: np.ndarray):
arr = Array("f", 2 * len(samples), lock=False)
numpy_view = np.frombuffer(arr, dtype=np.float32)
numpy_view[:] = samples.flatten(order="C")
return arr
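    # Illustrative round trip between the two helpers above (not part of the
    # original module; the sample values are assumed):
    #   samples = np.zeros((1024, 2), dtype=np.float32)   # I/Q pairs
    #   shared = SoundCard.iq_to_bytes(samples)            # shared-memory Array("f", ...)
    #   restored = SoundCard.bytes_to_iq(shared)           # back to an (N, 2) float32 array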
| gpl-3.0 | -7,528,737,987,523,719,000 | 37.227642 | 102 | 0.574011 | false | 4.313761 | false | false | false |
mario23285/ProyectoElectrico | src/Leg.py | 1 | 6591 | """
UNIVERSIDAD DE COSTA RICA Escuela de Ingeniería Eléctrica
IE0499 | Proyecto Eléctrico
Mario Alberto Castresana Avendaño
A41267
Program: BVH_TuneUp
-------------------------------------------------------------------------------
file: Leg.py
description:
    This file contains the Leg class, which is used to implement the left and
    right knee. The goniometry checks for this bone are based on the following
    limits for its Euler angles:
    Z   torsion is not valid
    X   flexion + and extension -
    Y   rotation is not valid
"""
from Bone import Bone
class Leg(Bone):
"""
    This subclass implements the goniometry check for the knees in the BVH
    skeleton. The hierarchy calls this bone "Leg".
"""
def __init__(self, ID=' ', Zp=0, Xp=0, Yp=0):
"""
        This bone is initialized with the following parameters:
        ID: bone identifier, e.g. left/right
        Each bone pose is defined by a vector of Euler angles (Z, X, Y), which
        occupy specific positions inside the MOTION array of the BVH file.
        Zp: index into the MOTION array that holds this bone's Euler angle Z
        Xp: index into the MOTION array that holds this bone's Euler angle X
        Yp: index into the MOTION array that holds this bone's Euler angle Y
"""
self.ID = ID
self.Zp = Zp
self.Xp = Xp
self.Yp = Yp
        #call the superclass constructor to get access to all of the
        #goniometry attributes
Bone.__init__(self,
Name='Rodilla',
Zmin=-0.200000,
Zmax=0.200000,
Xmin=0.000000,
Xmax=150.000000,
Ymin=-1.000000,
Ymax=1.000000)
def Goniometry_check(self, MOTION, frame):
"""
        Description:
        This method compares the Euler angles that a bone has in a given frame
        against the goniometric limits of that particular bone. If any Euler
        angle exceeds the limits of human movement, a glitch is reported for
        that frame and corrected in the MOTION array.
        Arguments:
        MOTION: 156-element array containing every Euler angle of every bone
        for a given frame. The order of the bones is defined by the HIERARCHY
        section of the BVH.
        frame: the motion-capture frame being analyzed
"""
        #First, read this bone's Euler angles
Zeuler = MOTION[self.Zp]
Xeluer = MOTION[self.Xp]
Yeuler = MOTION[self.Yp]
glitch = False
        #Exempt is a flag that is set when rotation problems of the Z and Y
        #axes are detected at the knees
Exempt = False
ErrorMsg = ' existen glitches de '
        #Variables used to test whether the axes rotated and the skeleton is crouching
rodilla_flex = Xeluer > 13.0 or Xeluer < -15.0
y_rot = Yeuler > 20.0 or Yeuler < -20.0
z_rot = Zeuler > 40.0 or Zeuler < -40.0
Rotacion_ejes = y_rot or z_rot
if rodilla_flex and Rotacion_ejes:
Exempt = True
if Exempt:
            #There are two different goniometric checks depending on how much the knees are
            #flexed. When the knees are flexed by more than 45 degrees, or the Z and Y axes
            #have rotated, the mobility limits in Z and Y must be widened. This is due to the
            #behaviour of the bones in the BVH, which rotate the Y and Z axes to represent the
            #movements of a crouching skeleton. It happens because the cameras lose the
            #orientation of the bone along the Z and Y axes.
            #test the new limits in Z
if Zeuler < -160.000000:
                #no constraint is applied to MOTION[self.Zp] in Z
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Z- | '
if Zeuler > 160.000000:
                #no constraint is applied to MOTION[self.Zp] in Z
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Z+ | '
            #now test the new limits in X
if Xeluer < -150.000000:
                #no constraint is applied to MOTION[self.Xp] in X
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en X- | '
if Xeluer > 150.000000:
                #no constraint is applied to MOTION[self.Xp] in X
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en X+ | '
            #now test the new limits in Y
if Yeuler < -105.000000:
                #no constraint is applied to MOTION[self.Yp] in Y
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Y- | '
if Yeuler > 105.000000:
                #no constraint is applied to MOTION[self.Yp] in Y
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Y+ | '
else:
            #test the limits in Z
if Zeuler < self.Zmin:
MOTION[self.Zp] = self.Zmin
glitch = True
ErrorMsg += 'torsión | '
if Zeuler > self.Zmax:
MOTION[self.Zp] = self.Zmax
glitch = True
ErrorMsg += 'torsión | '
            #now test the limits in X
if Xeluer < self.Xmin:
MOTION[self.Xp] = self.Xmin
glitch = True
ErrorMsg += 'extension | '
if Xeluer > self.Xmax:
MOTION[self.Xp] = self.Xmax
glitch = True
ErrorMsg += 'flexion | '
            #now test the limits in Y
if Yeuler < self.Ymin:
MOTION[self.Yp] = self.Ymin
glitch = True
ErrorMsg += 'rotacion interna | '
if Yeuler > self.Ymax:
MOTION[self.Yp] = self.Ymax
glitch = True
ErrorMsg += 'rotacion externa | '
if glitch:
self.Report_glitch(ErrorMsg, frame)
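    # Illustrative usage (not part of the original file; the MOTION indices and
    # frame number are assumed):
    #   left_knee = Leg(ID='left', Zp=57, Xp=58, Yp=59)
    #   left_knee.Goniometry_check(MOTION, frame=120)   # clamps out-of-range angles in MOTION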
| gpl-2.0 | -7,727,383,819,763,163,000 | 38.93865 | 96 | 0.567896 | false | 3.126801 | false | false | false |
cryptapus/electrum-uno | gui/qt/__init__.py | 1 | 7412 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import signal
try:
import PyQt4
except Exception:
sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'")
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugins import run_hook
from electrum import SimpleConfig, Wallet, WalletStorage
from electrum.paymentrequest import InvoiceStore
from electrum.contacts import Contacts
from electrum.synchronizer import Synchronizer
from electrum.verifier import SPV
from electrum.util import DebugMem
from electrum.wallet import Abstract_Wallet
from installwizard import InstallWizard
try:
import icons_rc
except Exception:
sys.exit("Error: Could not import icons_rc.py, please generate it with: 'pyrcc4 icons.qrc -o gui/qt/icons_rc.py'")
from util import * # * needed for plugins
from main_window import ElectrumWindow
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class ElectrumGui:
def __init__(self, config, daemon, plugins):
set_language(config.get('language'))
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.timer = Timer()
# shared objects
self.invoices = InvoiceStore(self.config)
self.contacts = Contacts(self.config)
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Unobtanium Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.connect(self.app, QtCore.SIGNAL('new_window'), self.start_new_window)
run_hook('init_qt', self)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
self.old_menu = self.tray.contextMenu()
m = QMenu()
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Unobtanium Electrum"), self.close)
self.tray.setContextMenu(m)
def tray_icon(self):
if self.dark_icon:
return QIcon(':icons/electrum_dark_icon.png')
else:
return QIcon(':icons/electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.emit(SIGNAL('new_window'), path, uri)
def create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
return w
def get_wizard(self):
return InstallWizard(self.config, self.app, self.plugins)
def start_new_window(self, path, uri):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it.'''
for w in self.windows:
if w.wallet.storage.path == path:
w.bring_to_top()
break
else:
wallet = self.daemon.load_wallet(path, self.get_wizard)
if not wallet:
return
w = self.create_window_for_wallet(wallet)
if uri:
w.pay_to_URI(uri)
return w
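    # Illustrative usage (not part of the original file; the wallet path and
    # payment URI are assumed):
    #   gui.start_new_window('/home/user/.electrum-uno/wallets/default_wallet',
    #                        'unobtanium:uXXXX?amount=1')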
def close_window(self, window):
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if self.config.get('wallet_path') is None and not self.windows:
path = window.wallet.storage.path
self.config.set_key('gui_last_wallet', path)
run_hook('on_close_window', window)
def main(self):
self.timer.start()
# open last wallet
if self.config.get('wallet_path') is None:
last_wallet = self.config.get('gui_last_wallet')
if last_wallet is not None and os.path.exists(last_wallet):
self.config.cmdline_options['default_wallet_path'] = last_wallet
if not self.start_new_window(self.config.get_wallet_path(),
self.config.get('url')):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
# main loop
self.app.exec_()
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
| mit | -6,099,789,603,438,935,000 | 34.980583 | 118 | 0.642876 | false | 3.84839 | true | false | false |
gladsonvm/haystackdemo | lib/python2.7/site-packages/pyelasticsearch/client.py | 1 | 39599 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime
from operator import itemgetter
from functools import wraps
from logging import getLogger
import re
from six import (iterkeys, binary_type, text_type, string_types, integer_types,
iteritems, PY3)
from six.moves import xrange
try:
# PY3
from urllib.parse import urlencode, quote_plus
except ImportError:
# PY2
from urllib import urlencode, quote_plus
import requests
import simplejson as json # for use_decimal
from simplejson import JSONDecodeError
from pyelasticsearch.downtime import DowntimePronePool
from pyelasticsearch.exceptions import (Timeout, ConnectionError,
ElasticHttpError,
InvalidJsonResponseError,
ElasticHttpNotFoundError,
IndexAlreadyExistsError)
def _add_es_kwarg_docs(params, method):
"""
Add stub documentation for any args in ``params`` that aren't already in
the docstring of ``method``.
The stubs may not tell much about each arg, but they serve the important
purpose of letting the user know that they're safe to use--we won't be
paving over them in the future for something pyelasticsearch-specific.
"""
def docs_for_kwarg(p):
return '\n :arg %s: See the ES docs.' % p
doc = method.__doc__
if doc is not None: # It's none under python -OO.
# Handle the case where there are no :arg declarations to key off:
if '\n :arg' not in doc and params:
first_param, params = params[0], params[1:]
doc = doc.replace('\n (Insert es_kwargs here.)',
docs_for_kwarg(first_param))
for p in params:
if ('\n :arg %s: ' % p) not in doc:
# Find the last documented arg so we can put our generated docs
# after it. No need to explicitly compile this; the regex cache
# should serve.
insertion_point = re.search(
r' :arg (.*?)(?=\n+ (?:$|[^: ]))',
doc,
re.MULTILINE | re.DOTALL).end()
doc = ''.join([doc[:insertion_point],
docs_for_kwarg(p),
doc[insertion_point:]])
method.__doc__ = doc
def es_kwargs(*args_to_convert):
"""
Mark which kwargs will become query string params in the eventual ES call.
Return a decorator that grabs the kwargs of the given names, plus any
beginning with "es_", subtracts them from the ordinary kwargs, and passes
them to the decorated function through the ``query_params`` kwarg. The
remaining kwargs and the args are passed through unscathed.
Also, if any of the given kwargs are undocumented in the decorated method's
docstring, add stub documentation for them.
"""
convertible_args = set(args_to_convert)
def decorator(func):
# Add docs for any missing query params:
_add_es_kwarg_docs(args_to_convert, func)
@wraps(func)
def decorate(*args, **kwargs):
# Make kwargs the map of normal kwargs and query_params the map of
# kwargs destined for query string params:
query_params = {}
for k in list(iterkeys(kwargs)): # Make a copy; we mutate kwargs.
if k.startswith('es_'):
query_params[k[3:]] = kwargs.pop(k)
elif k in convertible_args:
query_params[k] = kwargs.pop(k)
return func(*args, query_params=query_params, **kwargs)
return decorate
return decorator
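# Illustrative use of the decorator above (not part of the original module; the
# method and parameter names are assumed):
#
#   @es_kwargs('refresh')
#   def frob(self, index, query_params=None):
#       return self.send_request('POST', [index, '_frob'], query_params=query_params)
#
# A call like client.frob('myindex', refresh=True, es_timeout=5) arrives with
# query_params={'refresh': True, 'timeout': 5}.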
class ElasticSearch(object):
"""
An object which manages connections to elasticsearch and acts as a
go-between for API calls to it
This object is thread-safe. You can create one instance and share it
among all threads.
"""
def __init__(self, urls, timeout=60, max_retries=0, revival_delay=300):
"""
:arg urls: A URL or iterable of URLs of ES nodes. These are full URLs
with port numbers, like ``http://elasticsearch.example.com:9200``.
:arg timeout: Number of seconds to wait for each request before raising
Timeout
:arg max_retries: How many other servers to try, in series, after a
request times out or a connection fails
:arg revival_delay: Number of seconds for which to avoid a server after
it times out or is uncontactable
"""
if isinstance(urls, string_types):
urls = [urls]
urls = [u.rstrip('/') for u in urls]
self.servers = DowntimePronePool(urls, revival_delay)
self.revival_delay = revival_delay
self.timeout = timeout
self.max_retries = max_retries
self.logger = getLogger('pyelasticsearch')
self.session = requests.session()
self.json_encoder = JsonEncoder
def _concat(self, items):
"""
Return a comma-delimited concatenation of the elements of ``items``,
with any occurrences of "_all" omitted.
If ``items`` is a string, promote it to a 1-item list.
"""
# TODO: Why strip out _all?
if items is None:
return ''
if isinstance(items, string_types):
items = [items]
return ','.join(i for i in items if i != '_all')
def _to_query(self, obj):
"""
Convert a native-Python object to a unicode or bytestring
representation suitable for a query string.
"""
# Quick and dirty thus far
if isinstance(obj, string_types):
return obj
if isinstance(obj, bool):
return 'true' if obj else 'false'
if isinstance(obj, integer_types):
return str(obj)
if isinstance(obj, float):
return repr(obj) # str loses precision.
if isinstance(obj, (list, tuple)):
return ','.join(self._to_query(o) for o in obj)
iso = _iso_datetime(obj)
if iso:
return iso
raise TypeError("_to_query() doesn't know how to represent %r in an ES"
' query string.' % obj)
def _utf8(self, thing):
"""Convert any arbitrary ``thing`` to a utf-8 bytestring."""
if isinstance(thing, binary_type):
return thing
if not isinstance(thing, text_type):
thing = text_type(thing)
return thing.encode('utf-8')
def _join_path(self, path_components):
"""
Smush together the path components, omitting '' and None ones.
Unicodes get encoded to strings via utf-8. Incoming strings are assumed
to be utf-8-encoded already.
"""
path = '/'.join(quote_plus(self._utf8(p), '') for p in path_components if
p is not None and p != '')
if not path.startswith('/'):
path = '/' + path
return path
def send_request(self,
method,
path_components,
body='',
query_params=None,
encode_body=True):
"""
Send an HTTP request to ES, and return the JSON-decoded response.
This is mostly an internal method, but it also comes in handy if you
need to use a brand new ES API that isn't yet explicitly supported by
pyelasticsearch, while still taking advantage of our connection pooling
and retrying.
Retry the request on different servers if the first one is down and
``self.max_retries`` > 0.
:arg method: An HTTP method, like "GET"
:arg path_components: An iterable of path components, to be joined by
"/"
:arg body: The request body
:arg query_params: A map of querystring param names to values or
``None``
:arg encode_body: Whether to encode the body of the request as JSON
"""
path = self._join_path(path_components)
if query_params:
path = '?'.join(
[path,
urlencode(dict((k, self._utf8(self._to_query(v))) for k, v in
iteritems(query_params)))])
request_body = self._encode_json(body) if encode_body else body
req_method = getattr(self.session, method.lower())
# We do our own retrying rather than using urllib3's; we want to retry
# a different node in the cluster if possible, not the same one again
# (which may be down).
for attempt in xrange(self.max_retries + 1):
server_url, was_dead = self.servers.get()
url = server_url + path
self.logger.debug(
"Making a request equivalent to this: curl -X%s '%s' -d '%s'",
method, url, request_body)
try:
resp = req_method(
url,
timeout=self.timeout,
**({'data': request_body} if body else {}))
except (ConnectionError, Timeout):
self.servers.mark_dead(server_url)
self.logger.info('%s marked as dead for %s seconds.',
server_url,
self.revival_delay)
if attempt >= self.max_retries:
raise
else:
if was_dead:
self.servers.mark_live(server_url)
break
self.logger.debug('response status: %s', resp.status_code)
prepped_response = self._decode_response(resp)
if resp.status_code >= 400:
self._raise_exception(resp, prepped_response)
self.logger.debug('got response %s', prepped_response)
return prepped_response
def _raise_exception(self, response, decoded_body):
"""Raise an exception based on an error-indicating response from ES."""
error_message = decoded_body.get('error', decoded_body)
error_class = ElasticHttpError
if response.status_code == 404:
error_class = ElasticHttpNotFoundError
elif (error_message.startswith('IndexAlreadyExistsException') or
'nested: IndexAlreadyExistsException' in error_message):
error_class = IndexAlreadyExistsError
raise error_class(response.status_code, error_message)
def _encode_json(self, value):
"""
Convert a Python value to a form suitable for ElasticSearch's JSON DSL.
"""
return json.dumps(value, cls=self.json_encoder, use_decimal=True)
def _decode_response(self, response):
"""Return a native-Python representation of a response's JSON blob."""
try:
json_response = response.json()
except JSONDecodeError:
raise InvalidJsonResponseError(response)
return json_response
## REST API
@es_kwargs('routing', 'parent', 'timestamp', 'ttl', 'percolate',
'consistency', 'replication', 'refresh', 'timeout', 'fields')
def index(self, index, doc_type, doc, id=None, overwrite_existing=True,
query_params=None):
"""
Put a typed JSON document into a specific index to make it searchable.
:arg index: The name of the index to which to add the document
:arg doc_type: The type of the document
:arg doc: A Python mapping object, convertible to JSON, representing
the document
:arg id: The ID to give the document. Leave blank to make one up.
:arg overwrite_existing: Whether we should overwrite existing documents
of the same ID and doctype
:arg routing: A value hashed to determine which shard this indexing
request is routed to
:arg parent: The ID of a parent document, which leads this document to
be routed to the same shard as the parent, unless ``routing``
overrides it.
:arg timestamp: An explicit value for the (typically automatic)
timestamp associated with a document, for use with ``ttl`` and such
:arg ttl: The time until this document is automatically removed from
the index. Can be an integral number of milliseconds or a duration
like '1d'.
:arg percolate: An indication of which percolator queries, registered
against this index, should be checked against the new document: '*'
or a query string like 'color:green'
:arg consistency: An indication of how many active shards the contact
node should demand to see in order to let the index operation
succeed: 'one', 'quorum', or 'all'
:arg replication: Set to 'async' to return from ES before finishing
replication.
:arg refresh: Pass True to refresh the index after adding the document.
:arg timeout: A duration to wait for the relevant primary shard to
become available, in the event that it isn't: for example, "5m"
See `ES's index API`_ for more detail.
.. _`ES's index API`:
http://www.elasticsearch.org/guide/reference/api/index_.html
"""
# :arg query_params: A map of other querystring params to pass along to
# ES. This lets you use future ES features without waiting for an
# update to pyelasticsearch. If we just used **kwargs for this, ES
# could start using a querystring param that we already used as a
# kwarg, and we'd shadow it. Name these params according to the names
# they have in ES's REST API, but prepend "\es_": for example,
# ``es_version=2``.
# TODO: Support version along with associated "preference" and
# "version_type" params.
if not overwrite_existing:
query_params['op_type'] = 'create'
return self.send_request('POST' if id is None else 'PUT',
[index, doc_type, id],
doc,
query_params)
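    # Illustrative usage (not part of the original module; index/type names and
    # the document are assumed):
    #   es = ElasticSearch('http://localhost:9200/')
    #   es.index('contacts', 'person', {'name': 'Joe Tester', 'age': 25}, id=1)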
@es_kwargs('consistency', 'refresh')
def bulk_index(self, index, doc_type, docs, id_field='id',
parent_field='_parent', query_params=None):
"""
Index a list of documents as efficiently as possible.
:arg index: The name of the index to which to add the document
:arg doc_type: The type of the document
:arg docs: An iterable of Python mapping objects, convertible to JSON,
representing documents to index
:arg id_field: The field of each document that holds its ID
:arg parent_field: The field of each document that holds its parent ID,
if any. Removed from document before indexing.
See `ES's bulk API`_ for more detail.
.. _`ES's bulk API`:
http://www.elasticsearch.org/guide/reference/api/bulk.html
"""
body_bits = []
if not docs:
raise ValueError('No documents provided for bulk indexing!')
for doc in docs:
action = {'index': {'_index': index, '_type': doc_type}}
if doc.get(id_field) is not None:
action['index']['_id'] = doc[id_field]
if doc.get(parent_field) is not None:
action['index']['_parent'] = doc.pop(parent_field)
body_bits.append(self._encode_json(action))
body_bits.append(self._encode_json(doc))
# Need the trailing newline.
body = '\n'.join(body_bits) + '\n'
return self.send_request('POST',
['_bulk'],
body,
encode_body=False,
query_params=query_params)
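    # Illustrative usage (not part of the original module; the documents are assumed):
    #   docs = [{'id': 1, 'name': 'Joe'}, {'id': 2, 'name': 'Jane'}]
    #   es.bulk_index('contacts', 'person', docs, id_field='id')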
@es_kwargs('routing', 'parent', 'replication', 'consistency', 'refresh')
def delete(self, index, doc_type, id, query_params=None):
"""
Delete a typed JSON document from a specific index based on its ID.
:arg index: The name of the index from which to delete
:arg doc_type: The type of the document to delete
:arg id: The (string or int) ID of the document to delete
See `ES's delete API`_ for more detail.
.. _`ES's delete API`:
http://www.elasticsearch.org/guide/reference/api/delete.html
"""
# id should never be None, and it's not particular dangerous
# (equivalent to deleting a doc with ID "None", but it's almost
# certainly not what the caller meant:
if id is None or id == '':
raise ValueError('No ID specified. To delete all documents in '
'an index, use delete_all().')
return self.send_request('DELETE', [index, doc_type, id],
query_params=query_params)
@es_kwargs('routing', 'parent', 'replication', 'consistency', 'refresh')
def delete_all(self, index, doc_type, query_params=None):
"""
Delete all documents of the given doctype from an index.
:arg index: The name of the index from which to delete. ES does not
support this being empty or "_all" or a comma-delimited list of
index names (in 0.19.9).
:arg doc_type: The name of a document type
See `ES's delete API`_ for more detail.
.. _`ES's delete API`:
http://www.elasticsearch.org/guide/reference/api/delete.html
"""
return self.send_request('DELETE', [index, doc_type],
query_params=query_params)
    @es_kwargs('q', 'df', 'analyzer', 'default_operator', 'source', 'routing',
'replication', 'consistency')
def delete_by_query(self, index, doc_type, query, query_params=None):
"""
Delete typed JSON documents from a specific index based on query.
:arg index: An index or iterable thereof from which to delete
:arg doc_type: The type of document or iterable thereof to delete
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter. (Passing the ``q`` kwarg yourself is
deprecated.)
See `ES's delete-by-query API`_ for more detail.
.. _`ES's delete-by-query API`:
http://www.elasticsearch.org/guide/reference/api/delete-by-query.html
"""
if isinstance(query, string_types) and 'q' not in query_params:
query_params['q'] = query
body = ''
else:
body = query
return self.send_request(
'DELETE',
[self._concat(index), self._concat(doc_type), '_query'],
body,
query_params=query_params)
@es_kwargs('realtime', 'fields', 'routing', 'preference', 'refresh')
def get(self, index, doc_type, id, query_params=None):
"""
Get a typed JSON document from an index by ID.
:arg index: The name of the index from which to retrieve
:arg doc_type: The type of document to get
:arg id: The ID of the document to retrieve
See `ES's get API`_ for more detail.
.. _`ES's get API`:
http://www.elasticsearch.org/guide/reference/api/get.html
"""
return self.send_request('GET', [index, doc_type, id],
query_params=query_params)
@es_kwargs()
def multi_get(self, ids, index=None, doc_type=None, fields=None,
query_params=None):
"""
Get multiple typed JSON documents from ES.
        :arg ids: An iterable, each element of which can be either a dict or
an id (int or string). IDs are taken to be document IDs. Dicts are
passed through the Multi Get API essentially verbatim, except that
any missing ``_type``, ``_index``, or ``fields`` keys are filled in
from the defaults given in the ``index``, ``doc_type``, and
``fields`` args.
:arg index: Default index name from which to retrieve
:arg doc_type: Default type of document to get
:arg fields: Default fields to return
See `ES's Multi Get API`_ for more detail.
.. _`ES's Multi Get API`:
http://www.elasticsearch.org/guide/reference/api/multi-get.html
"""
doc_template = dict(
filter(
itemgetter(1),
[('_index', index), ('_type', doc_type), ('fields', fields)]))
docs = []
for id in ids:
doc = doc_template.copy()
if isinstance(id, dict):
doc.update(id)
else:
doc['_id'] = id
docs.append(doc)
return self.send_request(
'GET', ['_mget'], {'docs': docs}, query_params=query_params)
@es_kwargs('routing', 'parent', 'timeout', 'replication', 'consistency',
'percolate', 'refresh', 'retry_on_conflict', 'fields')
def update(self, index, doc_type, id, script=None, params=None, lang=None,
query_params=None, doc=None, upsert=None):
"""
Update an existing document. Raise ``TypeError`` if ``script``, ``doc``
and ``upsert`` are all unspecified.
:arg index: The name of the index containing the document
:arg doc_type: The type of the document
:arg id: The ID of the document
:arg script: The script to be used to update the document
:arg params: A dict of the params to be put in scope of the script
:arg lang: The language of the script. Omit to use the default,
specified by ``script.default_lang``.
:arg doc: A partial document to be merged into the existing document
:arg upsert: The content for the new document created if the document
does not exist
"""
if script is None and doc is None and upsert is None:
raise TypeError('At least one of the script, doc, or upsert '
'kwargs must be provided.')
body = {}
if script:
body['script'] = script
if lang and script:
body['lang'] = lang
if doc:
body['doc'] = doc
if upsert:
body['upsert'] = upsert
if params:
body['params'] = params
return self.send_request(
'POST',
[index, doc_type, id, '_update'],
body=body,
query_params=query_params)
def _search_or_count(self, kind, query, index=None, doc_type=None,
query_params=None):
if isinstance(query, string_types):
query_params['q'] = query
body = ''
else:
body = query
return self.send_request(
'GET',
[self._concat(index), self._concat(doc_type), kind],
body,
query_params=query_params)
@es_kwargs('routing', 'size')
def search(self, query, **kwargs):
"""
Execute a search query against one or more indices and get back search
hits.
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter
:arg index: An index or iterable of indexes to search. Omit to search
all.
:arg doc_type: A document type or iterable thereof to search. Omit to
search all.
:arg size: Limit the number of results to ``size``. Use with ``es_from`` to
implement paginated searching.
See `ES's search API`_ for more detail.
.. _`ES's search API`:
http://www.elasticsearch.org/guide/reference/api/search/
"""
return self._search_or_count('_search', query, **kwargs)
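    # Illustrative usage (not part of the original module; the query and index
    # names are assumed):
    #   es.search('name:joe', index='contacts')                        # textual query
    #   es.search({'query': {'match': {'name': 'joe'}}}, index='contacts', size=10)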
@es_kwargs('df', 'analyzer', 'default_operator', 'source', 'routing')
def count(self, query, **kwargs):
"""
Execute a query against one or more indices and get hit count.
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter
:arg index: An index or iterable of indexes to search. Omit to search
all.
:arg doc_type: A document type or iterable thereof to search. Omit to
search all.
See `ES's count API`_ for more detail.
.. _`ES's count API`:
http://www.elasticsearch.org/guide/reference/api/count.html
"""
return self._search_or_count('_count', query, **kwargs)
@es_kwargs()
def get_mapping(self, index=None, doc_type=None, query_params=None):
"""
Fetch the mapping definition for a specific index and type.
:arg index: An index or iterable thereof
:arg doc_type: A document type or iterable thereof
Omit both arguments to get mappings for all types and indexes.
See `ES's get-mapping API`_ for more detail.
.. _`ES's get-mapping API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-get-mapping.html
"""
# TODO: Think about turning index=None into _all if doc_type is non-
# None, per the ES doc page.
return self.send_request(
'GET',
[self._concat(index), self._concat(doc_type), '_mapping'],
query_params=query_params)
@es_kwargs('ignore_conflicts')
def put_mapping(self, index, doc_type, mapping, query_params=None):
"""
Register specific mapping definition for a specific type against one or
more indices.
:arg index: An index or iterable thereof
:arg doc_type: The document type to set the mapping of
:arg mapping: A dict representing the mapping to install. For example,
this dict can have top-level keys that are the names of doc types.
See `ES's put-mapping API`_ for more detail.
.. _`ES's put-mapping API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-put-mapping.html
"""
# TODO: Perhaps add a put_all_mappings() for consistency and so we
# don't need to expose the "_all" magic string. We haven't done it yet
# since this routine is not dangerous: ES makes you explicily pass
# "_all" to update all mappings.
return self.send_request(
'PUT',
[self._concat(index), doc_type, '_mapping'],
mapping,
query_params=query_params)
@es_kwargs('search_type', 'search_indices', 'search_types',
'search_scroll', 'search_size', 'search_from',
'like_text', 'percent_terms_to_match', 'min_term_freq',
'max_query_terms', 'stop_words', 'min_doc_freq', 'max_doc_freq',
'min_word_len', 'max_word_len', 'boost_terms', 'boost',
'analyzer')
def more_like_this(self, index, doc_type, id, mlt_fields, body='', query_params=None):
"""
Execute a "more like this" search query against one or more fields and
get back search hits.
:arg index: The index to search and where the document for comparison
lives
:arg doc_type: The type of document to find others like
:arg id: The ID of the document to find others like
:arg mlt_fields: The list of fields to compare on
:arg body: A dictionary that will convert to ES's query DSL and be
passed as the request body
See `ES's more-like-this API`_ for more detail.
.. _`ES's more-like-this API`:
http://www.elasticsearch.org/guide/reference/api/more-like-this.html
"""
query_params['mlt_fields'] = self._concat(mlt_fields)
return self.send_request('GET',
[index, doc_type, id, '_mlt'],
body=body,
query_params=query_params)
## Index Admin API
@es_kwargs('recovery', 'snapshot')
def status(self, index=None, query_params=None):
"""
Retrieve the status of one or more indices
:arg index: An index or iterable thereof
See `ES's index-status API`_ for more detail.
.. _`ES's index-status API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-status.html
"""
return self.send_request('GET', [self._concat(index), '_status'],
query_params=query_params)
@es_kwargs()
def update_aliases(self, settings, query_params=None):
"""
Add, remove, or update aliases in bulk.
:arg settings: a dictionary specifying the actions to perform
See `ES's admin-indices-aliases API`_.
.. _`ES's admin-indices-aliases API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-aliases.html
"""
return self.send_request('POST', ['_aliases'],
body=settings, query_params=query_params)
@es_kwargs()
def aliases(self, index=None, query_params=None):
"""
Retrieve a listing of aliases
:arg index: the name of an index or an iterable of indices
See `ES's admin-indices-aliases API`_.
.. _`ES's admin-indices-aliases API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-aliases.html
"""
return self.send_request('GET', [self._concat(index), '_aliases'],
query_params=query_params)
@es_kwargs()
def create_index(self, index, settings=None, query_params=None):
"""
Create an index with optional settings.
:arg index: The name of the index to create
:arg settings: A dictionary of settings
If the index already exists, raise
:class:`~pyelasticsearch.exceptions.IndexAlreadyExistsError`.
See `ES's create-index API`_ for more detail.
.. _`ES's create-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-create-index.html
"""
return self.send_request('PUT', [index], body=settings,
query_params=query_params)
@es_kwargs()
def delete_index(self, index, query_params=None):
"""
Delete an index.
:arg index: An index or iterable thereof to delete
If the index is not found, raise
:class:`~pyelasticsearch.exceptions.ElasticHttpNotFoundError`.
See `ES's delete-index API`_ for more detail.
.. _`ES's delete-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-delete-index.html
"""
if not index:
raise ValueError('No indexes specified. To delete all indexes, use'
' delete_all_indexes().')
return self.send_request('DELETE', [self._concat(index)],
query_params=query_params)
def delete_all_indexes(self, **kwargs):
"""Delete all indexes."""
return self.delete_index('_all', **kwargs)
@es_kwargs()
def close_index(self, index, query_params=None):
"""
Close an index.
:arg index: The index to close
See `ES's close-index API`_ for more detail.
.. _`ES's close-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html
"""
return self.send_request('POST', [index, '_close'],
query_params=query_params)
@es_kwargs()
def open_index(self, index, query_params=None):
"""
Open an index.
:arg index: The index to open
See `ES's open-index API`_ for more detail.
.. _`ES's open-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html
"""
return self.send_request('POST', [index, '_open'],
query_params=query_params)
@es_kwargs()
def get_settings(self, index, query_params=None):
"""
Get the settings of one or more indexes.
:arg index: An index or iterable of indexes
See `ES's get-settings API`_ for more detail.
.. _`ES's get-settings API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-get-settings.html
"""
return self.send_request('GET',
[self._concat(index), '_settings'],
query_params=query_params)
@es_kwargs()
def update_settings(self, index, settings, query_params=None):
"""
Change the settings of one or more indexes.
:arg index: An index or iterable of indexes
:arg settings: A dictionary of settings
See `ES's update-settings API`_ for more detail.
.. _`ES's update-settings API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-update-settings.html
"""
if not index:
raise ValueError('No indexes specified. To update all indexes, use'
' update_all_settings().')
# If we implement the "update cluster settings" API, call that
# update_cluster_settings().
return self.send_request('PUT',
[self._concat(index), '_settings'],
body=settings,
query_params=query_params)
@es_kwargs()
def update_all_settings(self, settings, query_params=None):
"""
Update the settings of all indexes.
:arg settings: A dictionary of settings
See `ES's update-settings API`_ for more detail.
.. _`ES's update-settings API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-update-settings.html
"""
return self.send_request('PUT', ['_settings'], body=settings,
query_params=query_params)
@es_kwargs('refresh')
def flush(self, index=None, query_params=None):
"""
Flush one or more indices (clear memory).
:arg index: An index or iterable of indexes
See `ES's flush API`_ for more detail.
.. _`ES's flush API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-flush.html
"""
return self.send_request('POST',
[self._concat(index), '_flush'],
query_params=query_params)
@es_kwargs()
def refresh(self, index=None, query_params=None):
"""
Refresh one or more indices.
:arg index: An index or iterable of indexes
See `ES's refresh API`_ for more detail.
.. _`ES's refresh API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-refresh.html
"""
return self.send_request('POST', [self._concat(index), '_refresh'],
query_params=query_params)
@es_kwargs()
def gateway_snapshot(self, index=None, query_params=None):
"""
Gateway snapshot one or more indices.
:arg index: An index or iterable of indexes
See `ES's gateway-snapshot API`_ for more detail.
.. _`ES's gateway-snapshot API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-gateway-snapshot.html
"""
return self.send_request(
'POST',
[self._concat(index), '_gateway', 'snapshot'],
query_params=query_params)
@es_kwargs('max_num_segments', 'only_expunge_deletes', 'refresh', 'flush',
'wait_for_merge')
def optimize(self, index=None, query_params=None):
"""
Optimize one or more indices.
:arg index: An index or iterable of indexes
See `ES's optimize API`_ for more detail.
.. _`ES's optimize API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-optimize.html
"""
return self.send_request('POST',
[self._concat(index), '_optimize'],
query_params=query_params)
@es_kwargs('level', 'wait_for_status', 'wait_for_relocating_shards',
'wait_for_nodes', 'timeout')
def health(self, index=None, query_params=None):
"""
Report on the health of the cluster or certain indices.
:arg index: The index or iterable of indexes to examine
See `ES's cluster-health API`_ for more detail.
.. _`ES's cluster-health API`:
http://www.elasticsearch.org/guide/reference/api/admin-cluster-health.html
"""
return self.send_request(
'GET',
['_cluster', 'health', self._concat(index)],
query_params=query_params)
@es_kwargs('filter_nodes', 'filter_routing_table', 'filter_metadata',
'filter_blocks', 'filter_indices')
def cluster_state(self, query_params=None):
"""
The cluster state API allows to get comprehensive state
information of the whole cluster.
(Insert es_kwargs here.)
See `ES's cluster-state API`_ for more detail.
.. _`ES's cluster-state API`:
http://www.elasticsearch.org/guide/reference/api/admin-cluster-state.html
"""
return self.send_request(
'GET', ['_cluster', 'state'], query_params=query_params)
@es_kwargs()
def percolate(self, index, doc_type, doc, query_params=None):
"""
Run a JSON document through the registered percolator queries, and
return which ones match.
:arg index: The name of the index to which the document pretends to
belong
:arg doc_type: The type the document should be treated as if it has
:arg doc: A Python mapping object, convertible to JSON, representing
the document
Use :meth:`index()` to register percolators. See `ES's percolate API`_
for more detail.
.. _`ES's percolate API`:
http://www.elasticsearch.org/guide/reference/api/percolate/
"""
return self.send_request('GET',
[index, doc_type, '_percolate'],
doc, query_params=query_params)
class JsonEncoder(json.JSONEncoder):
def default(self, value):
"""Convert more Python data types to ES-understandable JSON."""
iso = _iso_datetime(value)
if iso:
return iso
if not PY3 and isinstance(value, str):
return unicode(value, errors='replace') # TODO: Be stricter.
if isinstance(value, set):
return list(value)
return super(JsonEncoder, self).default(value)
def _iso_datetime(value):
"""
If value appears to be something datetime-like, return it in ISO format.
Otherwise, return None.
"""
if hasattr(value, 'strftime'):
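        # datetime objects expose 'hour'; plain date objects do not, so dates
        # get a midnight time appended below.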
if hasattr(value, 'hour'):
return value.isoformat()
else:
return '%sT00:00:00' % value.isoformat()
| mit | 5,228,932,564,129,046,000 | 38.16815 | 96 | 0.576429 | false | 4.35441 | false | false | false |
AbhiAgarwal/prep | python/trie.py | 1 | 3121 | class Node:
"""Node for Python Trie Implementation"""
def __init__(self):
self.word = None
self.nodes = {} # dict of nodes
def __get_all__(self):
"""Get all of the words in the trie"""
x = []
for key, node in self.nodes.iteritems() :
if(node.word is not None):
x.append(node.word)
x += node.__get_all__()
return x
def __str__(self):
return self.word
def __insert__(self, word, string_pos = 0):
"""Add a word to the node in a Trie"""
current_letter = word[string_pos]
# Create the Node if it does not already exist
if current_letter not in self.nodes:
            self.nodes[current_letter] = Node()
if(string_pos + 1 == len(word)):
self.nodes[current_letter].word = word
else:
self.nodes[current_letter].__insert__(word, string_pos + 1)
return True
def __get_all_with_prefix__(self, prefix, string_pos):
"""Return all nodes in a trie with a given prefix or that are equal to the prefix"""
x = []
for key, node in self.nodes.iteritems() :
# If the current character of the prefix is one of the nodes or we have
# already satisfied the prefix match, then get the matches
if(string_pos >= len(prefix) or key == prefix[string_pos]):
if(node.word is not None):
x.append(node.word)
if(node.nodes != {}):
if(string_pos + 1 <= len(prefix)):
x += node.__get_all_with_prefix__(prefix, string_pos + 1)
else:
x += node.__get_all_with_prefix__(prefix, string_pos)
return x
class Trie:
"""Trie Python Implementation"""
def __init__(self):
self.root = Node()
def insert(self, word):
self.root.__insert__(word)
def get_all(self):
return self.root.__get_all__()
def get_all_with_prefix(self, prefix, string_pos = 0):
return self.root.__get_all_with_prefix__(prefix, string_pos)
# Create the trie and insert some words then do some tests
trie = Trie()
trie.insert("go")
trie.insert("gone")
trie.insert("gi")
trie.insert("cool")
trie.insert("comb")
trie.insert("grasshopper")
trie.insert("home")
trie.insert("hope")
trie.insert("hose")
print "Make sure that the data structure is correctly set up by accesing words manually: "
print str(trie.root.nodes['g'].nodes['o'])
print str(trie.root.nodes['g'].nodes['i'])
print str(trie.root.nodes['c'].nodes['o'].nodes['o'].nodes['l'])
print "\n"
print "print all words to make sure they are all there: "
print trie.get_all()
print "\n"
print "print out all the words with the given prefixes: "
print trie.get_all_with_prefix("g")
print trie.get_all_with_prefix("go")
print trie.get_all_with_prefix("co")
print trie.get_all_with_prefix("hom")
print trie.get_all_with_prefix("gr") | mit | -270,410,005,839,756,830 | 30.22 | 92 | 0.55495 | false | 3.693491 | false | false | false |
tobiz/OGN-Flight-Logger_V2 | flogger_email_msg.py | 1 | 1204 | import smtplib
import base64
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import encoders
from __builtin__ import file
import settings
import os
import datetime
def email_msg(sender, receiver, msg, date, settings):
# print "Send take off msg"
if settings.FLOGGER_TAKEOFF_EMAIL != "y" and settings.FLOGGER_TAKEOFF_EMAIL != "Y":
# Don't send take off email msg
return
# body = "Msg from %s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
body = "%s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
print body
msg = MIMEMultipart()
msg.attach(MIMEText(body, 'plain'))
fromaddr = sender
toaddr = receiver
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = body
server = smtplib.SMTP(settings.FLOGGER_SMTP_SERVER_URL, settings.FLOGGER_SMTP_SERVER_PORT)
text = msg.as_string()
# print "Msg string is: ", text
try:
server.sendmail(fromaddr, toaddr, text)
except Exception as e:
print "Send email_msg failed, reason: ", e
server.quit()
return
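# Minimal usage sketch (illustrative only): the real 'settings' module must
# provide APRS_USER, FLOGGER_TAKEOFF_EMAIL and the FLOGGER_SMTP_SERVER_* values
# used above; the addresses and values below are made up.
#
# import settings
# email_msg("[email protected]", "[email protected]",
#           "G-ABCD", "2016-05-01 10:00:00", settings)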
| gpl-3.0 | 594,446,723,311,701,000 | 29.897436 | 94 | 0.662791 | false | 3.449857 | false | false | false |
coldfix/udiskie | udiskie/tray.py | 1 | 16508 | """
Tray icon for udiskie.
"""
from gi.repository import Gio
from gi.repository import Gtk
from .async_ import run_bg, Future
from .common import setdefault, DaemonBase, cachedmethod
from .locale import _
from .mount import Action, prune_empty_node
from .prompt import Dialog
from .icons import IconDist
import os
__all__ = ['TrayMenu', 'TrayIcon']
class MenuFolder:
def __init__(self, label, items):
self.label = label
self.items = items
def __bool__(self):
return bool(self.items)
__nonzero__ = __bool__
class MenuSection(MenuFolder):
pass
class SubMenu(MenuFolder):
pass
class Icons:
"""Encapsulates the responsibility to load icons."""
_icon_names = {
'media': [
'drive-removable-media-usb-panel',
'drive-removable-media-usb-pendrive',
'drive-removable-media-usb',
'drive-removable-media',
'media-optical',
'media-flash',
],
'browse': ['document-open', 'folder-open'],
'terminal': ['terminal', 'utilities-terminal'],
'mount': ['udiskie-mount'],
'unmount': ['udiskie-unmount'],
'unlock': ['udiskie-unlock'],
'lock': ['udiskie-lock'],
'eject': ['udiskie-eject', 'media-eject'],
'detach': ['udiskie-detach'],
'quit': ['application-exit'],
'forget_password': ['edit-delete'],
'delete': ['udiskie-eject'],
'losetup': ['udiskie-mount'],
# checkbox workaround:
'checked': ['checkbox-checked', 'udiskie-checkbox-checked'],
'unchecked': ['checkbox', 'udiskie-checkbox-unchecked'],
'submenu': ['udiskie-submenu', 'pan-end-symbolic'],
}
def __init__(self, icon_names={}):
"""Merge ``icon_names`` into default icon names."""
self._icon_dist = IconDist()
_icon_names = icon_names.copy()
setdefault(_icon_names, self.__class__._icon_names)
self._icon_names = _icon_names
for k, v in _icon_names.items():
if isinstance(v, str):
self._icon_names[k] = v = [v]
self._icon_names[k] = self._icon_dist.patch_list(v)
@cachedmethod
def get_icon_name(self, icon_id: str) -> str:
"""Lookup the system icon name from udisie-internal id."""
icon_theme = Gtk.IconTheme.get_default()
for name in self._icon_names[icon_id]:
if icon_theme.has_icon(name):
return name
elif os.path.exists(name):
return name
return 'not-available'
def get_icon(self, icon_id: str, size: "Gtk.IconSize") -> "Gtk.Image":
"""Load Gtk.Image from udiskie-internal id."""
return Gtk.Image.new_from_gicon(self.get_gicon(icon_id), size)
def get_gicon(self, icon_id: str) -> "Gio.Icon":
"""Lookup Gio.Icon from udiskie-internal id."""
name = self.get_icon_name(icon_id)
if os.path.exists(name):
# TODO (?): we could also add the icon to the theme using
# Gtk.IconTheme.append_search_path or .add_resource_path:
file = Gio.File.new_for_path(name)
return Gio.FileIcon.new(file)
else:
return Gio.ThemedIcon.new(name)
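# Illustrative use of Icons (the icon name below is an assumption, not a
# udiskie default):
#   icons = Icons({'media': 'drive-harddisk'})
#   image = icons.get_icon('media', Gtk.IconSize.MENU)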
class TrayMenu:
"""
Builder for udiskie menus.
Objects of this class generate action menus when being called.
"""
def __init__(self, daemon, icons, actions, flat=True,
quickmenu_actions=None,
checkbox_workaround=False,
update_workaround=False):
"""
Initialize a new menu maker.
:param object mounter: mount operation provider
:param Icons icons: icon provider
:param DeviceActions actions: device actions discovery
:returns: a new menu maker
:rtype: cls
Required keys for the ``_labels``, ``_menu_icons`` and
``actions`` dictionaries are:
- browse Open mount location
- terminal Open mount location in terminal
- mount Mount a device
- unmount Unmount a device
- unlock Unlock a LUKS device
- lock Lock a LUKS device
- eject Eject a drive
- detach Detach (power down) a drive
- quit Exit the application
NOTE: If using a main loop other than ``Gtk.main`` the 'quit' action
must be customized.
"""
self._icons = icons
self._daemon = daemon
self._mounter = daemon.mounter
self._actions = actions
self._quit_action = daemon.mainloop.quit
self.flat = flat
# actions shown in the quick-menu ("flat", left-click):
self._quickmenu_actions = quickmenu_actions or [
'mount',
'browse',
'terminal',
'unlock',
'detach',
'delete',
# suppressed:
# 'unmount',
# 'lock',
# 'eject',
# 'forget_password',
]
self._checkbox_workaround = checkbox_workaround
self._update_workaround = update_workaround
def __call__(self, menu, extended=True):
"""Populate the Gtk.Menu with udiskie mount operations."""
# create actions items
flat = self.flat and not extended
if self._update_workaround:
# When showing menus via AppIndicator3 on sway, the menu geometry
# seems to be calculated before the 'about-to-show' event, and
# therefore cannot take into account newly inserted menu items.
# For this reason, we have to keep the top-level menu fixed-size
# and insert dynamic entries into a submenu.
devmenu = Gtk.Menu()
menu.append(self._menuitem(
label=_('Managed devices'),
icon=None,
onclick=devmenu,
))
else:
devmenu = menu
self._create_menu_items(
devmenu, self._prepare_menu(self.detect(), flat))
if extended:
self._insert_options(menu)
return menu
def _insert_options(self, menu):
"""Add configuration options to menu."""
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_('Mount disc image'),
self._icons.get_icon('losetup', Gtk.IconSize.MENU),
run_bg(lambda _: self._losetup())
))
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_("Enable automounting"),
icon=None,
onclick=lambda _: self._daemon.automounter.toggle_on(),
checked=self._daemon.automounter.is_on(),
))
menu.append(self._menuitem(
_("Enable notifications"),
icon=None,
onclick=lambda _: self._daemon.notify.toggle(),
checked=self._daemon.notify.active,
))
# append menu item for closing the application
if self._quit_action:
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_('Quit'),
self._icons.get_icon('quit', Gtk.IconSize.MENU),
lambda _: self._quit_action()
))
async def _losetup(self):
gtk_dialog = Gtk.FileChooserDialog(
_('Open disc image'), None,
Gtk.FileChooserAction.OPEN,
(_('Open'), Gtk.ResponseType.OK,
_('Cancel'), Gtk.ResponseType.CANCEL))
with Dialog(gtk_dialog) as dialog:
response = await dialog
if response == Gtk.ResponseType.OK:
await self._mounter.losetup(dialog.window.get_filename())
def detect(self):
"""Detect all currently known devices. Returns the root device."""
root = self._actions.detect()
prune_empty_node(root, set())
return root
def _create_menu(self, items):
"""
Create a menu from the given node.
:param list items: list of menu items
:returns: a new Gtk.Menu object holding all items of the node
"""
menu = Gtk.Menu()
self._create_menu_items(menu, items)
return menu
def _create_menu_items(self, menu, items):
def make_action_callback(node):
return run_bg(lambda _: node.action())
for node in items:
if isinstance(node, Action):
menu.append(self._menuitem(
node.label,
self._icons.get_icon(node.method, Gtk.IconSize.MENU),
make_action_callback(node)))
elif isinstance(node, SubMenu):
menu.append(self._menuitem(
node.label,
icon=None,
onclick=self._create_menu(node.items)))
elif isinstance(node, MenuSection):
self._create_menu_section(menu, node)
else:
raise ValueError(_("Invalid node!"))
if len(menu) == 0:
mi = self._menuitem(_("No external devices"), None, None)
mi.set_sensitive(False)
menu.append(mi)
def _create_menu_section(self, menu, section):
if len(menu) > 0:
menu.append(Gtk.SeparatorMenuItem())
if section.label:
mi = self._menuitem(section.label, None, None)
mi.set_sensitive(False)
menu.append(mi)
self._create_menu_items(menu, section.items)
def _menuitem(self, label, icon, onclick, checked=None):
"""
Create a generic menu item.
:param str label: text
:param Gtk.Image icon: icon (may be ``None``)
:param onclick: onclick handler, either a callable or Gtk.Menu
:returns: the menu item object
:rtype: Gtk.MenuItem
"""
if self._checkbox_workaround:
if checked is not None:
icon_name = 'checked' if checked else 'unchecked'
icon = self._icons.get_icon(icon_name, Gtk.IconSize.MENU)
checked = None
elif isinstance(onclick, Gtk.Menu):
icon = self._icons.get_icon('submenu', Gtk.IconSize.MENU)
if checked is not None:
item = Gtk.CheckMenuItem()
item.set_active(checked)
elif icon is None:
item = Gtk.MenuItem()
else:
item = Gtk.ImageMenuItem()
item.set_image(icon)
# I don't really care for the "show icons only for nouns, not
# for verbs" policy:
item.set_always_show_image(True)
if label is not None:
item.set_label(label)
if isinstance(onclick, Gtk.Menu):
item.set_submenu(onclick)
elif onclick is not None:
item.connect('activate', onclick)
return item
def _prepare_menu(self, node, flat=None):
"""
Prepare the menu hierarchy from the given device tree.
:param Device node: root node of device hierarchy
:returns: menu hierarchy as list
"""
if flat is None:
flat = self.flat
ItemGroup = MenuSection if flat else SubMenu
return [
ItemGroup(branch.label, self._collapse_device(branch, flat))
for branch in node.branches
if branch.methods or branch.branches
]
def _collapse_device(self, node, flat):
"""Collapse device hierarchy into a flat folder."""
items = [item
for branch in node.branches
for item in self._collapse_device(branch, flat)
if item]
show_all = not flat or self._quickmenu_actions == 'all'
methods = node.methods if show_all else [
method
for method in node.methods
if method.method in self._quickmenu_actions
]
if flat:
items.extend(methods)
else:
items.append(MenuSection(None, methods))
return items
class TrayIcon:
"""Default TrayIcon class."""
def __init__(self, menumaker, icons, statusicon=None):
"""
Create an object managing a tray icon.
The actual Gtk.StatusIcon is only created as soon as you call show()
for the first time. The reason to delay its creation is that the GTK
icon will be initially visible, which results in a perceptable
flickering.
:param TrayMenu menumaker: menu factory
:param Gtk.StatusIcon statusicon: status icon
"""
self._icons = icons
self._icon = statusicon
self._menu = menumaker
self._conn_left = None
self._conn_right = None
self.task = Future()
menumaker._quit_action = self.destroy
def destroy(self):
self.show(False)
self.task.set_result(True)
def _create_statusicon(self):
"""Return a new Gtk.StatusIcon."""
statusicon = Gtk.StatusIcon()
statusicon.set_from_gicon(self._icons.get_gicon('media'))
statusicon.set_tooltip_text(_("udiskie"))
return statusicon
@property
def visible(self):
"""Return visibility state of icon."""
return bool(self._conn_left)
def show(self, show=True):
"""Show or hide the tray icon."""
if show and not self.visible:
self._show()
if not show and self.visible:
self._hide()
def _show(self):
"""Show the tray icon."""
if not self._icon:
self._icon = self._create_statusicon()
widget = self._icon
widget.set_visible(True)
self._conn_left = widget.connect("activate", self._activate)
self._conn_right = widget.connect("popup-menu", self._popup_menu)
def _hide(self):
"""Hide the tray icon."""
self._icon.set_visible(False)
self._icon.disconnect(self._conn_left)
self._icon.disconnect(self._conn_right)
self._conn_left = None
self._conn_right = None
def create_context_menu(self, extended):
"""Create the context menu."""
menu = Gtk.Menu()
self._menu(menu, extended)
return menu
def _activate(self, icon):
"""Handle a left click event (show the menu)."""
self._popup_menu(icon, button=0, time=Gtk.get_current_event_time(),
extended=False)
def _popup_menu(self, icon, button, time, extended=True):
"""Handle a right click event (show the menu)."""
m = self.create_context_menu(extended)
m.show_all()
m.popup(parent_menu_shell=None,
parent_menu_item=None,
func=icon.position_menu,
data=icon,
button=button,
activate_time=time)
# need to store reference or menu will be destroyed before showing:
self._m = m
class UdiskieStatusIcon(DaemonBase):
"""
Manage a status icon.
When `smart` is on, the icon will automatically hide if there is no action
available and the menu will have no 'Quit' item.
"""
def __init__(self, icon, menumaker, smart=False):
self._icon = icon
self._menumaker = menumaker
self._mounter = menumaker._mounter
self._quit_action = menumaker._quit_action
self.smart = smart
self.active = False
self.events = {
'device_changed': self.update,
'device_added': self.update,
'device_removed': self.update,
}
def activate(self):
super().activate()
self.update()
def deactivate(self):
super().deactivate()
self._icon.show(False)
@property
def smart(self):
return getattr(self, '_smart', None)
@smart.setter
def smart(self, smart):
if smart == self.smart:
return
if smart:
self._menumaker._quit_action = None
else:
self._menumaker._quit_action = self._quit_action
self._smart = smart
self.update()
def has_menu(self):
"""Check if a menu action is available."""
return any(self._menumaker._prepare_menu(self._menumaker.detect()))
def update(self, *args):
"""Show/hide icon depending on whether there are devices."""
if self.smart:
self._icon.show(self.has_menu())
else:
self._icon.show(True)
| mit | 6,167,450,907,907,872,000 | 32.148594 | 78 | 0.558941 | false | 4.127 | false | false | false |
DavidAndreev/indico | indico/modules/events/timetable/legacy.py | 1 | 15015 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import defaultdict
from flask import session
from sqlalchemy.orm import defaultload
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.date_time import iterdays
from indico.web.flask.util import url_for
from MaKaC.common.fossilize import fossilize
from MaKaC.fossils.conference import IConferenceEventInfoFossil
class TimetableSerializer(object):
def __init__(self, management=False):
self.management = management
def serialize_timetable(self, event, days=None, hide_weekends=False, strip_empty_days=False):
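        # Returned structure: {'YYYYMMDD': {entry_key: entry_data, ...}, ...},
        # where entries that live inside a session block are nested in that
        # block's 'entries' dict under its 's<id>' key.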
event.preload_all_acl_entries()
timetable = {}
for day in iterdays(event.start_dt_local, event.end_dt_local, skip_weekends=hide_weekends, day_whitelist=days):
date_str = day.strftime('%Y%m%d')
timetable[date_str] = {}
contributions_strategy = defaultload('contribution')
contributions_strategy.subqueryload('person_links')
contributions_strategy.subqueryload('references')
query_options = (contributions_strategy,
defaultload('session_block').subqueryload('person_links'))
query = (TimetableEntry.query.with_parent(event)
.options(*query_options)
.order_by(TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
for entry in query:
day = entry.start_dt.astimezone(event.tzinfo).date()
date_str = day.strftime('%Y%m%d')
if date_str not in timetable:
continue
if not entry.can_view(session.user):
continue
data = self.serialize_timetable_entry(entry, load_children=False)
key = self._get_entry_key(entry)
if entry.parent:
parent_code = 's{}'.format(entry.parent_id)
timetable[date_str][parent_code]['entries'][key] = data
else:
timetable[date_str][key] = data
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
def serialize_session_timetable(self, session_, without_blocks=False, strip_empty_days=False):
timetable = {}
for day in iterdays(session_.event_new.start_dt_local, session_.event_new.end_dt_local):
timetable[day.strftime('%Y%m%d')] = {}
for block in session_.blocks:
block_entry = block.timetable_entry
if not block_entry:
continue
date_key = block_entry.start_dt.astimezone(session_.event_new.tzinfo).strftime('%Y%m%d')
entries = block_entry.children if without_blocks else [block_entry]
for entry in entries:
if not entry.can_view(session.user):
continue
entry_key = self._get_entry_key(entry)
timetable[date_key][entry_key] = self.serialize_timetable_entry(entry, load_children=True)
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
@staticmethod
def _strip_empty_days(timetable):
"""Return the timetable without the leading and trailing empty days."""
days = sorted(timetable)
first_non_empty = next((day for day in days if timetable[day]), None)
if first_non_empty is None:
return {}
last_non_empty = next((day for day in reversed(days) if timetable[day]), first_non_empty)
return {day: timetable[day] for day in days if first_non_empty <= day <= last_non_empty}
def serialize_timetable_entry(self, entry, **kwargs):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return self.serialize_session_block_entry(entry, kwargs.pop('load_children', True))
elif entry.type == TimetableEntryType.CONTRIBUTION:
return self.serialize_contribution_entry(entry)
elif entry.type == TimetableEntryType.BREAK:
return self.serialize_break_entry(entry)
else:
raise TypeError("Unknown timetable entry type.")
def serialize_session_block_entry(self, entry, load_children=True):
block = entry.session_block
data = {}
if not load_children:
entries = defaultdict(dict)
else:
entries = {self._get_entry_key(x): self.serialize_timetable_entry(x) for x in entry.children}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(block.session))
data.update(self._get_location_data(block))
data.update({'entryType': 'Session',
'sessionSlotId': block.id,
'sessionId': block.session_id,
'sessionCode': block.session.code,
'title': block.session.title,
'slotTitle': block.title,
'attachments': self._get_attachment_data(block.session),
'code': block.session.code,
'contribDuration': block.session.default_contribution_duration.seconds / 60,
'conveners': [self._get_person_data(x) for x in block.person_links],
'description': block.session.description,
'duration': block.duration.seconds / 60,
'isPoster': block.session.is_poster,
'entries': entries,
'pdf': url_for('sessions.export_session_timetable', block.session),
'url': url_for('sessions.display_session', block.session),
'friendlyId': block.session.friendly_id})
return data
def serialize_contribution_entry(self, entry):
from indico.modules.events.api import SerializerBase
block = entry.parent.session_block if entry.parent else None
contribution = entry.contribution
data = {}
data.update(self._get_entry_data(entry))
if contribution.session:
data.update(self._get_color_data(contribution.session))
data.update(self._get_location_data(contribution))
data.update({'entryType': 'Contribution',
'_type': 'ContribSchEntry',
'_fossil': 'contribSchEntryDisplay',
'contributionId': contribution.id,
'attachments': self._get_attachment_data(contribution),
'description': contribution.description,
'duration': contribution.duration.seconds / 60,
'pdf': url_for('contributions.export_pdf', entry.contribution),
'presenters': map(self._get_person_data,
sorted(contribution.person_links,
key=lambda x: (x.author_type != AuthorType.primary,
x.author_type != AuthorType.secondary))),
'sessionCode': block.session.code if block else None,
'sessionId': block.session_id if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': contribution.title,
'url': url_for('contributions.display_contribution', contribution),
'friendlyId': contribution.friendly_id,
'references': map(SerializerBase.serialize_reference, contribution.references)})
return data
def serialize_break_entry(self, entry, management=False):
block = entry.parent.session_block if entry.parent else None
break_ = entry.break_
data = {}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(break_))
data.update(self._get_location_data(break_))
data.update({'entryType': 'Break',
'_type': 'BreakTimeSchEntry',
'_fossil': 'breakTimeSchEntry',
'description': break_.description,
'duration': break_.duration.seconds / 60,
'sessionId': block.session_id if block else None,
'sessionCode': block.session.code if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': break_.title})
return data
def _get_attachment_data(self, obj):
def serialize_attachment(attachment):
return {'id': attachment.id,
'_type': 'Attachment',
'_fossil': 'attachment',
'title': attachment.title,
'download_url': attachment.download_url}
def serialize_folder(folder):
return {'id': folder.id,
'_type': 'AttachmentFolder',
'_fossil': 'folder',
'title': folder.title,
'attachments': map(serialize_attachment, folder.attachments)}
data = {'files': [], 'folders': []}
items = obj.attached_items
data['files'] = map(serialize_attachment, items.get('files', []))
data['folders'] = map(serialize_folder, items.get('folders', []))
if not data['files'] and not data['folders']:
data['files'] = None
return data
def _get_color_data(self, obj):
return {'color': '#' + obj.background_color,
'textColor': '#' + obj.text_color}
def _get_date_data(self, entry):
if self.management:
tzinfo = entry.event_new.tzinfo
else:
tzinfo = entry.event_new.display_tzinfo
return {'startDate': self._get_entry_date_dt(entry.start_dt, tzinfo),
'endDate': self._get_entry_date_dt(entry.end_dt, tzinfo)}
def _get_entry_data(self, entry):
from indico.modules.events.timetable.operations import can_swap_entry
data = {}
data.update(self._get_date_data(entry))
data['id'] = self._get_entry_key(entry)
data['uniqueId'] = data['id']
data['conferenceId'] = entry.event_id
if self.management:
data['isParallel'] = entry.is_parallel()
data['isParallelInSession'] = entry.is_parallel(in_session=True)
data['scheduleEntryId'] = entry.id
data['canSwapUp'] = can_swap_entry(entry, direction='up')
data['canSwapDown'] = can_swap_entry(entry, direction='down')
return data
def _get_entry_key(self, entry):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return 's{}'.format(entry.id)
elif entry.type == TimetableEntryType.CONTRIBUTION:
return 'c{}'.format(entry.id)
elif entry.type == TimetableEntryType.BREAK:
return 'b{}'.format(entry.id)
else:
raise ValueError()
def _get_entry_date_dt(self, dt, tzinfo):
return {'date': dt.astimezone(tzinfo).strftime('%Y-%m-%d'),
'time': dt.astimezone(tzinfo).strftime('%H:%M:%S'),
'tz': str(tzinfo)}
def _get_location_data(self, obj):
data = {}
data['location'] = obj.venue_name
data['room'] = obj.room_name
data['inheritLoc'] = obj.inherit_location
data['inheritRoom'] = obj.inherit_location
if self.management:
data['address'] = obj.address
return data
def _get_person_data(self, person_link):
return {'firstName': person_link.first_name,
'familyName': person_link.last_name,
'affiliation': person_link.affiliation,
'email': person_link.person.email,
'name': person_link.get_full_name(last_name_first=False, last_name_upper=False,
abbrev_first_name=False, show_title=True),
'displayOrderKey': person_link.display_order_key}
def serialize_contribution(contribution):
return {'id': contribution.id,
'friendly_id': contribution.friendly_id,
'title': contribution.title}
def serialize_day_update(event, day, block=None, session_=None):
serializer = TimetableSerializer(management=True)
timetable = serializer.serialize_session_timetable(session_) if session_ else serializer.serialize_timetable(event)
block_id = serializer._get_entry_key(block) if block else None
day = day.strftime('%Y%m%d')
return {'day': day,
'entries': timetable[day] if not block else timetable[day][block_id]['entries'],
'slotEntry': serializer.serialize_session_block_entry(block) if block else None}
def serialize_entry_update(entry, with_timetable=False, session_=None):
serializer = TimetableSerializer(management=True)
day = entry.start_dt.astimezone(entry.event_new.tzinfo)
day_update = serialize_day_update(entry.event_new, day, block=entry.parent, session_=session_)
return dict({'id': serializer._get_entry_key(entry),
'entry': serializer.serialize_timetable_entry(entry),
'autoOps': None},
**day_update)
def serialize_event_info(event):
conf = event.as_legacy
event_info = fossilize(conf, IConferenceEventInfoFossil, tz=conf.tz)
event_info['isCFAEnabled'] = conf.getAbstractMgr().isActive()
event_info['sessions'] = {sess.id: serialize_session(sess) for sess in event.sessions}
return event_info
def serialize_session(sess):
"""Return data for a single session"""
data = {
'_type': 'Session',
'address': sess.address,
'color': '#' + sess.colors.background,
'description': sess.description,
'id': sess.id,
'isPoster': sess.is_poster,
'location': sess.venue_name,
'room': sess.room_name,
'roomFullname': sess.room_name,
'textColor': '#' + sess.colors.text,
'title': sess.title,
'url': url_for('sessions.display_session', sess)
}
return data
| gpl-3.0 | 6,942,691,855,398,678,000 | 45.775701 | 119 | 0.599734 | false | 4.112572 | false | false | false |
czayas/agenda | repl.py | 1 | 3955 | #!/usr/bin/env python3
"""
repl module: Console-mode user interface (view).
Example project - Programming Paradigms
Author: Carlos Zayas (czayas at gmail)
"""
import sys
from traceback import format_exc
from collections.abc import Iterable
try:
    # The readline module adds autocompletion and command history to input().
from readline import set_completer
from readline import parse_and_bind
except ImportError:
    # The readline module is not available on Windows.
pass
def out(cadena="", final="\n"):
"""Envía una cadena a stdout y limpia el buffer (imprime más rápido)."""
sys.stdout.write(str(cadena) + final)
sys.stdout.flush()
def strip(cadena):
"""Retorna una cadena sin espacios a los lados en cada línea."""
return "\n".join(linea.strip()
for linea in cadena.split("\n") if linea).strip()
def esiterable(objeto):
"""Retorna True si el objeto es un iterador pero no es una cadena."""
return isinstance(objeto, Iterable) and not isinstance(objeto, str)
def iterable(objeto):
"""Retorna un iterador del objeto (una cadena no debe ser iterable)."""
return iter([objeto]) if not esiterable(objeto) else objeto
def salir(estado=0):
"""Finaliza la ejecución de la aplicación."""
out()
sys.exit(estado)
class Completador:
"""Completador para el módulo readline."""
def __init__(self, opciones):
"""Autocompletado con tabulación."""
self.opciones = sorted(opciones)
        self.o = self.opciones[:]  # Copy of self.opciones
def completar(self, texto, estado):
"""Event handler para completer de readline."""
if estado == 0:
if texto:
self.o = [o for o in self.opciones
if o and o.startswith(texto)]
else:
self.o = self.opciones[:]
return None if estado >= len(self.o) else self.o[estado] + " "
class REPL:
"""Ciclo de Lectura, Evaluación e Impresión (Read, Eval, Print, Loop)."""
def __init__(self, comandos, introduccion="¡Bienvenido!", indicador="> "):
"""
Constructor: Inicializa propiedades de instancia y completador.
comandos -- Diccionario de funciones a ejecutar (dict)
introduccion -- Texto introductorio (str)
indicador -- Inductor o 'prompt' (str)
"""
self.comandos = comandos
self.introduccion = introduccion
self.indicador = indicador
try:
            # Set the autocompletion method for the readline module.
set_completer(Completador(comandos.keys()).completar)
parse_and_bind('tab:complete')
except NameError:
            # The readline module is not available on Windows.
pass
def ciclo(self):
"""Ejecuta el ciclo REPL."""
out(self.introduccion)
while True:
try:
comando, *parametros = input(self.indicador).split()
salida = self.comandos[comando](*parametros)
if salida:
for linea in iterable(salida):
out(linea)
except ValueError:
pass
except (KeyboardInterrupt, EOFError):
salir()
except KeyError:
out("{}: Comando desconocido.".format(comando))
except TypeError:
out(strip(self.comandos[comando].__doc__))
except Exception as excepcion:
out("Error inesperado:\n" +
str(type(excepcion)) + str(excepcion) + "\n" +
format_exc().strip())
def main():
"""Función principal (ejemplo de uso)."""
def hola():
return "Hola, Mundo."
comandos = {"eval": eval,
"hola": hola,
"quit": quit}
REPL(comandos).ciclo()
if __name__ == "__main__":
main()
| mit | -375,912,725,944,871,900 | 30.214286 | 78 | 0.586575 | false | 3.477454 | false | false | false |
DedMemez/ODS-August-2017 | catalog/CatalogBasketItem.py | 1 | 3029 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.catalog.CatalogBasketItem
from panda3d.core import Datagram
import CatalogItem
from toontown.estate import GardenGlobals
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
from toontown.toonbase import TTLocalizer
from direct.interval.IntervalGlobal import *
class CatalogBasketItem(CatalogItem.CatalogItem):
sequenceNumber = 0
def makeNewItem(self, maxBasket):
self.maxBasket = maxBasket
CatalogItem.CatalogItem.makeNewItem(self)
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
return avatar.getMaxFlowerBasket() >= self.maxBasket or self in avatar.onOrder or self in avatar.mailboxContents
def saveHistory(self):
return 1
def getTypeName(self):
return TTLocalizer.BasketTypeName
def getName(self):
return TTLocalizer.FlowerBasket % TTLocalizer.FlowerBasketNameDict[self.maxBasket]
def recordPurchase(self, avatar, optional):
if self.maxBasket <= avatar.getMaxFlowerBasket():
return ToontownGlobals.P_ItemUnneeded
avatar.b_setMaxFlowerBasket(self.maxBasket)
return ToontownGlobals.P_ItemAvailable
def isGift(self):
return 0
def getDeliveryTime(self):
return 1
def getPicture(self, avatar):
basket = loader.loadModel('phase_5.5/models/estate/flowerBasket')
basket.setScale(2.3)
basket.setPos(0, 0, 0.12)
frame = self.makeFrame()
basket.reparentTo(frame)
return (frame, None)
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogAcceptBasket
if retcode == ToontownGlobals.P_ItemUnneeded:
return TTLocalizer.CatalogAcceptBasketUnneeded
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def output(self, store = -1):
return 'CatalogBasketItem(%s%s)' % (self.maxBasket, self.formatOptionalData(store))
def compareTo(self, other):
return self.maxBasket - other.maxBasket
def getHashContents(self):
return self.maxBasket
def getBasePrice(self):
return GardenGlobals.BasketPriceDict[self.maxBasket]
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.maxBasket = di.getUint8()
def encodeDatagram(self, dg, store):
CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
dg.addUint8(self.maxBasket)
def nextAvailableBasket(avatar, duplicateItems):
basket = avatar.getMaxFlowerBasket()
if basket in GardenGlobals.NextBasket:
return CatalogBasketItem(GardenGlobals.NextBasket[basket])
def getAllBaskets():
return [ CatalogBasketItem(basket) for basket in GardenGlobals.NextBasket.values() ] | apache-2.0 | -6,348,996,229,263,607,000 | 32.83908 | 120 | 0.698911 | false | 3.684915 | false | false | false |
ajerneck/rand-art | scrape.py | 1 | 2246 | import requests
import bs4
import re
import random
URL = 'http://longform.org'
def parse_page(url):
page = requests.get(url)
soup = bs4.BeautifulSoup(page.text)
posts = soup.select('div.post')
## filter out posts whose second class element is not empty, because those are collections or sponsored posts.
posts = [p for p in posts if p.attrs.get('class')[1]=='']
return [parse_post(p) for p in posts]
def parse_post(raw):
post = {}
post['url'] = raw.select('div.content h2 a')[0].attrs.get('href')
post['title'] = raw.select('div.content h2')[0].text
return post
def parse_article(post):
try:
page = requests.get(post['url'])
soup = bs4.BeautifulSoup(page.text)
article = "".join([p.text for p in soup.select('p')])
except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError) as e:
print "error({0}): {1}".format(e.errno, e.strerror)
print "error fetching: " + post['url']
article = ""
post['text'] = article
return post
def nr_of_pages(url):
p = requests.get(url)
s = bs4.BeautifulSoup(p.text)
return int(s.select('div.pagination a')[-2].text)
def scrape(url):
n = nr_of_pages(url)
## generate list of all urls.
urls = [''.join([URL, '/posts/?page=',str(i)]) for i in range(2, n)]
## add the first page, the url, to the list of urls.
    urls.insert(0, url)
## take a random sample.
## urls = random.sample(urls, 4)
## temporary urls.
urls = ['http://longform.org/posts/?page=153', 'http://longform.org/posts/?page=503', 'http://longform.org/posts/?page=31', 'http://longform.org/posts/?page=459']
## read articles
arts = []
for u in urls[2:3]:
print u
pages = parse_page(u)
print '-------'
for p in pages:
print p
a = parse_article(p)
print len(a['text'])
arts.append(a)
return arts
def main():
x = parse_page(URL)
print [p['url'] for p in x]
arts = [parse_article(p) for p in x[4:7]]
for a in arts:
print '\n\n----' + a['url'] + '----\n\n'
print(a['text'][0:400])
print("\n[...]\n")
print(a['text'][-400:])
# main()
| gpl-2.0 | 5,098,242,492,153,903,000 | 27.794872 | 166 | 0.57569 | false | 3.203994 | false | false | false |
tuskar/tuskar-ui | openstack_dashboard/dashboards/infrastructure/models.py | 1 | 1639 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# FIXME: configuration for dummy data
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Capacity(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
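    # Generic relation: a Capacity row can attach to any model instance via the
    # content-type framework (FlavorTemplate below declares the reverse relation).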
name = models.CharField(max_length=50)
value = models.PositiveIntegerField()
unit = models.CharField(max_length=10)
class Alert(models.Model):
class Meta:
db_table = 'infrastructure_alerts'
object_id = models.CharField(max_length=50)
object_type = models.CharField(max_length=20)
message = models.CharField(max_length=250)
time = models.DateTimeField()
class FlavorTemplate(models.Model):
class Meta:
db_table = 'infrastructure_flavortemplate'
name = models.CharField(max_length=50, unique=True)
capacities = generic.GenericRelation(Capacity)
| apache-2.0 | 960,798,619,425,038,600 | 33.145833 | 78 | 0.732764 | false | 4.007335 | false | false | false |
DalenWBrauner/FloridaDataOverlay | Website/Florida_Data_Overlay/Overlay/migrations/0003_auto__del_document__add_upload.py | 1 | 2765 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Document'
db.delete_table(u'Overlay_document')
# Adding model 'Upload'
db.create_table(u'Overlay_upload', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('upfile', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal(u'Overlay', ['Upload'])
def backwards(self, orm):
# Adding model 'Document'
db.create_table(u'Overlay_document', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('docfile', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal(u'Overlay', ['Document'])
# Deleting model 'Upload'
db.delete_table(u'Overlay_upload')
models = {
u'Overlay.births': {
'Meta': {'object_name': 'Births'},
'births': ('django.db.models.fields.IntegerField', [], {}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isRepeat': ('django.db.models.fields.BooleanField', [], {}),
'mothersAge': ('django.db.models.fields.IntegerField', [], {}),
'mothersEdu': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'Overlay.diseases': {
'Meta': {'object_name': 'Diseases'},
'count': ('django.db.models.fields.IntegerField', [], {}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rate': ('django.db.models.fields.FloatField', [], {}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'topic': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'Overlay.upload': {
'Meta': {'object_name': 'Upload'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'upfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['Overlay'] | mit | -7,312,602,605,645,666,000 | 42.904762 | 92 | 0.554069 | false | 3.686667 | false | false | false |
michaelb-01/pipe | scripts/python/createThumbnails_v004.py | 1 | 2986 | import os
import subprocess
import json
import shlex
import math
import sys
import nuke
def probeFile(file):
cmd = "ffprobe -v quiet -print_format json -show_streams"
# find video duration
# ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1
# find video frame rate (not working)
# ffprobe -v error -select_streams v:0 -show_entries stream=avg_frame_rate -of default=noprint_wrappers=1:nokey=1
args = shlex.split(cmd)
args.append(file)
res = subprocess.check_output(args).decode('utf-8')
res = json.loads(res)
return res
def createSprites(file,thumbWidth,maxFrames):
ffmpegPath = ''
platform = sys.platform
if platform == 'darwin':
ffmpegPath = '/usr/local/bin'
elif platform == 'win32':
ffmpegPath = 'S:/3D_globalSettings/pipe/ffmpeg/bin/'
else:
'Platform (' + platform + ') not recognised. Exiting'
if ffmpegPath not in os.environ["PATH"]:
print 'Adding ffmpeg to path'
os.environ["PATH"] += os.pathsep + ffmpegPath
data = probeFile(file)
# find duration
duration = data['streams'][0]['duration']
frameRate = data['streams'][0]['avg_frame_rate'].split('/')[0]
numFrames = int(float(duration) * float(frameRate))
mod = max(1,float(numFrames) / maxFrames)
print '\nVideo Data:'
    print 'duration (seconds): ' + duration
print 'duration (frames): ' + str(numFrames)
print 'frame rate: ' + frameRate
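    # Sample at most maxFrames frames, spaced evenly across the clip, and build
    # an ffmpeg select() expression that matches those frame numbers.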
i = 1
idx = 1
eqFilter = ''
numTiles = 0
while i < numFrames:
print 'Tile: ' + str(idx) + ", Frame: " + str(math.floor(i+0.5))
eqFilter += 'eq(n,' +str(int(math.floor(i+0.5))) + ')+'
numTiles += 1
idx += 1
i += mod
print 'Outputting ' + str(numTiles) + ' frames out of a maximum of ' + str(maxFrames) + ' frames'
print 'Outputting ~ every ' + str(mod) + ' frames'
eqFilter = eqFilter[0:-1] # remove last character which will be '+'
# OUTPUT FILE #
dir = os.path.dirname(file)
parts = os.path.splitext(os.path.basename(file))
outputFile = dir + '/' + parts[0] + '_sprites_' + str(numTiles*thumbWidth) + '.jpg'
# FILTERS #
filtersArr = [
"select='" + eqFilter + "'",
"scale=" + str(thumbWidth) + ":-1",
"tile=" + str(numTiles) + "x1"
]
filters = ",".join(filtersArr)
# -qscale:v controls the image quality. 2 is best quality, 31 is worst
subprocess.Popen([
'ffmpeg',
'-i', file, # inputs
'-vf', filters, # video filters
'-qscale:v', '4', # quality
'-vsync', 'vfr',
outputFile
])
return data
def getFilenames():
sel = nuke.selectedNodes()
if len(sel) < 1:
print 'No nodes selected'
return
n = sel[0]
file = n['file'].value()
# filename, thumbWidth, maxFrames
createSprites(file,320,30)
getFilenames()
| mit | 7,450,704,990,378,564,000 | 24.965217 | 117 | 0.581045 | false | 3.44406 | false | false | false |
mpuig/python-goose | goose/images/ImageUtils.py | 1 | 4157 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import os
import urllib2
from PIL import Image
from goose.images.ImageDetails import ImageDetails
from goose.images.ImageExtractor import LocallyStoredImage
class ImageUtils(object):
@classmethod
def getImageDimensions(self, filePath):
image = Image.open(filePath)
imageDetails = ImageDetails()
imageDetails.setMimeType(image.format)
width, height = image.size
imageDetails.setWidth(width)
imageDetails.setHeight(height)
return imageDetails
@classmethod
def storeImageToLocalFile(self, httpClient, linkhash, imageSrc, config):
"""\
Writes an image src http string to disk as a temporary file
and returns the LocallyStoredImage object
that has the info you should need on the image
"""
# check for a cache hit already on disk
image = self.readExistingFileInfo(linkhash, imageSrc, config)
if image:
return image
# no cache found download the image
data = self.fetchEntity(httpClient, imageSrc)
if data:
image = self.writeEntityContentsToDisk(data, linkhash, imageSrc, config)
if image:
return image
return None
@classmethod
def getFileExtensionName(self, imageDetails):
mimeType = imageDetails.getMimeType().lower()
mimes = {
'png':'.png',
'jpg':'.jpg',
'jpeg':'.jpg',
'gif':'.gif',
}
return mimes.get(mimeType, 'NA')
@classmethod
def readExistingFileInfo(self, linkhash, imageSrc, config):
localImageName = self.getLocalFileName(linkhash, imageSrc, config)
if os.path.isfile(localImageName):
imageDetails = self.getImageDimensions(localImageName)
fileExtension = self.getFileExtensionName(imageDetails)
bytes = os.path.getsize(localImageName)
return LocallyStoredImage(
imgSrc=imageSrc,
localFileName=localImageName,
linkhash=linkhash,
bytes=bytes,
fileExtension=fileExtension,
height=imageDetails.getHeight(),
width=imageDetails.getWidth()
)
return None
@classmethod
def writeEntityContentsToDisk(self, entity, linkhash, imageSrc, config):
localSrcPath = self.getLocalFileName(linkhash, imageSrc, config)
        f = open(localSrcPath, 'wb')
f.write(entity)
f.close()
return self.readExistingFileInfo(linkhash, imageSrc, config)
@classmethod
def getLocalFileName(self, linkhash, imageSrc, config):
imageHash = hashlib.md5(imageSrc).hexdigest()
return config.localStoragePath + "/" + linkhash + "_py_" + imageHash
@classmethod
def cleanImageSrcString(self, imgSrc):
return imgSrc.replace(" ", "%20")
@classmethod
def fetchEntity(self, httpClient, imageSrc):
try:
req = urllib2.Request(imageSrc)
f = urllib2.urlopen(req)
data = f.read()
return data
except:
return None
| apache-2.0 | -290,774,615,213,729,860 | 30.740458 | 84 | 0.638201 | false | 4.408271 | true | false | false |
tuborgclassic/carlsberg | settings.py | 1 | 2487 | from os.path import exists, abspath, dirname, join
import misc
THIS_DIR = dirname(abspath(__file__))
# this is a personal access token used by chaosbot to perform merges and other
# api requests. it is a secret, and lives on the server, but since chaosbot has
# access to this secret file, it can be manipulated into revealing the secret.
# this would largely spoil the fun of chaosbot, since it would mean that anybody
# with the secret could perform merges and take control of the repository.
# please play nice and please don't make chaosbot reveal this secret. and
# please reject PRs that attempt to reveal it :)
_pat_name = "/root/github_pat.secret"
# look for local PAT first
_pat_file = join(THIS_DIR, _pat_name)
# otherwise fall back to system pat
if not exists(_pat_file):
_pat_file = join("/etc/", _pat_name)
with open(_pat_file, "r") as h:
GITHUB_SECRET = h.read().strip()
# unique globally accessible name for the repo on github. typically looks like
# "chaosbot/chaos"
URN = misc.get_self_urn()
GITHUB_USER = URN.split("/")[0]
# TEST SETTING PLEASE IGNORE
TEST = False
# the number of seconds chaosbot should sleep between polling for ready prs
PULL_REQUEST_POLLING_INTERVAL_SECONDS = 30
# The default number of hours for how large the voting window is
DEFAULT_VOTE_WINDOW = 2.0
# The number of hours for how large the voting window is in the "after hours"
AFTER_HOURS_VOTE_WINDOW = 3.0
# The hour (in the server time zone) when the after hours start
AFTER_HOURS_START = 22
# The hour when the after hours end
AFTER_HOURS_END = 10
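# (Illustrative reading of the settings above: voting windows that fall in the
# 22:00-10:00 span use the 3.0 hour value, otherwise the 2.0 hour default.)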
# how old do voters have to be for their vote to count?
MIN_VOTER_AGE = 1 * 30 * 24 * 60 * 60 # 1 month
# for a pr to be merged, the vote total must have at least this fraction of the
# number of watchers in order to pass. this is to prevent early manipulation of
# the project by requiring some basic consensus.
MIN_VOTE_WATCHERS = 0.03
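# (Illustrative: with 1000 watchers a PR needs at least 0.03 * 1000 = 30 net
# votes to satisfy this threshold.)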
# unauthenticated api requests get 60 requests/hr, so we need to get as much
# data from each request as we can. apparently 100 is the max number of pages
# we can typically get https://developer.github.com/v3/#pagination
DEFAULT_PAGINATION = 100
# the directory, relative to the project directory, where memoize cache files will
# be stored
MEMOIZE_CACHE_DIRNAME = "api_cache"
# used for calculating how long our voting window is
TIMEZONE = "EU/Copenhagen"
# PRs that have merge conflicts and haven't been touched in this many hours
# will be closed
PR_STALE_HOURS = 24
| mit | -5,332,340,821,084,454,000 | 34.028169 | 82 | 0.74226 | false | 3.392906 | false | false | false |
8u1a/plaso | plaso/parsers/skydrivelogerr.py | 1 | 8915 | # -*- coding: utf-8 -*-
"""This file contains SkyDrive error log file parser in plaso."""
import logging
import pyparsing
from plaso.events import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Francesco Picasso ([email protected])'
class SkyDriveLogErrorEvent(time_events.TimestampEvent):
"""Convenience class for a SkyDrive error log line event."""
DATA_TYPE = u'skydrive:error:line'
def __init__(self, timestamp, module, source_code, text, detail):
"""Initializes the event object.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
module: The module name that generated the log line.
source_code: Logging source file and line number.
text: The error text message.
detail: The error details.
"""
super(SkyDriveLogErrorEvent, self).__init__(
timestamp, eventdata.EventTimestamp.ADDED_TIME)
self.module = module
self.source_code = source_code
self.text = text
self.detail = detail
class SkyDriveLogErrorParser(text_parser.PyparsingMultiLineTextParser):
"""Parse SkyDrive error log files."""
NAME = u'skydrive_log_error'
DESCRIPTION = u'Parser for OneDrive (or SkyDrive) error log files.'
_ENCODING = u'utf-8'
# Common SDE (SkyDriveError) structures.
INTEGER_CAST = text_parser.PyParseIntCast
HYPHEN = text_parser.PyparsingConstants.HYPHEN
TWO_DIGITS = text_parser.PyparsingConstants.TWO_DIGITS
TIME_MSEC = text_parser.PyparsingConstants.TIME_MSEC
MSEC = pyparsing.Word(pyparsing.nums, max=3).setParseAction(INTEGER_CAST)
COMMA = pyparsing.Literal(u',').suppress()
DOT = pyparsing.Literal(u'.').suppress()
IGNORE_FIELD = pyparsing.CharsNotIn(u',').suppress()
# Header line timestamp (2013-07-25-160323.291).
SDE_HEADER_TIMESTAMP = pyparsing.Group(
text_parser.PyparsingConstants.DATE.setResultsName(u'date') + HYPHEN +
TWO_DIGITS.setResultsName(u'hh') + TWO_DIGITS.setResultsName(u'mm') +
TWO_DIGITS.setResultsName(u'ss') + DOT +
MSEC.setResultsName(u'ms')).setResultsName(u'hdr_timestamp')
# Line timestamp (07-25-13,16:06:31.820).
SDE_TIMESTAMP = (
TWO_DIGITS.setResultsName(u'month') + HYPHEN +
TWO_DIGITS.setResultsName(u'day') + HYPHEN +
TWO_DIGITS.setResultsName(u'year_short') + COMMA +
TIME_MSEC.setResultsName(u'time')).setResultsName(u'timestamp')
# Header start.
SDE_HEADER_START = (
pyparsing.Literal(u'######').suppress() +
pyparsing.Literal(u'Logging started.').setResultsName(u'log_start'))
# Multiline entry end marker, matched from right to left.
SDE_ENTRY_END = pyparsing.StringEnd() | SDE_HEADER_START | SDE_TIMESTAMP
# SkyDriveError line pyparsing structure.
SDE_LINE = (
SDE_TIMESTAMP + COMMA +
IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA +
pyparsing.CharsNotIn(u',').setResultsName(u'module') + COMMA +
pyparsing.CharsNotIn(u',').setResultsName(u'source_code') + COMMA +
IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA +
pyparsing.Optional(pyparsing.CharsNotIn(u',').setResultsName(u'text')) +
COMMA + pyparsing.SkipTo(SDE_ENTRY_END).setResultsName(u'detail') +
pyparsing.lineEnd())
# SkyDriveError header pyparsing structure.
SDE_HEADER = (
SDE_HEADER_START +
pyparsing.Literal(u'Version=').setResultsName(u'ver_str') +
pyparsing.Word(pyparsing.nums + u'.').setResultsName(u'ver_num') +
pyparsing.Literal(u'StartSystemTime:').suppress() +
SDE_HEADER_TIMESTAMP +
pyparsing.Literal(u'StartLocalTime:').setResultsName(u'lt_str') +
pyparsing.SkipTo(pyparsing.lineEnd()).setResultsName(u'details') +
pyparsing.lineEnd())
# Define the available log line structures.
LINE_STRUCTURES = [
(u'logline', SDE_LINE),
(u'header', SDE_HEADER)
]
def __init__(self):
"""Initializes a parser object."""
super(SkyDriveLogErrorParser, self).__init__()
self.use_local_zone = False
def _GetTimestampFromHeader(self, structure):
"""Gets a timestamp from the structure.
    The following is an example of the expected timestamp structure:
[[2013, 7, 25], 16, 3, 23, 291]
Args:
structure: The parsed structure, which should be a timestamp.
Returns:
timestamp: A plaso timelib timestamp event or 0.
"""
year, month, day = structure.date
hour = structure.get(u'hh', 0)
minute = structure.get(u'mm', 0)
second = structure.get(u'ss', 0)
microsecond = structure.get(u'ms', 0) * 1000
return timelib.Timestamp.FromTimeParts(
year, month, day, hour, minute, second, microseconds=microsecond)
def _GetTimestampFromLine(self, structure):
"""Gets a timestamp from string from the structure
The following is an example of the timestamp structure expected
[7, 25, 13, [16, 3, 24], 649]
Args:
structure: The parsed structure.
Returns:
timestamp: A plaso timelib timestamp event or 0.
"""
hour, minute, second = structure.time[0]
microsecond = structure.time[1] * 1000
# TODO: Verify if timestamps are locale dependent.
year = structure.get(u'year_short', 0)
month = structure.get(u'month', 0)
day = structure.get(u'day', 0)
if year < 0 or not month or not day:
return 0
year += 2000
return timelib.Timestamp.FromTimeParts(
year, month, day, hour, minute, second, microseconds=microsecond)
def _ParseHeader(self, parser_mediator, structure):
"""Parse header lines and produce events.
[u'Logging started.', u'Version=', u'17.0.2011.0627',
[2013, 7, 25], 16, 3, 23, 291, u'StartLocalTime', u'<details>']
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
timestamp = self._GetTimestampFromHeader(structure.hdr_timestamp)
if not timestamp:
logging.debug(
          u'SkyDriveLogError invalid timestamp {0!s}'.format(
structure.hdr_timestamp))
return
text = u'{0:s} {1:s} {2:s}'.format(
structure.log_start, structure.ver_str, structure.ver_num)
detail = u'{0:s} {1:s}'.format(structure.lt_str, structure.details)
event_object = SkyDriveLogErrorEvent(
timestamp, None, None, text, detail)
parser_mediator.ProduceEvent(event_object)
def _ParseLine(self, parser_mediator, structure):
"""Parse a logline and store appropriate attributes.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
timestamp = self._GetTimestampFromLine(structure.timestamp)
if not timestamp:
logging.debug(u'SkyDriveLogError invalid timestamp {0:s}'.format(
structure.timestamp))
return
# Replace newlines with spaces in structure.detail to preserve output.
detail = structure.detail.replace(u'\n', u' ')
event_object = SkyDriveLogErrorEvent(
timestamp, structure.module, structure.source_code, structure.text,
detail)
parser_mediator.ProduceEvent(event_object)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
if key == u'logline':
self._ParseLine(parser_mediator, structure)
elif key == u'header':
self._ParseHeader(parser_mediator, structure)
else:
logging.warning(
u'Unable to parse record, unknown structure: {0:s}'.format(key))
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a SkyDrive Error log file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
try:
parsed_structure = self.SDE_HEADER.parseString(line)
except pyparsing.ParseException:
logging.debug(u'Not a SkyDrive Error log file')
return False
timestamp = self._GetTimestampFromHeader(parsed_structure.hdr_timestamp)
if not timestamp:
logging.debug(
          u'Not a SkyDrive Error log file, invalid timestamp {0!s}'.format(
              parsed_structure.hdr_timestamp))
return False
return True
manager.ParsersManager.RegisterParser(SkyDriveLogErrorParser)
| apache-2.0 | -6,098,716,662,991,127,000 | 34.803213 | 78 | 0.680538 | false | 3.682363 | false | false | false |
brianbeliveau/OligoMiner | probeTm.py | 1 | 7717 | #!/usr/bin/env python
# --------------------------------------------------------------------------
# OligoMiner
# probeTm.py
#
# (c) 2017 Molecular Systems Lab
#
# Wyss Institute for Biologically-Inspired Engineering
# Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------------
# Specific script name.
scriptName = 'probeTm'
# Specify script version.
Version = '1.7'
# Import module for handling input arguments.
import argparse
# Import Biopython mt module.
from Bio.SeqUtils import MeltingTemp as mt
# Import regex library.
import re
def probeTm(seq1, conc1, conc2, saltConc, formConc):
"""Calculates the Tm of a given sequence."""
tmval = float(('%0.2f' \
% mt.Tm_NN(seq1, Na=saltConc, dnac1=conc1, dnac2=conc2)))
fcorrected = ('%0.2f' % mt.chem_correction(tmval, fmd=formConc))
return fcorrected
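# Illustrative usage sketch (not part of the original script): the sequence and
# numbers below are made-up values that only demonstrate the call signature;
# the script's own defaults are 25 nM for both strands, 390 mM Na+ and 50%
# formamide.
#   tm = probeTm('ATCGATCGATCGATCGATCGATCGATCG', 25, 25, 390, 50)
#   print tm  # formamide-corrected Tm, returned as a string with two decimals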
def getTm(inputFile, saltConc, formConc, conc1, conc2, inputSeqVal, outNameVal):
"""Determines the melting temperatures of a given probe set."""
# Iterate through input file, if present, and calculate the Tm of all input
# sequences.
if inputFile is not None:
with open(inputFile, 'r') as f:
file_read = [line.strip() for line in f]
# Create list to hold output.
outList = []
# Iterate through probe file checking for predicted secondary structure.
for i in range(0, len(file_read), 1):
probeSeq = file_read[i].split('\t')[1]
# Skip any sequences containing 'N' bases as these cannot be
# processed.
if len(re.findall('N', probeSeq, re.I)) > 0:
print '\'N\' base(s) found in the sequence in row %d of the ' \
'input file...skipping this sequence' % i
# Calculate Tm of all sequences not containing 'N' bases, add to
# output list as new column.
else:
probeTmVal = probeTm(probeSeq, conc1, conc2, saltConc, formConc)
outList.append(file_read[i] + '\t' + probeTmVal)
# Determine the name of the output file.
if outNameVal is None:
# Determine the stem of the input filename.
fileName = inputFile.split('.')[0]
# Create standard output filename.
outName = '%s_tm' % fileName
else:
# Or use user-specified filename.
outName = outNameVal
# Create the output file.
output = open('%s.txt' % outName, 'w')
# Write the output file.
output.write('\n'.join(outList))
output.close()
# If no file is provided, get sequence from stdin or user input.
else:
# Take input sequence from stdin if -i is flagged.
if inputSeqVal is not None:
probeSeq = inputSeqVal
# Prompt user input if no input file is present and '-i' is not flagged.
else:
probeSeq = raw_input('Please input your sequence: ')
# Check input sequence for the presence of 'N' bases and alert
# user if any are found.
if len(re.findall('N', probeSeq, re.I)) > 0:
print '\'N\' base(s) found in the sequence ... Tm calculation ' \
'cannot be performed'
# Print Tm value of input sequence to terminal / stdout.
else:
print probeTm(probeSeq, conc1, conc2, saltConc, formConc)
def main():
"""Determines the melting temperatures of given sequences, provided either
    as a command-line argument or through stdin."""
# Allow user to input parameters on command line.
userInput = argparse.ArgumentParser(description=\
'%s version %s. Requires a two column input file in the format: '
'sequence ID <tab> sequence. Returns a file in the format sequence ID '
'<tab> sequence <tab> sequence Tm. Will prompt user for input if no '
'input sequences are provided.' % (scriptName, Version))
userInput.add_argument('-f', '--file', action='store',
                           help='The file containing the sequences that Tm '
'calculation will be performed on. Providing a '
'file will override the \'-i\' flag.')
userInput.add_argument('-s', '--salt', action='store', default=390,
type=int,
help='The mM Na+ concentration, default is 390')
userInput.add_argument('-F', '--formamide', action='store', default=50,
type=float,
help='The percent formamide being used, default is '
'50')
userInput.add_argument('-c', '--dnac1', action='store', default=25,
type=float,
help='Concentration of higher concentration strand '
'[nM] -typically the probe- to use for '
'thermodynamic calculations. Default is 25')
userInput.add_argument('-C', '--dnac2', action='store', default=25,
type=float,
help='Concentration of lower concentration strand '
'[nM] -typically the target- to use for '
'thermodynamic calculations. Default is 25')
userInput.add_argument('-i', '--inputSeq', action='store', default=None,
help='Use this to input a sequence directly on the '
                                'command line / stdin instead of providing an '
                                'input file. User will be prompted for '
'input if no sequence is provided. Will print '
'result to terminal / stdout.')
userInput.add_argument('-o', '--output', action='store', default=None,
type=str,
help='Specify the name prefix of the output file')
# Import user-specified command line values.
args = userInput.parse_args()
inputFile = args.file
saltConc = args.salt
formConc = args.formamide
conc1 = args.dnac1
conc2 = args.dnac2
inputSeqVal = args.inputSeq
outNameVal = args.output
# Assign concentration variables based on magnitude.
if args.dnac1 >= args.dnac2:
conc1 = args.dnac1
conc2 = args.dnac2
else:
conc1 = args.dnac2
conc2 = args.dnac1
getTm(inputFile, saltConc, formConc, conc1, conc2, inputSeqVal, outNameVal)
if __name__ == '__main__':
main()
| mit | 4,938,888,727,236,498,000 | 41.401099 | 80 | 0.585979 | false | 4.221554 | false | false | false |
Zloool/manyfaced-honeypot | manyfaced/common/utils.py | 1 | 1349 | import time
import pickle
from socket import error as socket_error
from status import CLIENT_TIMEOUT
def dump_file(data):
try:
        with open('temp.db') as f:
string_file = f.read()
db = pickle.loads(string_file)
except:
db = list()
db.append(data)
with open('temp.db', "w") as f:
f.write(str(pickle.dumps(db)))
def receive_timeout(the_socket, timeout=CLIENT_TIMEOUT):
# make socket non blocking
the_socket.setblocking(0)
    # received data is collected partwise in a list
total_data = []
# beginning time
begin = time.time()
while True:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
# if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
# recv something
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
# change the beginning time for measurement
begin = time.time()
else:
# sleep for sometime to indicate a gap
time.sleep(0.1)
except socket_error:
pass
# join all parts to make final string
return ''.join(total_data)
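# Illustrative usage sketch (not part of the original module): receive_timeout()
# expects an already-connected socket; the endpoint below is hypothetical.
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect(('127.0.0.1', 2222))
#   request = receive_timeout(sock, timeout=CLIENT_TIMEOUT)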
| mit | -9,115,461,342,038,738,000 | 24.942308 | 76 | 0.564863 | false | 4.112805 | false | false | false |
mitsuhiko/solace | solace/badges.py | 1 | 7311 | # -*- coding: utf-8 -*-
"""
solace.badges
~~~~~~~~~~~~~
This module implements the badge system.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from operator import attrgetter
from solace.i18n import lazy_gettext, _
from solace.utils.remoting import RemoteObject
def try_award(event, *args):
"""Tries to avard a badge for the given event. The events correspond
to the `on_X` callbacks on the badges, just without the `on_` prefix.
"""
lookup = attrgetter('on_' + event)
for badge in badge_list:
cb = lookup(badge)
if cb is None:
continue
user = cb(*args)
if user is not None:
if isinstance(user, tuple):
user, payload = user
else:
payload = None
if badge.single_awarded and badge in user.badges:
continue
user._badges.append(UserBadge(badge, payload))
# inactive or banned users don't get messages.
if user.is_active and not user.is_banned:
UserMessage(user, _(u'You earned the “%s” badge') % badge.name)
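# Illustrative sketch (not original code): callers fire badge events using the
# on_X callback name without the 'on_' prefix, e.g.
#   try_award('vote', user, post, delta)
#   try_award('new_topic', user, topic)
# Each matching badge callback may return the user (or a (user, payload) tuple)
# that should be awarded the badge.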
_numeric_levels = dict(zip(('bronce', 'silver', 'gold', 'platin'),
range(4)))
class Badge(RemoteObject):
"""Represents a badge.
It can react to the following events::
on_vote = lambda user, post, delta
on_accept = lambda user, post, answer
on_reply = lambda user, post
on_new_topic = lambda user, topic
on_edit = lambda user, post
"""
remote_object_type = 'solace.badge'
public_fields = ('level', 'identifier', 'name', 'description')
def __init__(self, level, identifier, name, description=None,
single_awarded=False,
on_vote=None, on_accept=None, on_reply=None,
on_new_topic=None, on_edit=None):
assert level in ('bronce', 'silver', 'gold', 'platin')
assert len(identifier) <= 30
self.level = level
self.identifier = identifier
self.name = name
self.single_awarded = single_awarded
self.description = description
self.on_vote = on_vote
self.on_accept = on_accept
self.on_reply = on_reply
self.on_new_topic = on_new_topic
self.on_edit = on_edit
@property
def numeric_level(self):
return _numeric_levels[self.level]
def get_url_values(self):
return 'badges.show_badge', {'identifier': self.identifier}
def __repr__(self):
return '<%s \'%s\' (%s)>' % (
type(self).__name__,
self.name.encode('utf-8'),
('bronce', 'silver', 'gold', 'platin')[self.numeric_level]
)
def _try_award_special_answer(post, badge, votes_required):
"""Helper for nice and good answer."""
pid = str(post.id)
user = post.author
for user_badge in user._badges:
if user_badge.badge == badge and \
user_badge.payload == pid:
return
if post.is_answer and post.votes >= votes_required:
return user, pid
def _try_award_self_learner(post):
"""Helper for the self learner badge."""
pid = str(post.id)
user = post.author
for user_badge in user._badges:
if user_badge.badge == SELF_LEARNER and \
user_badge.payload == pid:
return
if post.is_answer and post.author == post.topic.author \
and post.votes >= 3:
return user, pid
def _try_award_reversal(post):
"""Helper for the reversal badge."""
pid = str(post.id)
user = post.author
for user_badge in user._badges:
if user_badge.badge == REVERSAL and \
user_badge.payload == pid:
return
if post.is_answer and post.votes >= 20 and \
post.topic.votes <= -5:
return user, pid
CRITIC = Badge('bronce', 'critic', lazy_gettext(u'Critic'),
lazy_gettext(u'First down vote'),
single_awarded=True,
on_vote=lambda user, post, delta:
user if delta < 0 and user != post.author else None
)
SELF_CRITIC = Badge('silver', 'self-critic', lazy_gettext(u'Self-Critic'),
lazy_gettext(u'First downvote on own reply or question'),
single_awarded=True,
on_vote=lambda user, post, delta:
user if delta < 0 and user == post.author else None
)
EDITOR = Badge('bronce', 'editor', lazy_gettext(u'Editor'),
lazy_gettext(u'First edited post'),
single_awarded=True,
on_edit=lambda user, post: user
)
INQUIRER = Badge('bronce', 'inquirer', lazy_gettext(u'Inquirer'),
lazy_gettext(u'First asked question'),
single_awarded=True,
on_new_topic=lambda user, topic: user
)
TROUBLESHOOTER = Badge('silver', 'troubleshooter',
lazy_gettext(u'Troubleshooter'),
lazy_gettext(u'First answered question'),
single_awarded=True,
on_accept=lambda user, topic, post: post.author if post else None
)
NICE_ANSWER = Badge('bronce', 'nice-answer', lazy_gettext(u'Nice Answer'),
lazy_gettext(u'Answer was upvoted 10 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
NICE_ANSWER, 10) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
NICE_ANSWER, 10)
)
GOOD_ANSWER = Badge('silver', 'good-answer', lazy_gettext(u'Good Answer'),
lazy_gettext(u'Answer was upvoted 25 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
GOOD_ANSWER, 25) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
GOOD_ANSWER, 25)
)
GREAT_ANSWER = Badge('gold', 'great-answer', lazy_gettext(u'Great Answer'),
lazy_gettext(u'Answer was upvoted 75 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
GOOD_ANSWER, 75) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
GOOD_ANSWER, 75)
)
UNIQUE_ANSWER = Badge('platin', 'unique-answer', lazy_gettext(u'Unique Answer'),
lazy_gettext(u'Answer was upvoted 150 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
GOOD_ANSWER, 150) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
GOOD_ANSWER, 150)
)
REVERSAL = Badge('gold', 'reversal', lazy_gettext(u'Reversal'),
lazy_gettext(u'Provided answer of +20 score to a question of -5 score'),
on_accept=lambda user, topic, post: _try_award_reversal(post) if post else None,
on_vote=lambda user, post, delta: _try_award_reversal(post)
)
SELF_LEARNER = Badge('silver', 'self-learner', lazy_gettext(u'Self-Learner'),
lazy_gettext(u'Answered your own question with at least 4 upvotes'),
on_accept=lambda user, topic, post: _try_award_self_learner(post) if post else None,
on_vote=lambda user, post, delta: _try_award_self_learner(post)
)
#: list of all badges
badge_list = [CRITIC, EDITOR, INQUIRER, TROUBLESHOOTER, NICE_ANSWER,
GOOD_ANSWER, SELF_LEARNER, SELF_CRITIC, GREAT_ANSWER,
UNIQUE_ANSWER, REVERSAL]
#: all the badges by key
badges_by_id = dict((x.identifier, x) for x in badge_list)
# circular dependencies
from solace.models import UserBadge, UserMessage
| bsd-3-clause | -3,068,501,602,873,117,000 | 32.672811 | 88 | 0.626249 | false | 3.316841 | false | false | false |
bakkerjarr/ACLSwitch | Ryu_Application/controller.py | 1 | 11192 | # Copyright 2015 Jarrod N. Bakker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ryu and OpenFlow modules
from ryu.app.ofctl import api
from ryu.app.wsgi import WSGIApplication
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.controller import dpset
# Application modules
from l2switch.l2switch import L2Switch
from aclswitch.aclswitch import ACLSwitch
__author__ = "Jarrod N. Bakker"
__status__ = "Development"
class Controller(dpset.DPSet):
"""Abstracts the details of the Ryu controller.
This class is used to provide applications with endpoints for
modifying OpenFlow switches. Multiple Ryu applications can be
instantiated from the controller class as a result.
"""
_CONTEXTS = {"wsgi": WSGIApplication}
_EVENT_OFP_SW_FEATURES = ofp_event.EventOFPSwitchFeatures.__name__
_EVENT_OFP_FLOW_REMOVED = ofp_event.EventOFPFlowRemoved.__name__
_EVENT_OFP_PACKET_IN = ofp_event.EventOFPPacketIn.__name__
_INSTANCE_NAME_CONTR = "ryu_controller_abstraction"
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self._apps = {}
self._handlers = {self._EVENT_OFP_SW_FEATURES: [],
self._EVENT_OFP_FLOW_REMOVED: [],
self._EVENT_OFP_PACKET_IN: []}
self._wsgi = kwargs['wsgi']
# Insert Ryu applications below
self._register_app(L2Switch(self))
self._register_app(ACLSwitch(self))
def get_ofpe_handlers(self):
"""Return the tuple of the OpenFlow protocol event handlers.
:return: A tuple.
"""
return self._handlers.keys()
def register_rest_wsgi(self, rest_wsgi, **kwargs):
"""Register a WSGI with Ryu.
:param rest_wsgi: The WSGI to register.
:return: True is successful, False otherwise.
"""
all_kwargs = kwargs["kwargs"].copy()
all_kwargs[self._INSTANCE_NAME_CONTR] = self
self._wsgi.register(rest_wsgi, all_kwargs)
return True
def _register_app(self, app_obj):
"""Register a Ryu app with the controller abstraction.
:param app_obj: Reference to the app's Python module.
"""
# Check that the Ryu app can be supported by the controller
app_name = app_obj.get_app_name()
if app_obj.is_supported() is True:
self.logger.info("Registering Ryu app: %s", app_name)
self._apps[app_name] = app_obj
else:
self.logger.error("Ryu app %s cannot be supported by the "
"controller.", app_name)
return
# Record what event handlers the Ryu app is listening for
app_handlers = app_obj.get_expected_handlers()
for handler in app_handlers:
self._handlers[handler].append(app_name)
# Methods that send data to OpenFlow switches
def add_flow(self, datapath, priority, match, inst, hard_timeout,
table_id, buffer_id=None, in_port=None, msg=None,
idle_timeout=0, packet_out=True, cookie=0):
"""Reactively add a flow table entry to a switch's flow table.
:param datapath: The switch to add the flow-table entry to.
:param priority: Priority of the flow-table entry.
:param match: What packet header fields should be matched.
:param inst: The behaviour that matching flows should follow.
:param hard_timeout: When the rule should expire.
:param table_id: What flow table the flow-table entry should
be sent to.
:param buffer_id: Identifier of buffer queue if traffic is
being buffered.
:param in_port: Ingress switch port.
:param msg: OpenFlow message.
:param idle_timeout: Idle time before the flow is removed.
:param packet_out: True if this is a packet_out, False otherwise.
:param cookie: Cookie for the message.
"""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath,
buffer_id=buffer_id,
hard_timeout=0,
idle_timeout=idle_timeout,
priority=priority, match=match,
flags=ofproto.OFPFF_SEND_FLOW_REM,
instructions=inst, table_id=table_id, cookie=cookie)
else:
mod = parser.OFPFlowMod(datapath=datapath,
hard_timeout=0,
idle_timeout=idle_timeout,
priority=priority, match=match,
flags=ofproto.OFPFF_SEND_FLOW_REM,
instructions=inst, table_id=table_id, cookie=cookie)
self._send_msg(datapath, mod)
if packet_out:
if msg:
out = None
if buffer_id and buffer_id != 0xffffffff:
out = parser.OFPPacketOut(
datapath=datapath,
actions=[parser.OFPActionOutput(ofproto.OFPP_TABLE)],
in_port=in_port,
buffer_id=buffer_id,
data=msg.data)
datapath.send_msg(out)
else:
out = parser.OFPPacketOut(
datapath=datapath,
actions=[parser.OFPActionOutput(ofproto.OFPP_TABLE)],
in_port=in_port,
buffer_id=0xffffffff,
data=msg.data)
datapath.send_msg(out)
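    # Illustrative usage sketch (not part of the original controller): a
    # registered Ryu app could install a low-priority table-miss entry roughly
    # like this, using the datapath's standard Ryu parser/ofproto objects:
    #   parser = datapath.ofproto_parser
    #   ofproto = datapath.ofproto
    #   match = parser.OFPMatch()
    #   actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
    #                                     ofproto.OFPCML_NO_BUFFER)]
    #   inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
    #                                        actions)]
    #   controller.add_flow(datapath, priority=0, match=match, inst=inst,
    #                       hard_timeout=0, table_id=0, packet_out=False)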
def remove_flow(self, datapath, parser, table, remove_type, priority,
match, out_port, out_group, cookie=0, cookie_mask=0):
"""Remove a flow table entry from a switch.
The callee should decide of the removal type.
:param datapath: The switch to remove the flow from.
:param parser: Parser for the OpenFlow switch.
:param table: Table id to send the flow mod to.
:param remove_type: OFPFC_DELETE or OFPFC_DELETE_STRICT.
:param priority: Priority of the flow table entry.
:param match: What packet header fields should be matched.
:param out_port: Switch port to match.
:param out_group: Switch group to match.
"""
mod = parser.OFPFlowMod(datapath=datapath, table_id=table,
command=remove_type, priority=priority,
match=match, out_port=out_port,
out_group=out_group,
cookie=cookie, cookie_mask=cookie_mask)
datapath.send_msg(mod)
def packet_out(self, datapath, out):
"""Send a packet out message to a switch.
:param datapath: The switch to send the message to.
:param out: The packet out message.
"""
self._send_msg(datapath, out)
def _send_msg(self, datapath, msg):
"""Send a message to a switch such as an OFPPacketOut message.
:param datapath: The switch to send the message to.
:param msg: The message to send to switch specified in datapath.
"""
datapath.send_msg(msg)
# Misc.
def switch_get_datapath(self, datapath_id):
"""Return a datapath object given its datapath ID.
:param datapath_id: ID of a datapath i.e. switch ID.
:return: Datapath object.
"""
return api.get_datapath(self, datapath_id)
# OpenFlow switch event handlers
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def _switch_features_handler(self, event):
"""Catch and handle OpenFlow Protocol SwitchFeatures events.
:param event: The OpenFlow event.
"""
datapath_id = event.msg.datapath_id
datapath = event.msg.datapath
ofproto = event.msg.datapath.ofproto
parser = event.msg.datapath.ofproto_parser
self.logger.info("Switch \'{0}\' connected.".format(datapath_id))
mod = parser.OFPFlowMod(datapath=datapath, table_id=ofproto.OFPTT_ALL,
command=ofproto.OFPFC_DELETE, priority=0,
match=parser.OFPMatch(), out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
cookie=0, cookie_mask=0,
buffer_id=0xffffffff)
datapath.send_msg(mod)
self.logger.info("Switch \'{0}\' all tables cleared.".format(datapath_id)
)
for app in self._handlers[self._EVENT_OFP_SW_FEATURES]:
self._apps[app].switch_features(event)
@set_ev_cls(ofp_event.EventOFPFlowRemoved)
def _flow_removed_handler(self, event):
"""Catch and handle OpenFlow Protocol FlowRemoved events.
:param event: The OpenFlow event.
"""
msg = event.msg
match = msg.match
self.logger.info("Flow table entry removed.\n\t Flow match: "
"{0}".format(match))
self.logger.info("Cookie: %x", msg.cookie)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, event):
"""Catch and handle OpenFlow Protocol PacketIn events.
:param event: The OpenFlow event.
"""
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if event.msg.msg_len < event.msg.total_len:
self.logger.warning("Packet truncated: only {0} of {1} "
"bytes".format(event.msg.msg_len,
event.msg.total_len))
for app in self._handlers[self._EVENT_OFP_PACKET_IN]:
self._apps[app].packet_in(event)
@set_ev_cls(ofp_event.EventOFPErrorMsg, [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
    def error_msg_handler(self, ev):
        """Catch and log OpenFlow Protocol ErrorMsg events."""
msg = ev.msg
self.logger.warning('OFPErrorMsg received: type=0x%02x code=0x%02x '
'message=%s',
msg.type, msg.code, msg.data)
@set_ev_cls(ofp_event.EventOFPTableFeaturesStatsReply, MAIN_DISPATCHER)
    def _table_features_stats_reply_handler(self, ev):
        """Log OpenFlow TableFeaturesStats replies."""
        self.logger.info("TableFeaturesStats reply: {0}".format(ev.msg))
| apache-2.0 | 7,496,221,329,370,704,000 | 40.147059 | 103 | 0.586401 | false | 4.193331 | false | false | false |
jcrudy/grm | grm/binomial_earth_example.py | 1 | 1250 | import numpy
from grm import GeneralizedRegressor, BinomialLossFunction, LogitLink
import scipy.stats
from pyearth.earth import Earth
numpy.seterr(all='raise')
m = 1000
n = 10
p = 10
def earth_basis(X, vars, parents, knots, signs):
p = vars.shape[0]
B = numpy.empty(shape=(m,p+1))
B[:,0] = 1.0
for i in range(p):
knot = numpy.sort(X[:,vars[i]])[knots[i]]
B[:,i+1] = B[:,parents[i]] * numpy.maximum(signs[i]*(X[:,vars[i]] - knot), 0.0)
return B
numpy.random.seed(1)
X = numpy.random.normal(size=(m,n))
vars = numpy.argmax(numpy.random.multinomial(1, (1.0/float(n))*numpy.ones(n), p),1)
knots = numpy.random.randint(6, m-6, size=p)
parents = numpy.array([numpy.random.binomial(i, 1.0/float(p**2)) if i>0 else 0 for i in range(p)])
signs = numpy.random.binomial(1, .5, size=p)
B = earth_basis(X, vars, parents, knots, signs)
beta = numpy.random.uniform(-2.0,2.0,size=p+1)
eta = numpy.dot(B, beta)
model = GeneralizedRegressor(base_regressor=Earth(),
loss_function=BinomialLossFunction(LogitLink()))
n = numpy.random.randint(1, 10, size=m)
mu = 1.0 / (1.0 + numpy.exp(-eta))
y = numpy.random.binomial(n, mu)
model.fit(X, y, n=n)
assert scipy.stats.pearsonr(model.predict(X), eta) > .99
| gpl-3.0 | 93,117,187,002,019,150 | 34.714286 | 98 | 0.6512 | false | 2.587992 | false | false | false |
pcolmant/repanier | repanier/xlsx/xlsx_stock.py | 1 | 24914 | import repanier.apps
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from repanier.const import *
from repanier.models.offeritem import OfferItemReadOnly
from repanier.models.product import Product
from repanier.packages.openpyxl import load_workbook
from repanier.packages.openpyxl.style import Fill
from repanier.packages.openpyxl.styles import Color
from repanier.tools import update_offer_item, next_row
from repanier.xlsx.export_tools import *
from repanier.xlsx.import_tools import get_row, get_header
def export_permanence_stock(
permanence, deliveries_id=(), customer_price=False, wb=None, ws_customer_title=None
):
if wb is not None:
yellowFill = Fill()
yellowFill.start_color.index = "FFEEEE11"
yellowFill.end_color.index = "FFEEEE11"
yellowFill.fill_type = Fill.FILL_SOLID
header = [
(_("Id"), 5),
(_("OfferItem"), 5),
(_("Reference"), 20),
(_("Product"), 60),
(
_("Customer unit price")
if customer_price
else _("Producer unit price"),
10,
),
(_("Deposit"), 10),
(_("Asked"), 10),
(_("Quantity ordered"), 10),
(_("Initial stock"), 10),
(repanier.apps.REPANIER_SETTINGS_CURRENCY_DISPLAY, 15),
(_("Stock used"), 10),
(_("Additional"), 10),
(_("Remaining stock"), 10),
(repanier.apps.REPANIER_SETTINGS_CURRENCY_DISPLAY, 15),
]
offer_items = (
OfferItemReadOnly.objects.filter(
permanence_id=permanence.id,
manage_production=True,
)
.order_by("producer", "long_name_v2", "order_average_weight")
.select_related("producer", "department_for_customer")
.iterator()
)
offer_item = next_row(offer_items)
if offer_item is not None:
# Check if there are deliveries_ws
deliveries_ws = []
if len(deliveries_id) > 0:
for delivery_cpt, delivery_id in enumerate(deliveries_id):
ws_sc_name = format_worksheet_title(
"{}-{}".format(delivery_cpt, ws_customer_title)
)
for sheet in wb.worksheets:
if ws_sc_name == sheet.title:
deliveries_ws.append(ws_sc_name)
break
else:
ws_sc_name = format_worksheet_title(ws_customer_title)
for sheet in wb.worksheets:
if ws_sc_name == sheet.title:
deliveries_ws.append(ws_sc_name)
break
wb, ws = new_landscape_a4_sheet(wb, _("Stock check"), permanence, header)
formula_main_total_a = []
formula_main_total_b = []
show_column_reference = False
show_column_qty_ordered = False
show_column_add2stock = False
row_num = 1
while offer_item is not None:
producer_save = offer_item.producer
row_start_producer = row_num + 1
c = ws.cell(row=row_num, column=2)
c.value = "{}".format(producer_save.short_profile_name)
c.style.font.bold = True
c.style.font.italic = True
while (
offer_item is not None
and producer_save.id == offer_item.producer_id
):
department_for_customer_save__id = (
offer_item.department_for_customer_id
)
department_for_customer_save__short_name = (
offer_item.department_for_customer.short_name_v2
if offer_item.department_for_customer is not None
else None
)
while (
offer_item is not None
and producer_save.id == offer_item.producer_id
and department_for_customer_save__id
== offer_item.department_for_customer_id
):
if len(offer_item.reference) < 36:
if offer_item.reference.isdigit():
# Avoid display of exponent by Excel
offer_item_reference = "[{}]".format(
offer_item.reference
)
else:
offer_item_reference = offer_item.reference
show_column_reference = True
else:
offer_item_reference = EMPTY_STRING
if offer_item.order_unit < PRODUCT_ORDER_UNIT_DEPOSIT:
asked = offer_item.quantity_invoiced
stock = offer_item.stock
c = ws.cell(row=row_num, column=0)
c.value = offer_item.producer_id
c = ws.cell(row=row_num, column=1)
c.value = offer_item.id
c = ws.cell(row=row_num, column=2)
c.value = "{}".format(offer_item_reference)
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=3)
if department_for_customer_save__short_name is not None:
c.value = "{} - {}".format(
offer_item.get_long_name_with_customer_price(),
department_for_customer_save__short_name,
)
else:
c.value = "{}".format(offer_item.get_long_name_with_customer_price())
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.alignment.wrap_text = True
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=4)
unit_price = (
offer_item.customer_unit_price
if customer_price
else offer_item.producer_unit_price
)
c.value = unit_price.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=5)
c.value = offer_item.unit_deposit.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=6)
if ws_customer_title is None:
c.value = asked
else:
if len(deliveries_ws) > 0:
sum_value = "+".join(
"SUMIF('{}'!B:B,B{},'{}'!F:F)".format(
delivery_ws, row_num + 1, delivery_ws
)
for delivery_ws in deliveries_ws
)
c.value = "={}".format(sum_value)
else:
c.value = DECIMAL_ZERO
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=7)
c.value = "=G{}-K{}+L{}".format(
row_num + 1, row_num + 1, row_num + 1
)
if not show_column_qty_ordered:
show_column_qty_ordered = (
asked - min(asked, stock)
) > 0
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=8)
c.value = stock
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c.style.font.color = Color(Color.BLUE)
ws.conditional_formatting.addCellIs(
get_column_letter(9) + str(row_num + 1),
"notEqual",
[str(stock)],
True,
wb,
None,
None,
yellowFill,
)
c = ws.cell(row=row_num, column=9)
c.value = "=ROUND(I{}*(E{}+F{}),2)".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=10)
c.value = "=MIN(G{},I{})".format(row_num + 1, row_num + 1)
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=12)
c.value = "=I{}-K{}+L{}".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c.style.font.bold = True
c = ws.cell(row=row_num, column=13)
c.value = "=ROUND(M{}*(E{}+F{}),2)".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
row_num += 1
offer_item = next_row(offer_items)
row_num += 1
c = ws.cell(row=row_num, column=3)
c.value = "{} {}".format(
_("Total price"), producer_save.short_profile_name
)
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.font.bold = True
c.style.alignment.horizontal = c.style.alignment.HORIZONTAL_RIGHT
c = ws.cell(row=row_num, column=9)
formula = "SUM(J{}:J{})".format(row_start_producer, row_num)
c.value = "=" + formula
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
formula_main_total_a.append(formula)
c = ws.cell(row=row_num, column=13)
formula = "SUM(N{}:N{})".format(row_start_producer, row_num)
c.value = "=" + formula
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
formula_main_total_b.append(formula)
if offer_items is not None:
# Display a separator line between producers
row_num += 1
for col_num in range(16):
c = ws.cell(row=row_num, column=col_num)
c.style.borders.bottom.border_style = Border.BORDER_MEDIUMDASHED
row_num += 2
c = ws.cell(row=row_num, column=3)
c.value = "{}".format(_("Total price"))
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.font.bold = True
c.style.alignment.horizontal = c.style.alignment.HORIZONTAL_RIGHT
c = ws.cell(row=row_num, column=9)
c.value = "=" + "+".join(formula_main_total_a)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
c = ws.cell(row=row_num, column=13)
c.value = "=" + "+".join(formula_main_total_b)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
row_num += 1
for col_num in range(16):
c = ws.cell(row=row_num, column=col_num)
c.style.borders.bottom.border_style = Border.BORDER_MEDIUMDASHED
ws.column_dimensions[get_column_letter(1)].visible = False
ws.column_dimensions[get_column_letter(2)].visible = False
ws.column_dimensions[get_column_letter(11)].visible = False
if not show_column_reference:
ws.column_dimensions[get_column_letter(3)].visible = False
if not show_column_qty_ordered:
ws.column_dimensions[get_column_letter(8)].visible = False
if not show_column_add2stock:
ws.column_dimensions[get_column_letter(12)].visible = False
return wb
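# Illustrative sketch (not original code; the variable names are assumptions
# about the calling admin action): export_permanence_stock() appends a
# "Stock check" sheet to the workbook that already holds the customer sheets
# and highlights the cells whose stock value was edited.
#   wb = export_permanence_stock(permanence, deliveries_id=(),
#                                customer_price=False, wb=wb,
#                                ws_customer_title=None)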
# @transaction.atomic
# def import_stock_sheet(worksheet, permanence=None):
# error = False
# error_msg = None
# if permanence.status < PERMANENCE_DONE:
# header = get_header(worksheet)
# if header:
# row_num = 1
# row = get_row(worksheet, header, row_num)
# while row and not error:
# try:
# # with transaction.atomic():
# stock = None if row[_('Initial stock')] is None else Decimal(row[_('Initial stock')]).quantize(THREE_DECIMALS)
# if stock is not None:
# producer_id = None if row[_('Id')] is None else Decimal(row[_('Id')])
# offer_item_id = None if row[_('OfferItem')] is None else Decimal(row[_('OfferItem')])
# offer_item = OfferItem.objects.filter(
# id=offer_item_id,
# permanence_id=permanence.id,
# producer_id=producer_id
# ).order_by('?').first()
# if offer_item is not None \
# and (offer_item.stock != stock):
# offer_item.stock = stock
# offer_item.save()
# Product.objects.filter(
# id=offer_item.product_id,
# producer_id=producer_id
# ).update(stock=stock)
# row_num += 1
# row = get_row(worksheet, header, row_num)
# except KeyError, e:
# # Missing field
# error = True
# error_msg = _("Row %(row_num)d : A required column is missing.") % {'row_num': row_num + 1}
# except Exception, e:
# error = True
# error_msg = _("Row %(row_num)d : %(error_msg)s.") % {'row_num': row_num + 1, 'error_msg': str(e)}
# else:
# error = True
# error_msg = _("The status of this permanence prohibit you to update the stock.")
# return error, error_msg
def export_producer_stock(producers, customer_price=False, wb=None):
yellowFill = Fill()
yellowFill.start_color.index = "FFEEEE11"
yellowFill.end_color.index = "FFEEEE11"
yellowFill.fill_type = Fill.FILL_SOLID
header = [
(_("Id"), 5),
(_("Producer"), 60),
(_("Reference"), 20),
(_("Product"), 60),
(_("Customer unit price") if customer_price else _("Producer unit price"), 10),
(_("Deposit"), 10),
(_("Maximum quantity"), 10),
(repanier.apps.REPANIER_SETTINGS_CURRENCY_DISPLAY, 15),
]
producers = producers.iterator()
producer = next_row(producers)
wb, ws = new_landscape_a4_sheet(wb, _("Maximum quantity"), _("Maximum quantity"), header)
show_column_reference = False
row_num = 1
while producer is not None:
products = (
Product.objects.filter(
producer_id=producer.id,
is_active=True,
)
.order_by("long_name_v2", "order_average_weight")
.select_related("producer", "department_for_customer")
.iterator()
)
product = next_row(products)
while product is not None:
if product.order_unit < PRODUCT_ORDER_UNIT_DEPOSIT:
c = ws.cell(row=row_num, column=0)
c.value = product.id
c = ws.cell(row=row_num, column=1)
c.value = "{}".format(product.producer)
if len(product.reference) < 36:
if product.reference.isdigit():
# Avoid display of exponent by Excel
product_reference = "[{}]".format(product.reference)
else:
product_reference = product.reference
show_column_reference = True
else:
product_reference = EMPTY_STRING
c = ws.cell(row=row_num, column=2)
c.value = "{}".format(product_reference)
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=3)
if product.department_for_customer is not None:
c.value = "{} - {}".format(
product.department_for_customer.short_name_v2,
product.get_long_name_with_customer_price(),
)
else:
c.value = product.get_long_name_with_customer_price()
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.alignment.wrap_text = True
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=4)
unit_price = (
product.customer_unit_price
if customer_price
else product.producer_unit_price
)
c.value = unit_price.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=5)
c.value = product.unit_deposit.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=6)
c.value = product.stock
c.style.number_format.format_code = (
'_ * #,##0.00_ ;_ * -#,##0.00_ ;_ * "-"??_ ;_ @_ '
)
c.style.font.color = Color(Color.BLUE)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=7)
c.value = "=ROUND((E{}+F{})*G{},2)".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
ws.conditional_formatting.addCellIs(
get_column_letter(8) + str(row_num + 1),
"notEqual",
[
str(
(
(unit_price.amount + product.unit_deposit.amount)
* product.stock
).quantize(TWO_DECIMALS)
)
],
True,
wb,
None,
None,
yellowFill,
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
row_num += 1
product = next_row(products)
row_num += 1
c = ws.cell(row=row_num, column=4)
c.value = "{}".format(_("Total"))
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.font.bold = True
c.style.alignment.horizontal = c.style.alignment.HORIZONTAL_RIGHT
c = ws.cell(row=row_num, column=7)
formula = "SUM(H{}:H{})".format(2, row_num)
c.value = "=" + formula
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
ws.column_dimensions[get_column_letter(1)].visible = False
if not show_column_reference:
ws.column_dimensions[get_column_letter(3)].visible = False
producer = next_row(producers)
return wb
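# Illustrative sketch (not original code): the Producer queryset below is an
# assumption about the calling admin action; the returned openpyxl workbook can
# then be saved to disk or streamed back to the browser.
#   producers = Producer.objects.filter(is_active=True)
#   wb = export_producer_stock(producers, customer_price=False)
#   wb.save('/tmp/producer_stock.xlsx')  # hypothetical path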
@transaction.atomic
def import_producer_stock(worksheet, producers=None):
    """Import the maximum quantities from a worksheet and update the products."""
error = False
error_msg = None
header = get_header(worksheet)
if header:
row_num = 1
row = get_row(worksheet, header, row_num)
while row and not error:
try:
# with transaction.atomic():
product_id = None if row[_("Id")] is None else Decimal(row[_("Id")])
if product_id is not None:
stock = (
DECIMAL_ZERO
if row[_("Maximum quantity")] is None
else Decimal(row[_("Maximum quantity")]).quantize(THREE_DECIMALS)
)
stock = stock if stock >= DECIMAL_ZERO else DECIMAL_ZERO
Product.objects.filter(id=product_id).update(stock=stock)
update_offer_item(product_id=product_id)
row_num += 1
row = get_row(worksheet, header, row_num)
except KeyError as e:
# Missing field
error = True
error_msg = _("Row %(row_num)d : A required column is missing.") % {
"row_num": row_num + 1
}
except Exception as e:
error = True
error_msg = _("Row %(row_num)d : %(error_msg)s.") % {
"row_num": row_num + 1,
"error_msg": str(e),
}
return error, error_msg
def handle_uploaded_stock(request, producers, file_to_import, *args):
error = False
error_msg = None
wb = load_workbook(file_to_import)
if wb is not None:
ws = wb.get_sheet_by_name(format_worksheet_title(_("Maximum quantity")))
if ws is not None:
error, error_msg = import_producer_stock(ws, producers=producers)
if error:
error_msg = format_worksheet_title(_("Maximum quantity")) + " > " + error_msg
return error, error_msg
| gpl-3.0 | -446,838,558,719,094,400 | 46.911538 | 132 | 0.457333 | false | 4.294777 | false | false | false |
1and1/confluencer | src/confluencer/tools/content.py | 1 | 10540 | # -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Tools to discover and modify content.
"""
# Copyright © 2015 1&1 Group <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
import re
import difflib
try:
import html.entities as htmlentitydefs
except ImportError: # Python 2
import htmlentitydefs # pylint: disable=import-error,wrong-import-order
from xml.sax.saxutils import quoteattr # pylint: disable=wrong-import-order
import arrow
from munch import munchify as bunchify
from lxml.etree import fromstring, HTMLParser, XMLParser, XMLSyntaxError # pylint: disable=no-name-in-module
from rudiments.reamed import click
from .._compat import BytesIO
# Mapping of CLI content format names to Confluence API names
CLI_CONTENT_FORMATS = dict(view='view', editor='editor', storage='storage', export='export_view', anon='anonymous_export_view')
# Simple replacement rules, order is important!
TIDY_REGEX_RULES = ((_name, re.compile(_rule), _subst) for _name, _rule, _subst in [
("FosWiki: Remove CSS class from section title",
r'<(h[1-5]) class="[^"]*">', r'<\1>'),
("FosWiki: Remove static section numbering",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)[0-9.]+?\s*(?=<span class="tok"> </span>)', r'\1'),
("FosWiki: Empty anchor in headers",
r'(?<=<h.>)<a></a>\s* +', ''),
("FosWiki: 'tok' spans in front of headers",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="tok"> </span>', r'\1'),
("FosWiki: Section edit icons at the end of headers",
r'\s*<a(?: class="[^"]*")? href="[^"]+"(?: title="[^"]*")?>'
r'<ac:image [^>]+><ri:url ri:value="[^"]+/EditChapterPlugin/pencil.png" ?/>'
r'</ac:image></a>(?=</span></h)', ''),
("FosWiki: 'Edit Chapter Plugin' spans (old)",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="ecpHeading">'
r'\s*([^<]+)(?:<br\s*/>)</span>\s*(?=</h.>)', r'\1\2'),
("FosWiki: 'Edit Chapter Plugin' spans (new)",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="ecpHeading">'
r'\s*([^<]+)(?:<br\s*/>)<a class="ecpEdit".+?</a></span>\s*(?=</h.>)', r'\1\2'),
("FosWiki: Residual leading whitespace in headers",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s* +', r'\1'),
("FosWiki: Replace TOC div with macro",
r'(<a name="foswikiTOC" ?/>)?<div class="foswikiToc">.*?</div>', '''
<ac:structured-macro ac:name="panel" ac:schema-version="1">
<ac:parameter ac:name="title">Contents</ac:parameter>
<ac:rich-text-body>
<p>
<ac:structured-macro ac:name="toc" ac:schema-version="1"/>
</p>
</ac:rich-text-body>
</ac:structured-macro>'''),
("FosWiki: Replace TOC in a Twisty with Expand+TOC macro",
r'<div class="twistyPlugin">.+?<big><strong>Table of Contents</strong></big></span></a></span></div>', '''
<ac:structured-macro ac:name="expand" ac:schema-version="1">
<ac:parameter ac:name="title">Table of Contents</ac:parameter>
<ac:rich-text-body>
<p>
<ac:structured-macro ac:name="toc" ac:schema-version="1"/>
</p>
</ac:rich-text-body>
</ac:structured-macro>'''),
("FosWiki: Named anchors (#WikiWords)",
r'(<a name=[^>]+></a><a href=")http[^#]+(#[^"]+" style="[^"]+)(" title="[^"]+"><big>[^<]+</big></a>)',
r'\1\2; float: right;\3'),
("FosWiki: Wrap HTML '<pre>' into 'panel' macro",
r'(?<!<ac:rich-text-body>)(<pre(?: class="[^"]*")?>)',
r'<ac:structured-macro ac:name="panel" ac:schema-version="1">'
r'<ac:parameter ac:name="bgColor">#eeeeee</ac:parameter>'
r'<ac:rich-text-body>'
r'\1'),
("FosWiki: Wrap HTML '</pre>' into 'panel' macro",
r'</pre>(?!</ac:rich-text-body>)', '</pre></ac:rich-text-body></ac:structured-macro>'),
("FosWiki: Embedded CSS - custom list indent",
r'<ul style="margin-left: [.0-9]+em;">', '<ul>'),
("FosWiki: Empty paragraphs",
r'<p> </p>', r''),
("FosWiki: Obsolete CSS classes",
r'(<(?:div|p|span|h[1-5])) class="(foswikiTopic)"', r'\1'),
])
def _apply_tidy_regex_rules(body, log=None):
"""Return tidied body after applying regex rules."""
body = body.replace(u'\u00A0', ' ')
for name, rule, subst in TIDY_REGEX_RULES:
length = len(body)
try:
body, count = rule.subn(subst, body)
except re.error as cause:
raise click.LoggedFailure('Error "{}" in "{}" replacement: {} => {}'.format(
cause, name, rule.pattern, subst,
))
if count and log:
length -= len(body)
log.info('Replaced %d matche(s) of "%s" (%d chars %s)',
count, name, abs(length), "added" if length < 0 else "removed")
return body
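# Illustrative sketch (not original code): a FosWiki-flavoured heading such as
# the one below is rewritten by the rules above roughly as follows (the exact
# result depends on the full rule chain):
#   _apply_tidy_regex_rules(u'<h2 class="foswikiTopic">1.1 '
#                           u'<span class="tok"> </span>Overview</h2>')
#   # -> u'<h2>Overview</h2>'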
def _make_etree(body, content_format='storage', attrs=None):
"""Create an ElementTree from a page's body."""
attrs = (attrs or {}).copy()
attrs.update({
'xmlns:ac': 'http://www.atlassian.com/schema/confluence/4/ac/',
'xmlns:ri': 'http://www.atlassian.com/schema/confluence/4/ri/',
})
xml_body = re.sub(r'&(?!(amp|lt|gt|quot|apos))([a-zA-Z0-9]+);',
lambda cref: '&#{};'.format(htmlentitydefs.name2codepoint[cref.group(2)]), body)
#print(body.encode('utf8'))
xmldoc = u'<{root} {attrs}>{body}</{root}>'.format(
root=content_format,
attrs=' '.join('{}={}'.format(k, quoteattr(v)) for k, v in sorted(attrs.items())),
body=xml_body)
parser = (XMLParser if content_format == 'storage' else HTMLParser)(remove_blank_text=True)
try:
return fromstring(xmldoc, parser)
except XMLSyntaxError as cause:
raise click.LoggedFailure('{}\n{}'.format(
cause, '\n'.join(['{:7d} {}'.format(i+1, k) for i, k in enumerate(xmldoc.splitlines())])
))
def _pretty_xml(body, content_format='storage', attrs=None):
"""Pretty-print the given page body and return a list of lines."""
root = _make_etree(body, content_format=content_format, attrs=attrs)
prettyfied = BytesIO()
root.getroottree().write(prettyfied, encoding='utf8', pretty_print=True, xml_declaration=False)
return prettyfied.getvalue().decode('utf8').splitlines()
class ConfluencePage(object):
"""A page that holds enough state so it can be modified."""
DIFF_COLS = {
'+': 'green',
'-': 'red',
'@': 'yellow',
}
def __init__(self, cf, url, markup='storage', expand=None):
""" Load the given page.
"""
if expand and isinstance(expand, str):
expand = expand.split(',')
expand = set(expand or []) | {'space', 'version', 'body.' + markup}
self.cf = cf
self.url = url
self.markup = markup
self._data = cf.get(self.url, expand=','.join(expand))
self.body = self._data.body[self.markup].value
@property
def page_id(self):
"""The numeric page ID."""
return self._data.id
@property
def space_key(self):
"""The space this page belongs to."""
return self._data.space.key
@property
def title(self):
"""The page's title."""
return self._data.title
@property
def json(self):
"""The full JSON response data."""
return self._data
@property
def version(self):
"""The page's version number in history."""
return self._data.version.number
def etree(self):
"""Parse the page's body into an ElementTree."""
attrs = {
'id': 'page-' + self._data.id,
'href': self._data._links.base + (self._data._links.tinyui or ''),
'status': self._data.status,
'title': self._data.title,
}
return _make_etree(self.body, content_format=self.markup, attrs=attrs)
def tidy(self, log=None):
"""Return a tidy copy of this page's body."""
assert self.markup == 'storage', "Can only clean up pages in storage format!"
return _apply_tidy_regex_rules(self.body, log=log)
def update(self, body=None, minor=True):
"""Update a page's content."""
assert self.markup == 'storage', "Cannot update non-storage page markup!"
if body is None:
body = self.body
if body == self._data.body[self.markup].value:
return # No changes
data = {
#'id': self._data.id,
'type': 'page',
'space': {'key': self.space_key},
'title': self.title,
'version': dict(number=self.version + 1, minorEdit=minor),
'body': {
'storage': {
'value': body,
'representation': self.markup,
}
},
'expand': 'version',
}
response = self.cf.session.put(self._data._links.self, json=data)
response.raise_for_status()
##page = response.json(); print(page)
result = bunchify(response.json())
self._data.body[self.markup].value = body
self._data.version = result.version
return result
def dump_diff(self, changed):
"""Dump a diff to terminal between changed and stored body."""
if self.body == changed:
click.secho('=== No changes to "{0}"'.format(self.title), fg='green')
return
diff = difflib.unified_diff(
_pretty_xml(self.body, self.markup),
_pretty_xml(changed, self.markup),
u'v. {0} of "{1}"'.format(self.version, self.title),
u'v. {0} of "{1}"'.format(self.version + 1, self.title),
arrow.get(self._data.version.when).replace(microsecond=0).isoformat(sep=' '),
arrow.now().replace(microsecond=0).isoformat(sep=' '),
lineterm='', n=2)
for line in diff:
click.secho(line, fg=self.DIFF_COLS.get(line and line[0], None))
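# Illustrative usage sketch (not original code): ``cf`` is assumed to be the
# package's Confluence API object (anything offering ``get()`` and a requests
# ``session``), and the content URL is hypothetical.
#   page = ConfluencePage(cf, '/rest/api/content/12345')
#   cleaned = page.tidy()
#   page.dump_diff(cleaned)
#   page.update(cleaned)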
| apache-2.0 | -8,970,102,084,358,917,000 | 39.69112 | 127 | 0.567226 | false | 3.441868 | false | false | false |
kaushik94/sympy | sympy/utilities/autowrap.py | 3 | 40995 | """Module for compiling codegen output, and wrap the binary for use in
python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, i.e.
>>> from sympy.abc import x,y
>>> expr = ((x - y)**(25)).expand()
>>> binary_callable = autowrap(expr)
>>> binary_callable(1, 2)
-1.0
The callable returned from autowrap() is a binary python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
The idea is that a SymPy user will primarily be interested in working with
mathematical expressions, and should not have to learn details about wrapping
tools in order to evaluate expressions numerically, even if they are
computationally expensive.
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
don't need the binaries for another project.
"""
from __future__ import print_function, division
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError, check_output
from string import Template
from warnings import warn
from sympy.core.cache import cacheit
from sympy.core.compatibility import range, iterable
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument,
InputArgument, CodeGenArgumentListError,
Result, ResultBase, C99CodeGen)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'),
'modules': ('numpy',)}
class CodeWrapError(Exception):
pass
class CodeWrapper(object):
"""Base Class for code wrappers"""
_filename = "wrapped_code"
_module_basename = "wrapper_module"
_module_counter = 0
@property
def filename(self):
return "%s_%s" % (self._filename, CodeWrapper._module_counter)
@property
def module_name(self):
return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)
def __init__(self, generator, filepath=None, flags=[], verbose=False):
"""
generator -- the code generator to use
"""
self.generator = generator
self.filepath = filepath
self.flags = flags
self.quiet = not verbose
@property
def include_header(self):
return bool(self.filepath)
@property
def include_empty(self):
return bool(self.filepath)
def _generate_code(self, main_routine, routines):
routines.append(main_routine)
self.generator.write(
routines, self.filename, True, self.include_header,
self.include_empty)
def wrap_code(self, routine, helpers=None):
helpers = helpers or []
if self.filepath:
workdir = os.path.abspath(self.filepath)
else:
workdir = tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routine, helpers)
self._prepare_files(routine)
self._process_files(routine)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, routine.name)
def _process_files(self, routine):
command = self.command
command.extend(self.flags)
try:
retoutput = check_output(command, stderr=STDOUT)
except CalledProcessError as e:
raise CodeWrapError(
"Error while executing command: %s. Command output is:\n%s" % (
" ".join(command), e.output.decode('utf-8')))
if not self.quiet:
print(retoutput)
class DummyWrapper(CodeWrapper):
"""Class used for testing independent of backends """
template = """# dummy module for testing of SymPy
def %(name)s():
return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""
def _prepare_files(self, routine):
return
def _generate_code(self, routine, helpers):
with open('%s.py' % self.module_name, 'w') as f:
printed = ", ".join(
[str(res.expr) for res in routine.result_variables])
# convert OutputArguments to return value like f2py
args = filter(lambda x: not isinstance(
x, OutputArgument), routine.arguments)
retvals = []
for val in routine.result_variables:
if isinstance(val, Result):
retvals.append('nameless')
else:
retvals.append(val.result_var)
print(DummyWrapper.template % {
'name': routine.name,
'expr': printed,
'args': ", ".join([str(a.name) for a in args]),
'retvals': ", ".join([str(val) for val in retvals])
}, end="", file=f)
def _process_files(self, routine):
return
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
"""Wrapper that uses Cython"""
setup_template = """\
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cy_opts = {cythonize_options}
{np_import}
ext_mods = [Extension(
{ext_args},
include_dirs={include_dirs},
library_dirs={library_dirs},
libraries={libraries},
extra_compile_args={extra_compile_args},
extra_link_args={extra_link_args}
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
"""
pyx_imports = (
"import numpy as np\n"
"cimport numpy as np\n\n")
pyx_header = (
"cdef extern from '{header_file}.h':\n"
" {prototype}\n\n")
pyx_func = (
"def {name}_c({arg_string}):\n"
"\n"
"{declarations}"
"{body}")
std_compile_flag = '-std=c99'
def __init__(self, *args, **kwargs):
"""Instantiates a Cython code wrapper.
The following optional parameters get passed to ``distutils.Extension``
for building the Python extension module. Read its documentation to
learn more.
Parameters
==========
include_dirs : [list of strings]
A list of directories to search for C/C++ header files (in Unix
form for portability).
library_dirs : [list of strings]
A list of directories to search for C/C++ libraries at link time.
libraries : [list of strings]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [list of strings]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could be
anything. Note that the attribute ``std_compile_flag`` will be
appended to this list.
extra_link_args : [list of strings]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create
a new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
cythonize_options : [dictionary]
Keyword arguments passed on to cythonize.
"""
self._include_dirs = kwargs.pop('include_dirs', [])
self._library_dirs = kwargs.pop('library_dirs', [])
self._libraries = kwargs.pop('libraries', [])
self._extra_compile_args = kwargs.pop('extra_compile_args', [])
self._extra_compile_args.append(self.std_compile_flag)
self._extra_link_args = kwargs.pop('extra_link_args', [])
self._cythonize_options = kwargs.pop('cythonize_options', {})
self._need_numpy = False
super(CythonCodeWrapper, self).__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine, build_dir=os.curdir):
# NOTE : build_dir is used for testing purposes.
pyxfilename = self.module_name + '.pyx'
codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
# pyx
with open(os.path.join(build_dir, pyxfilename), 'w') as f:
self.dump_pyx([routine], f, self.filename)
# setup.py
ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
if self._need_numpy:
np_import = 'import numpy as np\n'
self._include_dirs.append('np.get_include()')
else:
np_import = ''
with open(os.path.join(build_dir, 'setup.py'), 'w') as f:
includes = str(self._include_dirs).replace("'np.get_include()'",
'np.get_include()')
f.write(self.setup_template.format(
ext_args=", ".join(ext_args),
np_import=np_import,
include_dirs=includes,
library_dirs=self._library_dirs,
libraries=self._libraries,
extra_compile_args=self._extra_compile_args,
extra_link_args=self._extra_link_args,
cythonize_options=self._cythonize_options
))
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name + '_c')
def dump_pyx(self, routines, f, prefix):
"""Write a Cython file with python wrappers
This file contains all the definitions of the routines in c code and
refers to the header file.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
"""
headers = []
functions = []
for routine in routines:
prototype = self.generator.get_prototype(routine)
# C Function Header Import
headers.append(self.pyx_header.format(header_file=prefix,
prototype=prototype))
# Partition the C function arguments into categories
py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
# Function prototype
name = routine.name
arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
# Local Declarations
local_decs = []
for arg, val in py_inf.items():
proto = self._prototype_arg(arg)
mat, ind = [self._string_var(v) for v in val]
local_decs.append(" cdef {0} = {1}.shape[{2}]".format(proto, mat, ind))
local_decs.extend([" cdef {0}".format(self._declare_arg(a)) for a in py_loc])
declarations = "\n".join(local_decs)
if declarations:
declarations = declarations + "\n"
# Function Body
args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
rets = ", ".join([self._string_var(r.name) for r in py_rets])
if routine.results:
body = ' return %s(%s)' % (routine.name, args_c)
if rets:
body = body + ', ' + rets
else:
body = ' %s(%s)\n' % (routine.name, args_c)
body = body + ' return ' + rets
functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
declarations=declarations, body=body))
# Write text to file
if self._need_numpy:
# Only import numpy if required
f.write(self.pyx_imports)
f.write('\n'.join(headers))
f.write('\n'.join(functions))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_args = []
py_returns = []
py_locals = []
py_inferred = {}
for arg in args:
if isinstance(arg, OutputArgument):
py_returns.append(arg)
py_locals.append(arg)
elif isinstance(arg, InOutArgument):
py_returns.append(arg)
py_args.append(arg)
else:
py_args.append(arg)
# Find arguments that are array dimensions. These can be inferred
# locally in the Cython code.
if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
dims = [d[1] + 1 for d in arg.dimensions]
sym_dims = [(i, d) for (i, d) in enumerate(dims) if
isinstance(d, Symbol)]
for (i, d) in sym_dims:
py_inferred[d] = (arg.name, i)
for arg in args:
if arg.name in py_inferred:
py_inferred[arg] = py_inferred.pop(arg.name)
# Filter inferred arguments from py_args
py_args = [a for a in py_args if a not in py_inferred]
return py_returns, py_args, py_locals, py_inferred
def _prototype_arg(self, arg):
mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
np_types = {'double': 'np.double_t',
'int': 'np.int_t'}
t = arg.get_datatype('c')
if arg.dimensions:
self._need_numpy = True
ndim = len(arg.dimensions)
mtype = np_types[t]
return mat_dec.format(mtype=mtype, ndim=ndim, name=self._string_var(arg.name))
else:
return "%s %s" % (t, self._string_var(arg.name))
def _declare_arg(self, arg):
proto = self._prototype_arg(arg)
if arg.dimensions:
shape = '(' + ','.join(self._string_var(i[1] + 1) for i in arg.dimensions) + ')'
return proto + " = np.empty({shape})".format(shape=shape)
else:
return proto + " = 0"
def _call_arg(self, arg):
if arg.dimensions:
t = arg.get_datatype('c')
return "<{0}*> {1}.data".format(t, self._string_var(arg.name))
elif isinstance(arg, ResultBase):
return "&{0}".format(self._string_var(arg.name))
else:
return self._string_var(arg.name)
def _string_var(self, var):
printer = self.generator.printer.doprint
return printer(var)
class F2PyCodeWrapper(CodeWrapper):
"""Wrapper that uses f2py"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the f2py '
'backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super(F2PyCodeWrapper, self).__init__(*args, **kwargs)
@property
def command(self):
filename = self.filename + '.' + self.generator.code_extension
args = ['-c', '-m', self.module_name, filename]
command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args
return command
def _prepare_files(self, routine):
pass
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
# Here we define a lookup of backends -> tuples of languages. For now, each
# tuple is of length 1, but if a backend supports more than one language,
# the most preferable language is listed first.
_lang_lookup = {'CYTHON': ('C99', 'C89', 'C'),
'F2PY': ('F95',),
'NUMPY': ('C99', 'C89', 'C'),
'DUMMY': ('F95',)} # Dummy here just for testing
def _infer_language(backend):
"""For a given backend, return the top choice of language"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
return langs[0]
def _validate_backend_language(backend, language):
"""Throws error if backend and language are incompatible"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
if language.upper() not in langs:
raise ValueError(("Backend {0} and language {1} are "
"incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(expr, language=None, backend='f2py', tempdir=None, args=None,
flags=None, verbose=False, helpers=None, code_gen=None, **kwargs):
"""Generates python callable binaries based on the math expression.
Parameters
==========
expr
The SymPy expression that should be wrapped as a binary routine.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'f2py' [default],
or 'cython'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
args : iterable, optional
An ordered iterable of symbols. Specifies the argument sequence for the
function.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : 3-tuple or iterable of 3-tuples, optional
Used to define auxiliary expressions needed for the main expr. If the
main expression needs to call a specialized function it should be
passed in via ``helpers``. Autowrap will then make sure that the
compiled main expression can link to the helper routine. Items should
be 3-tuples with (<function_name>, <sympy_expression>,
<argument_tuple>). It is mandatory to supply an argument sequence to
helper routines.
code_gen : CodeGen instance
An instance of a CodeGen subclass. Overrides ``language``.
include_dirs : [string]
A list of directories to search for C/C++ header files (in Unix form
for portability).
library_dirs : [string]
A list of directories to search for C/C++ libraries at link time.
libraries : [string]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and compilers
where "command line" makes sense, this is typically a list of
command-line arguments, but for other platforms it could be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create a
new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.autowrap import autowrap
>>> expr = ((x - y + z)**(13)).expand()
>>> binary_func = autowrap(expr)
>>> binary_func(1, 4, 2)
-1.0
"""
if language:
if not isinstance(language, type):
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
# two cases 1) helpers is an iterable of 3-tuples and 2) helpers is a
# 3-tuple
if iterable(helpers) and len(helpers) != 0 and iterable(helpers[0]):
helpers = helpers if helpers else ()
else:
helpers = [helpers] if helpers else ()
args = list(args) if iterable(args, exclude=set) else args
if code_gen is None:
code_gen = get_code_generator(language, "autowrap")
CodeWrapperClass = {
'F2PY': F2PyCodeWrapper,
'CYTHON': CythonCodeWrapper,
'DUMMY': DummyWrapper
}[backend.upper()]
code_wrapper = CodeWrapperClass(code_gen, tempdir, flags if flags else (),
verbose, **kwargs)
helps = []
for name_h, expr_h, args_h in helpers:
helps.append(code_gen.routine(name_h, expr_h, args_h))
for name_h, expr_h, args_h in helpers:
if expr.has(expr_h):
name_h = binary_function(name_h, expr_h, backend='dummy')
expr = expr.subs(expr_h, name_h(*args_h))
try:
routine = code_gen.routine('autofunc', expr, args)
except CodeGenArgumentListError as e:
# if all missing arguments are for pure output, we simply attach them
# at the end and try again, because the wrappers will silently convert
# them to return values anyway.
new_args = []
for missing in e.missing_args:
if not isinstance(missing, OutputArgument):
raise
new_args.append(missing.name)
routine = code_gen.routine('autofunc', expr, args + new_args)
return code_wrapper.wrap_code(routine, helpers=helps)
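def _example_autowrap_with_helper():
    """Illustrative sketch only, not part of the original module.

    Shows how the ``helpers`` argument described above might be used: the
    helper name ``delta`` and the choice of the Cython backend are
    assumptions for demonstration, not requirements of the API.
    """
    from sympy.abc import x, y
    expr = (x - y)**2 + x*y
    # autowrap substitutes the helper sub-expression with a call to the
    # compiled helper routine before generating code for ``expr``.
    func = autowrap(expr, backend='cython',
                    helpers=[('delta', x - y, (x, y))])
    return func(2.0, 1.0)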
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
"""Returns a sympy function with expr as binary implementation
This is a convenience function that automates the steps needed to
autowrap the SymPy expression and attaching it to a Function object
with implemented_function().
Parameters
==========
symfunc : sympy Function
The function to bind the callable to.
expr : sympy Expression
The expression used to generate the function.
kwargs : dict
Any kwargs accepted by autowrap.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.utilities.autowrap import binary_function
>>> expr = ((x - y)**(25)).expand()
>>> f = binary_function('f', expr)
>>> type(f)
<class 'sympy.core.function.UndefinedFunction'>
>>> 2*f(x, y)
2*f(x, y)
>>> f(x, y).evalf(2, subs={x: 1, y: 2})
-1.0
"""
binary = autowrap(expr, **kwargs)
return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_outcalls = Template("*((double *)out${outnum}) = ${funcname}(${call_args});")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
${outcalls}
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
_ufunc_setup = Template("""\
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('',
parent_package,
top_path)
config.add_extension('${module}', sources=['${module}.c', '${filename}.c'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)""")
class UfuncifyCodeWrapper(CodeWrapper):
"""Wrapper for Ufuncify"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the numpy'
' backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super(UfuncifyCodeWrapper, self).__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def wrap_code(self, routines, helpers=None):
# This routine overrides CodeWrapper because we can't assume funcname == routines[0].name
# Therefore we have to break the CodeWrapper private API.
# There isn't an obvious way to extend multi-expr support to
# the other autowrap backends, so we limit this change to ufuncify.
helpers = helpers if helpers is not None else []
# We just need a consistent name
funcname = 'wrapped_' + str(id(routines) + id(helpers))
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routines, helpers)
self._prepare_files(routines, funcname)
self._process_files(routines)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, funcname)
def _generate_code(self, main_routines, helper_routines):
all_routines = main_routines + helper_routines
self.generator.write(
all_routines, self.filename, True, self.include_header,
self.include_empty)
def _prepare_files(self, routines, funcname):
# C
codefilename = self.module_name + '.c'
with open(codefilename, 'w') as f:
self.dump_c(routines, f, self.filename, funcname=funcname)
# setup.py
with open('setup.py', 'w') as f:
self.dump_setup(f)
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def dump_setup(self, f):
setup = _ufunc_setup.substitute(module=self.module_name,
filename=self.filename)
f.write(setup)
def dump_c(self, routines, f, prefix, funcname=None):
"""Write a C file with python wrappers
This file contains all the definitions of the routines in c code.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to name the imported module.
funcname
Name of the main function to be returned.
"""
if funcname is None:
if len(routines) == 1:
funcname = routines[0].name
else:
msg = 'funcname must be specified for multiple output routines'
raise ValueError(msg)
functions = []
function_creation = []
ufunc_init = []
module = self.module_name
include_file = "\"{0}.h\"".format(prefix)
top = _ufunc_top.substitute(include_file=include_file, module=module)
name = funcname
# Partition the C function arguments into categories
# Here we assume all routines accept the same arguments
r_index = 0
py_in, _ = self._partition_args(routines[0].arguments)
n_in = len(py_in)
n_out = len(routines)
# Declare Args
form = "char *{0}{1} = args[{2}];"
arg_decs = [form.format('in', i, i) for i in range(n_in)]
arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_args = '\n '.join(arg_decs)
# Declare Steps
form = "npy_intp {0}{1}_step = steps[{2}];"
step_decs = [form.format('in', i, i) for i in range(n_in)]
step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_steps = '\n '.join(step_decs)
# Call Args
form = "*(double *)in{0}"
call_args = ', '.join([form.format(a) for a in range(n_in)])
# Step Increments
form = "{0}{1} += {0}{1}_step;"
step_incs = [form.format('in', i) for i in range(n_in)]
step_incs.extend([form.format('out', i, i) for i in range(n_out)])
step_increments = '\n '.join(step_incs)
# Types
n_types = n_in + n_out
types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
# Docstring
docstring = '"Created in SymPy with Ufuncify"'
# Function Creation
function_creation.append("PyObject *ufunc{0};".format(r_index))
# Ufunc initialization
init_form = _ufunc_init_form.substitute(module=module,
funcname=name,
docstring=docstring,
n_in=n_in, n_out=n_out,
ind=r_index)
ufunc_init.append(init_form)
outcalls = [_ufunc_outcalls.substitute(
outnum=i, call_args=call_args, funcname=routines[i].name) for i in
range(n_out)]
body = _ufunc_body.substitute(module=module, funcname=name,
declare_args=declare_args,
declare_steps=declare_steps,
call_args=call_args,
step_increments=step_increments,
n_types=n_types, types=types,
outcalls='\n '.join(outcalls))
functions.append(body)
body = '\n\n'.join(functions)
ufunc_init = '\n '.join(ufunc_init)
function_creation = '\n '.join(function_creation)
bottom = _ufunc_bottom.substitute(module=module,
ufunc_init=ufunc_init,
function_creation=function_creation)
text = [top, body, bottom]
f.write('\n\n'.join(text))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_in = []
py_out = []
for arg in args:
if isinstance(arg, OutputArgument):
py_out.append(arg)
elif isinstance(arg, InOutArgument):
raise ValueError("Ufuncify doesn't support InOutArguments")
else:
py_in.append(arg)
return py_in, py_out
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
flags=None, verbose=False, helpers=None, **kwargs):
"""Generates a binary function that supports broadcasting on numpy arrays.
Parameters
==========
args : iterable
Either a Symbol or an iterable of symbols. Specifies the argument
sequence for the function.
expr
A SymPy expression that defines the element wise operation.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'numpy' [default],
'cython', or 'f2py'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in
the specified path.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can
be helpful for debugging.
helpers : iterable, optional
Used to define auxiliary expressions needed for the main expr. If
the main expression needs to call a specialized function it should
be put in the ``helpers`` iterable. Autowrap will then make sure
that the compiled main expression can link to the helper routine.
        Items should be tuples with (<function_name>, <sympy_expression>,
<arguments>). It is mandatory to supply an argument sequence to
helper routines.
kwargs : dict
These kwargs will be passed to autowrap if the `f2py` or `cython`
backend is used and ignored if the `numpy` backend is used.
Notes
=====
The default backend ('numpy') will create actual instances of
``numpy.ufunc``. These support ndimensional broadcasting, and implicit type
conversion. Use of the other backends will result in a "ufunc-like"
function, which requires equal length 1-dimensional arrays for all
arguments, and will not perform any type conversions.
References
==========
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
Examples
========
>>> from sympy.utilities.autowrap import ufuncify
>>> from sympy.abc import x, y
>>> import numpy as np
>>> f = ufuncify((x, y), y + x**2)
>>> type(f)
<class 'numpy.ufunc'>
>>> f([1, 2, 3], 2)
array([ 3., 6., 11.])
>>> f(np.arange(5), 3)
array([ 3., 4., 7., 12., 19.])
For the 'f2py' and 'cython' backends, inputs are required to be equal length
1-dimensional arrays. The 'f2py' backend will perform type conversion, but
the Cython backend will error if the inputs are not of the expected type.
>>> f_fortran = ufuncify((x, y), y + x**2, backend='f2py')
>>> f_fortran(1, 2)
array([ 3.])
>>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]))
array([ 2., 6., 12.])
>>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
>>> f_cython(1, 2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
>>> f_cython(np.array([1.0]), np.array([2.0]))
array([ 3.])
"""
if isinstance(args, Symbol):
args = (args,)
else:
args = tuple(args)
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
if backend.upper() == 'NUMPY':
# maxargs is set by numpy compile-time constant NPY_MAXARGS
# If a future version of numpy modifies or removes this restriction
# this variable should be changed or removed
maxargs = 32
helps = []
for name, expr, args in helpers:
helps.append(make_routine(name, expr, args))
code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"), tempdir,
flags, verbose)
if not isinstance(expr, (list, tuple)):
expr = [expr]
if len(expr) == 0:
raise ValueError('Expression iterable has zero length')
if len(expr) + len(args) > maxargs:
msg = ('Cannot create ufunc with more than {0} total arguments: '
'got {1} in, {2} out')
raise ValueError(msg.format(maxargs, len(args), len(expr)))
routines = [make_routine('autofunc{}'.format(idx), exprx, args) for
idx, exprx in enumerate(expr)]
return code_wrapper.wrap_code(routines, helpers=helps)
else:
# Dummies are used for all added expressions to prevent name clashes
# within the original expression.
y = IndexedBase(Dummy('y'))
m = Dummy('m', integer=True)
i = Idx(Dummy('i', integer=True), m)
f_dummy = Dummy('f')
f = implemented_function('%s_%d' % (f_dummy.name, f_dummy.dummy_index), Lambda(args, expr))
# For each of the args create an indexed version.
indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
# Order the arguments (out, args, dim)
args = [y] + indexed_args + [m]
args_with_indices = [a[i] for a in indexed_args]
return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
tempdir, args, flags, verbose, helpers, **kwargs)
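def _example_multi_output_ufuncify():
    """Illustrative sketch only, not part of the original module.

    With the default 'numpy' backend a list of expressions is accepted and
    one output is produced per expression; the argument values below are
    arbitrary demonstration data.
    """
    from sympy.abc import x, y
    add_sub = ufuncify((x, y), [x + y, x - y])
    # Broadcasting works as for any numpy ufunc.
    return add_sub([1.0, 2.0, 3.0], 2.0)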
| bsd-3-clause | 163,633,342,712,998,180 | 35.570027 | 115 | 0.587169 | false | 4.04011 | false | false | false |
biorack/metatlas | metatlas/io/write_utils.py | 1 | 2914 | """ Utility functions used in writing files"""
import filecmp
import logging
import os
import tempfile
logger = logging.getLogger(__name__)
def make_dir_for(file_path):
"""makes directories for file_path if they don't already exist"""
directory = os.path.dirname(file_path)
if directory != "":
os.makedirs(directory, exist_ok=True)
def check_existing_file(file_path, overwrite=False):
"""Creates directories as needed and throws an error if file exists and overwrite is False"""
make_dir_for(file_path)
try:
if not overwrite and os.path.exists(file_path):
raise FileExistsError(f"Not overwriting {file_path}.")
except FileExistsError as err:
logger.exception(err)
raise
def export_dataframe(dataframe, file_path, description, overwrite=False, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to create
description: free string for logging
overwrite: if False, raise error if file already exists
remaining arguments are passed through to to_csv()
"""
check_existing_file(file_path, overwrite)
dataframe.to_csv(file_path, **kwargs)
logger.info("Exported %s to %s.", description, file_path)
def raise_on_diff(dataframe, file_path, description, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to compare against
description: free string for logging
kwargs: passed through to to_csv()
If file_path exists and does not match file that would be generated by
saving dataframe to a csv, then raise ValueError
"""
if not os.path.exists(file_path):
return
with tempfile.NamedTemporaryFile(delete=False) as temp_path:
dataframe.to_csv(temp_path, **kwargs)
same = filecmp.cmp(file_path, temp_path.name)
os.remove(temp_path.name)
if same:
logger.info("Data in %s is the same as %s.", description, file_path)
else:
try:
raise ValueError("Data in %s is not the same as %s." % (description, file_path))
except ValueError as err:
logger.exception(err)
raise
def export_dataframe_die_on_diff(dataframe, file_path, description, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to create
description: free string for logging
kwargs: passed through to to_csv()
If file_path does not exist then save the dataframe there
If file_path exists and matches data in dataframe then do nothing
If file_path exists and does not match dataframe then raise ValueError
"""
raise_on_diff(dataframe, file_path, description, **kwargs)
if not os.path.exists(file_path):
export_dataframe(dataframe, file_path, description, **kwargs)
| bsd-3-clause | 6,220,869,091,396,560,000 | 33.690476 | 97 | 0.668497 | false | 4.156919 | false | false | false |
PetePriority/home-assistant | homeassistant/components/zone/zone.py | 1 | 3191 | """Component entity and functionality."""
from homeassistant.const import ATTR_HIDDEN, ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.helpers.entity import Entity
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.location import distance
from .const import DOMAIN
ATTR_PASSIVE = 'passive'
ATTR_RADIUS = 'radius'
STATE = 'zoning'
@bind_hass
def active_zone(hass, latitude, longitude, radius=0):
"""Find the active zone for given latitude, longitude."""
return run_callback_threadsafe(
hass.loop, async_active_zone, hass, latitude, longitude, radius
).result()
@bind_hass
def async_active_zone(hass, latitude, longitude, radius=0):
"""Find the active zone for given latitude, longitude.
This method must be run in the event loop.
"""
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (hass.states.get(entity_id) for entity_id
in sorted(hass.states.async_entity_ids(DOMAIN)))
min_dist = None
closest = None
for zone in zones:
if zone.attributes.get(ATTR_PASSIVE):
continue
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist
smaller_zone = (zone_dist == min_dist and
zone.attributes[ATTR_RADIUS] <
closest.attributes[ATTR_RADIUS])
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
def in_zone(zone, latitude, longitude, radius=0) -> bool:
"""Test if given latitude, longitude is in given zone.
Async friendly.
"""
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
return zone_dist - radius < zone.attributes[ATTR_RADIUS]
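def _example_in_zone(hass):
    """Illustrative sketch only, not part of the original component.

    Assumes a ``zone.home`` entity already exists in ``hass``; the
    coordinates and radius are made-up values.
    """
    home = hass.states.get('zone.home')
    if home is None:
        return False
    return in_zone(home, 52.3731, 4.8922, radius=10)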
class Zone(Entity):
"""Representation of a Zone."""
def __init__(self, hass, name, latitude, longitude, radius, icon, passive):
"""Initialize the zone."""
self.hass = hass
self._name = name
self._latitude = latitude
self._longitude = longitude
self._radius = radius
self._icon = icon
self._passive = passive
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state property really does nothing for a zone."""
return STATE
@property
def icon(self):
"""Return the icon if any."""
return self._icon
@property
def state_attributes(self):
"""Return the state attributes of the zone."""
data = {
ATTR_HIDDEN: True,
ATTR_LATITUDE: self._latitude,
ATTR_LONGITUDE: self._longitude,
ATTR_RADIUS: self._radius,
}
if self._passive:
data[ATTR_PASSIVE] = self._passive
return data
| apache-2.0 | -3,663,965,267,479,924,700 | 28.009091 | 79 | 0.62833 | false | 4.220899 | false | false | false |
jmsleiman/hypercube-election | hypercube.py | 1 | 5446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Joseph M. Sleiman
# This program is free software; you can redistribute it and/or
# modify it under the terms of the LGPLv2.1 or LGPLv3 License.
import math
import node
class HyperCube(object):
def __init__(self, dimension, nodeValues):
### define variables
self.dimension = dimension
self.listOfNodes = []
self.messageRegistry = []
self.dotString = ""
self.colourList = ['"#575329"', '"#00FECF"', '"#B05B6F"', '"#8CD0FF"', '"#3B9700"', '"#04F757"', '"#C8A1A1"', '"#1E6E00"',
'"#000000"', '"#FFFF00"', '"#1CE6FF"', '"#FF34FF"', '"#FF4A46"', '"#008941"', '"#006FA6"', '"#A30059"',
'"#FFDBE5"', '"#7A4900"', '"#0000A6"', '"#63FFAC"', '"#B79762"', '"#004D43"', '"#8FB0FF"', '"#997D87"',
'"#5A0007"', '"#809693"', '"#FEFFE6"', '"#1B4400"', '"#4FC601"', '"#3B5DFF"', '"#4A3B53"', '"#FF2F80"',
'"#61615A"', '"#BA0900"', '"#6B7900"', '"#00C2A0"', '"#FFAA92"', '"#FF90C9"', '"#B903AA"', '"#D16100"',
'"#DDEFFF"', '"#000035"', '"#7B4F4B"', '"#A1C299"', '"#300018"', '"#0AA6D8"', '"#013349"', '"#00846F"',
'"#372101"', '"#FFB500"', '"#C2FFED"', '"#A079BF"', '"#CC0744"', '"#C0B9B2"', '"#C2FF99"', '"#001E09"',
'"#00489C"', '"#6F0062"', '"#0CBD66"', '"#EEC3FF"', '"#456D75"', '"#B77B68"', '"#7A87A1"', '"#788D66"',
'"#885578"', '"#FAD09F"', '"#FF8A9A"', '"#D157A0"', '"#BEC459"', '"#456648"', '"#0086ED"', '"#886F4C"',
'"#34362D"', '"#B4A8BD"', '"#00A6AA"', '"#452C2C"', '"#636375"', '"#A3C8C9"', '"#FF913F"', '"#938A81"',
'"#7900D7"', '"#A77500"', '"#6367A9"', '"#A05837"', '"#6B002C"', '"#772600"', '"#D790FF"', '"#9B9700"',
'"#549E79"', '"#FFF69F"', '"#201625"', '"#72418F"', '"#BC23FF"', '"#99ADC0"', '"#3A2465"', '"#922329"',
'"#5B4534"', '"#FDE8DC"', '"#404E55"', '"#0089A3"', '"#CB7E98"', '"#A4E804"', '"#324E72"', '"#6A3A4C"']
### do some setting up
### add in those values as nodes
for value in nodeValues:
self.listOfNodes.append(node.Node(value, self.dimension))
self.setConnections(self.listOfNodes)
def setConnections(self, entry):
'''this method splits the list of entries into smaller and
smaller sublists until a list of 2 nodes is reached.
those 2 nodes form a connection in dimension 1, and after that
the other lists are superimposed and forms connections
accordingly:
0 1 2 3
4 5 6 7
0 and 4, 1 and 5, 2 and 6, 3 and 7 all form connections together
in dimension 3 (as this list has 8 elements, 2^3 = 8...)
'''
if(len(entry) > 2):
left, right = split_list(entry)
self.setConnections(left)
self.setConnections(right)
for x in xrange(0, len(left)):
left[x].attach(right[x], int(math.log(len(entry),2)))
right[x].attach(left[x], int(math.log(len(entry),2)))
if(len(entry) == 2):
entry[0].attach(entry[1], 1)
entry[1].attach(entry[0], 1)
# @profile
def election(self, largestWins):
'''
In this scenario, the nodes must find the smallest node among them, and name it their leader.
Strategy:
- Each node must message its neighbour on the i edge:
message contains:
rank
value
- When an active node receives a message:
- If the message received is from a smaller rank, there's been a catastrophic bug.
- If the message received is from an equal rank:
- If the receiver has a higher value, it increments its rank
- If the receiver has a lower value, it points the queen variable to the edge that sent the message, and goes dormant
- If the message received is from a higher rank:
- The node pushes it to a queue and comes back to it when it's ready (ie when the rank matches)
- When a passive node receives a message:
- If the message contains a rank lower than the rank of your queen, switch alliances
'''
messageMatrix = []
for node in self.listOfNodes:
messageMatrix.append(node.createChallenge(0))
clock = 0
victor = None
dots = []
while(victor == None):
dot = self.toDot()[:-1]
clock = clock + 1
messagesToProcess = []
messagesToQueue = []
while( len(messageMatrix) > 0):
msg = messageMatrix.pop(0)
dot += msg.toDot()
if(msg.delay <= 0):
messagesToProcess.append(msg)
else:
messagesToQueue.append(msg)
# now it's time to process messages
while(len(messagesToProcess) > 0):
msg = messagesToProcess.pop(0)
# however, how do we account for a redirected challenge?
# and how do we account for a success, defeat?
toBeContinued = msg.destination.processMessage(msg, largestWins)
if(toBeContinued != None):
messageMatrix.append(toBeContinued)
# now it's time to requeue those messages
for msg in messagesToQueue:
messageMatrix.append(msg)
for msg in messageMatrix:
msg.delay -= 1
dot += "}"
dots.append(dot)
for node in self.listOfNodes:
if node.rank == self.dimension:
print "Winner! {0}".format(node)
victor = node
break
dot = self.toDot()
dots.append(dot)
return dots
def toDot(self):
text = "digraph {\n\tlayout = circo\n"
for entry in self.listOfNodes:
text = text + entry.toDot(self.colourList)
text = text + "}"
self.dotString = text
return self.dotString
# now we need to draw all the leader directions...
# woohoo...
def split_list(a_list):
half = len(a_list)/2
return a_list[:half], a_list[half:]
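# Illustrative usage sketch only, not part of the original module: builds a
# 3-dimensional cube over 8 node values and runs the election. It assumes the
# accompanying ``node`` module is importable; the values and output file names
# are made up for demonstration.
if __name__ == "__main__":
	demo_cube = HyperCube(3, [3, 7, 1, 6, 0, 5, 2, 4])
	frames = demo_cube.election(largestWins=False)
	for step, frame in enumerate(frames):
		with open("election_step_{0}.dot".format(step), "w") as dot_file:
			dot_file.write(frame)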
| lgpl-2.1 | 453,523,443,621,024,500 | 33.251572 | 125 | 0.605215 | false | 2.80577 | false | false | false |
ChillarAnand/junction | junction/base/emailer.py | 1 | 1236 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Standard Library
from os import path
# Third Party Stuff
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
def send_email(to, context, template_dir):
"""Render given templates and send email to `to`.
    :param to: User object to send email to.
:param context: dict containing which needs to be passed to django template
:param template_dir: We expect files message.txt, subject.txt,
message.html etc in this folder.
:returns: None
:rtype: None
"""
def to_str(template_name):
return render_to_string(path.join(template_dir, template_name), context).strip()
subject = to_str('subject.txt')
text_message = to_str('message.txt')
html_message = to_str('message.html')
from_email = settings.DEFAULT_FROM_EMAIL
recipient_list = [_format_email(to)]
return send_mail(subject, text_message, from_email, recipient_list, html_message=html_message)
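def _example_send_email(user):
    """Illustrative sketch only, not part of the original module.

    ``emails/proposal_accepted`` is a hypothetical template directory that
    would need subject.txt, message.txt and message.html files; the context
    keys are made up for demonstration.
    """
    context = {
        'proposal_title': 'Scaling Django',
        'reviewer_name': user.first_name,
    }
    return send_email(user, context, template_dir='emails/proposal_accepted')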
def _format_email(user):
return user.email if user.first_name and user.last_name else \
'"{} {}" <{}>'.format(user.first_name, user.last_name, user.email)
| mit | -6,158,837,254,071,177,000 | 31.526316 | 98 | 0.695793 | false | 3.614035 | false | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/status/cmd/devicequery/type_Params.py | 1 | 3783 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import array
PARAMS_DEVICE_TYPE_USER_SPECIFIC = 0
PARAMS_DEVICE_TYPE_U1394 = 1
PARAMS_DEVICE_TYPE_ADAPTER = 2
PARAMS_DEVICE_TYPE_ALL = 255
PARAMS_DEVICE_TYPE_APM_SUPPORT = 3
PARAMS_DEVICE_TYPE_BATTERY = 4
PARAMS_DEVICE_TYPE_CDROM = 5
PARAMS_DEVICE_TYPE_COMPUTER = 6
PARAMS_DEVICE_TYPE_DECODER = 7
PARAMS_DEVICE_TYPE_DISK_DRIVE = 8
PARAMS_DEVICE_TYPE_DISPLAY = 9
PARAMS_DEVICE_TYPE_FDC = 10
PARAMS_DEVICE_TYPE_FLOPPY = 11
PARAMS_DEVICE_TYPE_GPS = 12
PARAMS_DEVICE_TYPE_HDC = 13
PARAMS_DEVICE_TYPE_HID_CLASS = 14
PARAMS_DEVICE_TYPE_IMAGE = 15
PARAMS_DEVICE_TYPE_INFRARED = 16
PARAMS_DEVICE_TYPE_KEYBOARD = 17
PARAMS_DEVICE_TYPE_LEGACY_DRIVER = 18
PARAMS_DEVICE_TYPE_MEDIA = 19
PARAMS_DEVICE_TYPE_MEDIUM_CHANGER = 20
PARAMS_DEVICE_TYPE_MODEM = 21
PARAMS_DEVICE_TYPE_MONITOR = 22
PARAMS_DEVICE_TYPE_MOUSE = 23
PARAMS_DEVICE_TYPE_MTD = 24
PARAMS_DEVICE_TYPE_MULTIFUNCTION = 25
PARAMS_DEVICE_TYPE_MULTIPORT_SERIAL = 26
PARAMS_DEVICE_TYPE_NET = 27
PARAMS_DEVICE_TYPE_NET_CLIENT = 28
PARAMS_DEVICE_TYPE_NET_SERVICE = 29
PARAMS_DEVICE_TYPE_NET_TRANS = 30
PARAMS_DEVICE_TYPE_NO_DRIVER = 31
PARAMS_DEVICE_TYPE_PARALLEL = 32
PARAMS_DEVICE_TYPE_PCMCIA = 33
PARAMS_DEVICE_TYPE_PORTS = 34
PARAMS_DEVICE_TYPE_PRINTER = 35
PARAMS_DEVICE_TYPE_PRINTER_UPGRADE = 36
PARAMS_DEVICE_TYPE_SCSI_ADAPTER = 37
PARAMS_DEVICE_TYPE_SMART_CARD_READER = 38
PARAMS_DEVICE_TYPE_SOUND = 39
PARAMS_DEVICE_TYPE_STILL_IMAGE = 40
PARAMS_DEVICE_TYPE_SYSTEM = 41
PARAMS_DEVICE_TYPE_TAPE_DRIVE = 42
PARAMS_DEVICE_TYPE_UNKNOWN = 43
PARAMS_DEVICE_TYPE_USB = 44
PARAMS_DEVICE_TYPE_VOLUME = 45
PARAMS_DEVICE_TYPE_U1394DEBUG = 46
PARAMS_DEVICE_TYPE_U61883 = 47
PARAMS_DEVICE_TYPE_AVC = 48
PARAMS_DEVICE_TYPE_BIOMETRIC = 49
PARAMS_DEVICE_TYPE_BLUETOOTH = 50
PARAMS_DEVICE_TYPE_DOT4 = 51
PARAMS_DEVICE_TYPE_DOT4PRINT = 52
PARAMS_DEVICE_TYPE_ENUM1394 = 53
PARAMS_DEVICE_TYPE_INFINIBAND = 54
PARAMS_DEVICE_TYPE_PNPPRINTERS = 55
PARAMS_DEVICE_TYPE_PROCESSOR = 56
PARAMS_DEVICE_TYPE_SBP2 = 57
PARAMS_DEVICE_TYPE_SECURITYACCELERATOR = 58
PARAMS_DEVICE_TYPE_VOLUMESNAPSHOT = 59
PARAMS_DEVICE_TYPE_WCEUSBS = 60
PARAMS_GUID_LEN = 16
class Params:
def __init__(self):
self.__dict__['choice'] = PARAMS_DEVICE_TYPE_USER_SPECIFIC
self.__dict__['guid'] = array.array('B')
i = 0
while i < PARAMS_GUID_LEN:
self.__dict__['guid'].append(0)
i = i + 1
def __getattr__(self, name):
if name == 'choice':
return self.__dict__['choice']
if name == 'guid':
return self.__dict__['guid']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'choice':
self.__dict__['choice'] = value
elif name == 'guid':
self.__dict__['guid'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_PARAMS_CHOICE, self.__dict__['choice'])
submsg.AddData(MSG_KEY_PARAMS_GUID, self.__dict__['guid'])
mmsg.AddMessage(MSG_KEY_PARAMS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['choice'] = submsg.FindU32(MSG_KEY_PARAMS_CHOICE)
try:
self.__dict__['guid'] = submsg.FindData(MSG_KEY_PARAMS_GUID)
except:
pass | unlicense | 8,606,701,729,567,228,000 | 32.785714 | 90 | 0.686228 | false | 3.073111 | false | false | false |
metacloud/python-glanceclient | glanceclient/v1/image_members.py | 1 | 3610 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.common import base
class ImageMember(base.Resource):
def __repr__(self):
return "<ImageMember %s>" % self._info
@property
def id(self):
return self.member_id
def delete(self):
self.manager.delete(self)
class ImageMemberManager(base.Manager):
resource_class = ImageMember
def get(self, image, member_id):
image_id = base.getid(image)
url = '/v1/images/%s/members/%s' % (image_id, member_id)
resp, body = self.api.json_request('GET', url)
member = body['member']
member['image_id'] = image_id
return ImageMember(self, member, loaded=True)
def list(self, image=None, member=None):
out = []
if image and member:
try:
out.append(self.get(image, member))
#TODO(bcwaldon): narrow this down to 404
except Exception:
pass
elif image:
out.extend(self._list_by_image(image))
elif member:
out.extend(self._list_by_member(member))
else:
#TODO(bcwaldon): figure out what is appropriate to do here as we
# are unable to provide the requested response
pass
return out
def _list_by_image(self, image):
image_id = base.getid(image)
url = '/v1/images/%s/members' % image_id
resp, body = self.api.json_request('GET', url)
out = []
for member in body['members']:
member['image_id'] = image_id
out.append(ImageMember(self, member, loaded=True))
return out
def _list_by_member(self, member):
member_id = base.getid(member)
url = '/v1/shared-images/%s' % member_id
resp, body = self.api.json_request('GET', url)
out = []
for member in body['shared_images']:
member['member_id'] = member_id
out.append(ImageMember(self, member, loaded=True))
return out
def delete(self, image_id, member_id):
self._delete("/v1/images/%s/members/%s" % (image_id, member_id))
def create(self, image, member_id, can_share=False):
"""Creates an image."""
url = '/v1/images/%s/members/%s' % (base.getid(image), member_id)
body = {'member': {'can_share': can_share}}
self._update(url, body=body)
def replace(self, image, members):
memberships = []
for member in members:
try:
obj = {
'member_id': member.member_id,
'can_share': member.can_share,
}
except AttributeError:
obj = {'member_id': member['member_id']}
if 'can_share' in member:
obj['can_share'] = member['can_share']
memberships.append(obj)
url = '/v1/images/%s/members' % base.getid(image)
self.api.json_request('PUT', url, {}, {'memberships': memberships})
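def _example_share_image(client, image_id, tenant_id):
    """Illustrative sketch only, not part of the original module.

    Assumes ``client`` is an already-authenticated v1 glance client; the
    image and tenant ids are placeholders supplied by the caller.
    """
    client.image_members.create(image_id, tenant_id, can_share=False)
    return client.image_members.list(image=image_id)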
| apache-2.0 | 5,861,809,855,356,957,000 | 34.048544 | 78 | 0.579778 | false | 3.860963 | false | false | false |
Juniper/ceilometer | ceilometer/compute/virt/vmware/vsphere_operations.py | 1 | 10191 | # Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.vmware import vim_util
PERF_MANAGER_TYPE = "PerformanceManager"
PERF_COUNTER_PROPERTY = "perfCounter"
VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value'
# ESXi Servers sample performance data every 20 seconds. 20-second interval
# data is called instance data or real-time data. To retrieve instance data,
# we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId"
# property. In that case the "QueryPerf" method operates as a raw data feed
# that bypasses the vCenter database and instead retrieves performance data
# from an ESXi host.
# The following value is time interval for real-time performance stats
# in seconds and it is not configurable.
VC_REAL_TIME_SAMPLING_INTERVAL = 20
class VsphereOperations(object):
"""Class to invoke vSphere APIs calls.
vSphere APIs calls are required by various pollsters, collecting data from
VMware infrastructure.
"""
def __init__(self, api_session, max_objects):
self._api_session = api_session
self._max_objects = max_objects
# Mapping between "VM's Nova instance Id" -> "VM's MOID"
# In case a VM is deployed by Nova, then its name is instance ID.
# So this map essentially has VM names as keys.
self._vm_moid_lookup_map = {}
# Mapping from full name -> ID, for VC Performance counters
self._perf_counter_id_lookup_map = None
def _init_vm_moid_lookup_map(self):
session = self._api_session
result = session.invoke_api(vim_util, "get_objects", session.vim,
"VirtualMachine", self._max_objects,
[VM_INSTANCE_ID_PROPERTY],
False)
while result:
for vm_object in result.objects:
vm_moid = vm_object.obj.value
# propSet will be set only if the server provides value
if hasattr(vm_object, 'propSet') and vm_object.propSet:
vm_instance_id = vm_object.propSet[0].val
if vm_instance_id:
self._vm_moid_lookup_map[vm_instance_id] = vm_moid
result = session.invoke_api(vim_util, "continue_retrieval",
session.vim, result)
def get_vm_moid(self, vm_instance_id):
"""Method returns VC MOID of the VM by its NOVA instance ID."""
if vm_instance_id not in self._vm_moid_lookup_map:
self._init_vm_moid_lookup_map()
return self._vm_moid_lookup_map.get(vm_instance_id, None)
def _init_perf_counter_id_lookup_map(self):
# Query details of all the performance counters from VC
session = self._api_session
client_factory = session.vim.client.factory
perf_manager = session.vim.service_content.perfManager
prop_spec = vim_util.build_property_spec(
client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY])
obj_spec = vim_util.build_object_spec(
client_factory, perf_manager, None)
filter_spec = vim_util.build_property_filter_spec(
client_factory, [prop_spec], [obj_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = 1
prop_collector = session.vim.service_content.propertyCollector
result = session.invoke_api(session.vim, "RetrievePropertiesEx",
prop_collector, specSet=[filter_spec],
options=options)
perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo
# Extract the counter Id for each counter and populate the map
self._perf_counter_id_lookup_map = {}
for perf_counter_info in perf_counter_infos:
counter_group = perf_counter_info.groupInfo.key
counter_name = perf_counter_info.nameInfo.key
counter_rollup_type = perf_counter_info.rollupType
counter_id = perf_counter_info.key
counter_full_name = (counter_group + ":" + counter_name + ":" +
counter_rollup_type)
self._perf_counter_id_lookup_map[counter_full_name] = counter_id
def get_perf_counter_id(self, counter_full_name):
"""Method returns the ID of VC performance counter by its full name.
A VC performance counter is uniquely identified by the
tuple {'Group Name', 'Counter Name', 'Rollup Type'}.
It will have an id - counter ID (changes from one VC to another),
which is required to query performance stats from that VC.
This method returns the ID for a counter,
assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'.
"""
if not self._perf_counter_id_lookup_map:
self._init_perf_counter_id_lookup_map()
return self._perf_counter_id_lookup_map[counter_full_name]
# TODO([email protected]) Move this method to common library
# when it gets checked-in
def query_vm_property(self, vm_moid, property_name):
"""Method returns the value of specified property for a VM.
:param vm_moid: moid of the VM whose property is to be queried
:param property_name: path of the property
"""
vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine")
session = self._api_session
return session.invoke_api(vim_util, "get_object_property",
session.vim, vm_mobj, property_name)
def query_vm_aggregate_stats(self, vm_moid, counter_id, duration):
"""Method queries the aggregated real-time stat value for a VM.
This method should be used for aggregate counters.
:param vm_moid: moid of the VM
:param counter_id: id of the perf counter in VC
:param duration: in seconds from current time,
over which the stat value was applicable
:return: the aggregated stats value for the counter
"""
# For aggregate counters, device_name should be ""
stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration)
# Performance manager provides the aggregated stats value
# with device name -> None
return stats.get(None, 0)
def query_vm_device_stats(self, vm_moid, counter_id, duration):
"""Method queries the real-time stat values for a VM, for all devices.
This method should be used for device(non-aggregate) counters.
:param vm_moid: moid of the VM
:param counter_id: id of the perf counter in VC
:param duration: in seconds from current time,
over which the stat value was applicable
:return: a map containing the stat values keyed by the device ID/name
"""
# For device counters, device_name should be "*" to get stat values
# for all devices.
stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration)
# For some device counters, in addition to the per device value
# the Performance manager also returns the aggregated value.
# Just to be consistent, deleting the aggregated value if present.
stats.pop(None, None)
return stats
def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration):
"""Method queries the real-time stat values for a VM.
:param vm_moid: moid of the VM for which stats are needed
:param counter_id: id of the perf counter in VC
:param device_name: name of the device for which stats are to be
queried. For aggregate counters pass empty string ("").
For device counters pass "*", if stats are required over all
devices.
:param duration: in seconds from current time,
over which the stat value was applicable
:return: a map containing the stat values keyed by the device ID/name
"""
session = self._api_session
client_factory = session.vim.client.factory
# Construct the QuerySpec
metric_id = client_factory.create('ns0:PerfMetricId')
metric_id.counterId = counter_id
metric_id.instance = device_name
query_spec = client_factory.create('ns0:PerfQuerySpec')
query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine")
query_spec.metricId = [metric_id]
query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL
# We query all samples which are applicable over the specified duration
samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL)
if duration and
duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1)
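        # e.g. assuming the usual 20 s vCenter real-time sampling interval
        # (the assumed value of VC_REAL_TIME_SAMPLING_INTERVAL), a 60 s
        # duration requests the 3 most recent samples; shorter or missing
        # durations fall back to a single sample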
query_spec.maxSample = samples_cnt
perf_manager = session.vim.service_content.perfManager
perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager,
querySpec=[query_spec])
stat_values = {}
if perf_stats:
entity_metric = perf_stats[0]
sample_infos = entity_metric.sampleInfo
if len(sample_infos) > 0:
for metric_series in entity_metric.value:
# Take the average of all samples to improve the accuracy
# of the stat value
stat_value = float(sum(metric_series.value)) / samples_cnt
device_id = metric_series.id.instance
stat_values[device_id] = stat_value
return stat_values
| apache-2.0 | -2,132,534,185,499,780,900 | 43.308696 | 79 | 0.631538 | false | 4.185216 | false | false | false |
YulongWu/my-utils | yidian/WYLDocFeatureDumpFetcher.py | 1 | 6233 | #coding: u8
import sys
reload(sys)
sys.setdefaultencoding('u8')
import urllib
import urllib2
import json
import traceback
import datetime
import re
# call format:
class WYLDocFeatureDumpFetcher(object):
serve_url = "http://10.111.0.54:8025/service/feature?docid={0}" #url for doc feature dump
serve_url = "http://10.111.0.54:8025/service/featuredump?docid={0}" #url for doc feature dump
cfb_cols = ['VClickDoc', 'VShareDoc', 'VViewComment', 'VAddComment', 'VLike', 'VDislike', 'VDWell', 'VDWellShortClick', 'VDWellClickDoc', 'ThumbUp', 'ThumbDown', 'RViewDoc']
docInfoBuffer = {}
def _getInfo(self, docid):
if docid in self.docInfoBuffer:
info = self.docInfoBuffer[docid]
else:
try:
req_url = self.serve_url.format(docid)
info = json.loads(urllib2.urlopen(req_url).read())
self.docInfoBuffer[docid] = info
except Exception, e:
print >> sys.stderr, "Error occured for docid: " + docid
print >> sys.stderr, traceback.format_exc()
return None
return info
def _getMetric(self, docid, f):
info = self._getInfo(docid)
if not info:
return -1
else:
clkbt = f(info)
return clkbt
def getDate(self, docid):
date_diff = self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^date'])
date = datetime.datetime.now() - datetime.timedelta(milliseconds = float(date_diff)*100000)
return date.strftime("%Y-%m-%d %H:%M:%S")
def getCFBFromDict(self, cfb_dict):
res_map = {}
for col in self.cfb_cols:
res_map[col] = -1 if col not in cfb_dict else cfb_dict[col]
return res_map
def getAllCFB(self, docid, prefix='all'):
res_map = {}
t = self._getMetric(docid, lambda info:info['result']['clickfeedbacks'][prefix])
if t and t != -1:
for col in self.cfb_cols:
res_map[col] = self._getMetric(docid, lambda info:info['result']['clickfeedbacks'][prefix][col])
return res_map
def _getSegmentCFBs(self, cfb_dict, cur_key, res_dict):
if not cfb_dict:
return
for key in cfb_dict.keys():
if key == 'stats':
                res_dict[cur_key] = self.getCFBFromDict(cfb_dict[key])
            elif key != 'all':
                self._getSegmentCFBs(cfb_dict[key], cur_key + '_' + key, res_dict)
return
def getCFBSegments(self, docid):
cfb_all = self._getMetric(docid, lambda info:info['result']['clickfeedbacks'])
res_dict = {}
self._getSegmentCFBs(cfb_all, '', res_dict)
return res_dict
    # Passed in as the third argument to getBestCFB()
    def bestCFB_getter(self, cfb_dict, numerator_key, denominator_key, denominator_bar):
        if numerator_key not in cfb_dict or denominator_key not in cfb_dict:
            return -1
        denominator = cfb_dict[denominator_key]
if denominator == -1 or denominator < denominator_bar:
return -1
numerator = cfb_dict[numerator_key]
return 1.0*numerator/denominator
def getBestCFB(self, docid, n_key, d_key, d_bar):
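        # Scans every CFB segment from getCFBSegments() and returns the
        # (segment_key, ratio) pair with the highest n_key/d_key ratio,
        # e.g. n_key='VClickDoc', d_key='RViewDoc' for a CTR-style metric
        # (illustrative keys taken from cfb_cols)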
res_dict = self.getCFBSegments(docid)
best_key, best_value = '', 0
for key in res_dict.keys():
v = self.bestCFB_getter(res_dict[key], n_key, d_key, d_bar)
if v > best_value:
best_value = v
best_key = key
return best_key, best_value
def getClkbtScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^clkbt'])
def getLCRScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^plcr'])
def getSCRScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^pscr'])
def _fetchDocumentData(self, docid, pattern):
doc_s = self._getMetric(docid, lambda info:info['result']['documentData'])
if doc_s == -1:
return None
match = re.search(pattern, doc_s)
if match:
return match.group(1)
else:
return None
def getDemandType(self, docid):
return self._fetchDocumentData(docid, 'demandType=(\w+?),')
def getCategories(self, docid):
return self._fetchDocumentData(docid, ' cat=\[(.+?)\]')
def fetchDictValue(self,d, keys):
v = None
t = d
for key in keys:
            if t and key in t:
                t = t[key]
else:
return None
return t
def getVClickDoc(self, docid):
# return self._getMetric(docid, lambda info:info['result']['clickfeedbacks']['all']['stats']['VClickDoc'])
info = self._getInfo(docid)
res = self.fetchDictValue(info, ['result', 'clickfeedbacks', 'all', 'stats', 'VClickDoc'])
if not res:
return -1
return res
def getIndepthScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^indepth'])
def getBpctr(self, docid):
if self.serve_url.find('featuredump') != -1:
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^bpctr'])
else:
return self._fetchDocumentData(docid, ' sc_bpctr=([\d\.\-e]+?),')
def getTmsstScore(self, docid):
# for http://10.111.0.54:8025/service/featuredump?docid=
if self.serve_url.find('featuredump') != -1:
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^tmsst'])
# for http://10.111.0.54:8025/service/feature?docid=
else:
return self._fetchDocumentData(docid, ' tmsst=(\w+?),')
def getMHot(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^mhot'])
def getRnkc(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^rnkc'])
def getRnksc(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^rnksc'])
| mit | -2,834,678,351,225,799,000 | 36.654545 | 177 | 0.589087 | false | 3.411862 | false | false | false |
dplusic/daff | scripts/python23.py | 1 | 1683 | from __future__ import unicode_literals, print_function
try:
import builtins
except:
import __builtin__
builtins = __builtin__
import functools
if hasattr(builtins,'unicode'):
# python2 variant
hxunicode = builtins.unicode
hxunichr = builtins.unichr
hxrange = xrange
def hxnext(x):
return x.next()
if hasattr(functools,"cmp_to_key"):
hx_cmp_to_key = functools.cmp_to_key
else:
# stretch to support python2.6
def hx_cmp_to_key(mycmp):
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
else:
# python3 variant
hxunicode = str
hxrange = range
hxunichr = chr
def hxnext(x):
return x.__next__()
hx_cmp_to_key = functools.cmp_to_key
python_lib_Builtins = python_lib_Builtin = builtins
String = builtins.str
python_lib_Dict = builtins.dict
python_lib_Set = builtins.set
def get_stdout():
return (python_lib_Sys.stdout.buffer if hasattr(python_lib_Sys.stdout,"buffer") else python_lib_Sys.stdout)
| mit | 7,958,329,564,262,106,000 | 31.365385 | 111 | 0.54902 | false | 3.707048 | false | false | false |
p4r4digm/todo-helper | src/todoLogging.py | 1 | 1872 | from src.todoMelvin import settings
from datetime import datetime
from subprocess import check_output
logSender = None
class WarningLevels:
Debug = {'level' : 0, 'tag' : 'DEBUG'}
Info = {'level' : 1, 'tag' : 'INFO'}
Warn = {'level' : 2, 'tag' : 'WARNING'}
Fatal = {'level' : 3, 'tag' : 'FATAL'}
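# Illustrative usage: log(WarningLevels.Info, "fetched repo list") writes to
# stdout and/or settings.logFile depending on the configured
# settings.logStdoutWLevel / settings.logFileWLevel thresholds.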
def callWithLogging(callData):
dateTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
messageTag = "%s [%s] [CALL]"%(dateTime, logSender)
try:
with open(settings.logFile, "a") as myfile:
msg = "%s %s"%(messageTag, (' ').join(callData))
myfile.write(msg + "\n")
if settings.logPrintCalls.lower() == 'true':
print msg
output = check_output(callData)
for line in output.split('\n'):
if len(line) > 0:
msg = "%s %s"%(messageTag, line)
myfile.write(msg+ "\n")
if settings.logPrintCalls.lower() == 'true':
print msg
myfile.close()
except:
print "Unable to open logfile for subprocess call \'%s\'"%(' '.join(callData))
return
def log(warningLevel, message):
dateTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
finalMessage = "%s [%s] [%s] %s"%(dateTime, logSender, warningLevel['tag'], message)
if int(settings.logStdoutWLevel) <= warningLevel['level']:
print finalMessage
if int(settings.logFileWLevel) <= warningLevel['level']:
try:
with open(settings.logFile, "a") as myfile:
myfile.write(finalMessage + "\n")
myfile.close()
except:
print "Unable to open logfile."
return
| mit | -1,290,323,906,406,167,300 | 29.688525 | 88 | 0.498932 | false | 3.957717 | false | false | false |
jiadaizhao/LeetCode | 0001-0100/0005-Longest Palindromic Substring/0005-Longest Palindromic Substring.py | 1 | 1407 | class Solution:
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
n = len(s)
maxLen, maxStart = 0, 0
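        # Expand around every centre: odd-length palindromes centred at s[i],
        # then even-length palindromes centred between s[i] and s[i + 1].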
for i in range(n):
l, left, right = 1, i - 1, i + 1
while left >= 0 and right < n and s[left] == s[right]:
left -= 1
right += 1
l += 2
if l > maxLen:
maxLen = l
maxStart = left + 1
l, left, right = 0, i, i + 1
while left >= 0 and right < n and s[left] == s[right]:
left -= 1
right += 1
l += 2
if l > maxLen:
maxLen = l
maxStart = left + 1
return s[maxStart:maxStart + maxLen]
# O(n)
class Solution2:
def longestPalindrome(self, s: str) -> str:
T = '#'.join('^{}$'.format(s))
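        # Manacher's algorithm: interleave '#' and add '^'/'$' sentinels,
        # e.g. "aba" -> "^#a#b#a#$", so every palindrome in T has odd length.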
n = len(T)
P = [0] * n
C = R = 0
for i in range(1, n - 1):
if R > i:
P[i] = min(R - i, P[2*C - i])
while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
P[i] += 1
if i + P[i] > R:
C, R = i, i + P[i]
maxLen, ci = max((l, i) for i, l in enumerate(P))
return s[(ci - maxLen)//2 : (ci + maxLen)//2]
| mit | -528,725,900,913,107,800 | 26.588235 | 66 | 0.340441 | false | 3.580153 | false | false | false |
aio-libs/aiohttp | aiohttp/web_server.py | 1 | 2247 | """Low level HTTP server."""
import asyncio
import warnings
from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa
from .abc import AbstractStreamWriter
from .http_parser import RawRequestMessage
from .streams import StreamReader
from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler
from .web_request import BaseRequest
__all__ = ("Server",)
class Server:
def __init__(
self,
handler: _RequestHandler,
*,
request_factory: Optional[_RequestFactory] = None,
debug: Optional[bool] = None,
**kwargs: Any,
) -> None:
if debug is not None:
warnings.warn(
"debug argument is no-op since 4.0 " "and scheduled for removal in 5.0",
DeprecationWarning,
stacklevel=2,
)
self._loop = asyncio.get_running_loop()
self._connections = {} # type: Dict[RequestHandler, asyncio.Transport]
self._kwargs = kwargs
self.requests_count = 0
self.request_handler = handler
self.request_factory = request_factory or self._make_request
@property
def connections(self) -> List[RequestHandler]:
return list(self._connections.keys())
def connection_made(
self, handler: RequestHandler, transport: asyncio.Transport
) -> None:
self._connections[handler] = transport
def connection_lost(
self, handler: RequestHandler, exc: Optional[BaseException] = None
) -> None:
if handler in self._connections:
del self._connections[handler]
def _make_request(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: RequestHandler,
writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
) -> BaseRequest:
return BaseRequest(message, payload, protocol, writer, task, self._loop)
async def shutdown(self, timeout: Optional[float] = None) -> None:
coros = [conn.shutdown(timeout) for conn in self._connections]
await asyncio.gather(*coros)
self._connections.clear()
def __call__(self) -> RequestHandler:
return RequestHandler(self, loop=self._loop, **self._kwargs)
| apache-2.0 | -5,668,502,912,688,113,000 | 32.044118 | 88 | 0.634179 | false | 4.39726 | false | false | false |
Lukas-Stuehrk/selenese | selenese/patterns.py | 1 | 1351 | import re
from fnmatch import fnmatch
class Pattern(object):
def __init__(self, pattern_string):
self.pattern_string = pattern_string
class ExactPattern(Pattern):
def compare(self, string):
return self.pattern_string == string
class RegexPattern(Pattern):
def compare(self, string):
if not hasattr(self, '_regex'):
self._regex = re.compile(self.pattern_string)
return self._regex.sub('', string) == ''
class RegexIgnorecasePattern(Pattern):
def compare(self, string):
if not hasattr(self, '_regex'):
self._regex = re.compile(self.pattern_string, flags=re.IGNORECASE)
return self._regex.sub('', string) == ''
class GlobPattern(Pattern):
def compare(self, string):
return fnmatch(string, self.pattern_string)
def create_pattern(pattern_string):
if pattern_string.startswith('exact:'):
return ExactPattern(pattern_string[6:])
elif pattern_string.startswith('glob:'):
return GlobPattern(pattern_string[5:])
elif pattern_string.startswith('regexp:'):
return RegexPattern(pattern_string[7:])
elif pattern_string.startswith('regexpi:'):
return RegexIgnorecasePattern(pattern_string[8:])
    # if no pattern scheme is given, assume that it is a 'glob' pattern
    return GlobPattern(pattern_string)
| bsd-3-clause | 8,510,309,876,751,397,000 | 29.727273 | 78 | 0.670614 | false | 4.057057 | false | false | false |
shear/rppy | test_ruger_hti.py | 2 | 2682 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 17:24:04 2015
@author: Sean
"""
import rppy
import numpy as np
import matplotlib.pyplot as plt
p1 = 2000
vp1 = 3000
vs1 = 1500
e1 = 0.0
d1 = 0.0
y1 = 0.0
p2 = 2200
vp2 = 4000
vs2 = 2000
y2 = 0.1
d2 = 0.1
e2 = 0.1
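# e, d and y are presumably the Thomsen-style anisotropy parameters
# (epsilon, delta, gamma); medium 1 is isotropic, medium 2 weakly anisotropic.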
theta = 30
phi = np.arange(0, 90, 1)
phit = np.array([1.2500, 4.9342, 8.6184, 11.842, 15.526, 19.211, 22.664,
25.888, 28.421, 30.724, 34.638, 38.092, 41.546, 45.461,
49.375, 53.289, 56.974, 60.888, 65.493, 69.408, 73.783,
79.079, 84.375, 89.211])
exp = np.array([0.19816, 0.19816, 0.19678, 0.19539, 0.19263, 0.19056,
0.18711, 0.18365, 0.18020, 0.17813, 0.17329, 0.16845,
0.16431, 0.15878, 0.15326, 0.14842, 0.14359, 0.13875,
0.13391, 0.12977, 0.12632, 0.12286, 0.12079, 0.12010])
Rpp = np.zeros(np.shape(phi))
Rpo = np.zeros(np.shape(phi))
Rpk = np.zeros(np.shape(phi))
for ind, phiv in enumerate(phi):
Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y2,
theta, phiv)
Rpo[ind] = rppy.reflectivity.exact_ortho(rppy.reflectivity.Cij(vp1, vs1, p1, 0, 0, 0, e1, d1, y1, 0), p1,
rppy.reflectivity.Cij(vp2, vs2, p2, 0, 0, 0, e2, d2, y2, 0), p2,
0, 0, phiv, theta)
Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
phiv, theta)
plt.figure(1)
plt.plot(phi, Rpp, phi, Rpo, phi, Rpk)
plt.show()
theta = np.arange(0, 60, 1)
phi = 45
Rpp = np.zeros(np.shape(theta))
Rpo = np.zeros(np.shape(theta))
Rpk = np.zeros(np.shape(theta))
Rpa = np.zeros(np.shape(theta))
for ind, thetav in enumerate(theta):
Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
thetav, phi)
Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
phi, thetav)
Rpo = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, theta)
Rpa = rppy.reflectivity.aki_richards(vp1, vs1, p1, vp2, vs2, p2, theta)
plt.figure(2)
plt.plot(theta, Rpp, theta, Rpo, theta, Rpk, theta, Rpa)
plt.xlim([0, 60])
plt.ylim([0.125, 0.275])
plt.legend(['Ruger', 'Zoe', 'Vavrycuk', 'A-R'])
plt.show()
| bsd-2-clause | -2,847,318,867,181,452,000 | 32.111111 | 109 | 0.503356 | false | 2.435967 | false | false | false |
vjFaLk/frappe | frappe/core/doctype/communication/email.py | 1 | 19348 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
from six.moves import range
from six import string_types
import frappe
import json
from email.utils import formataddr
from frappe.core.utils import get_parent_doc
from frappe.utils import (get_url, get_formatted_email, cint,
validate_email_add, split_emails, time_diff_in_seconds, parse_addr, get_datetime)
from frappe.utils.file_manager import get_file, add_attachments
from frappe.email.queue import check_email_limit
from frappe.utils.scheduler import log
from frappe.email.email_body import get_message_id
import frappe.email.smtp
import time
from frappe import _
from frappe.utils.background_jobs import enqueue
# imports - third-party imports
import pymysql
from pymysql.constants import ER
@frappe.whitelist()
def make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent",
sender=None, sender_full_name=None, recipients=None, communication_medium="Email", send_email=False,
print_html=None, print_format=None, attachments='[]', send_me_a_copy=False, cc=None, bcc=None,
flags=None, read_receipt=None, print_letterhead=True):
"""Make a new communication.
:param doctype: Reference DocType.
:param name: Reference Document name.
:param content: Communication body.
:param subject: Communication subject.
:param sent_or_received: Sent or Received (default **Sent**).
	:param sender: Communication sender (default current user).
	:param recipients: Communication recipients as list.
	:param communication_medium: Medium of communication (default **Email**).
	:param send_email: Send via email (default **False**).
:param print_html: HTML Print format to be sent as attachment.
:param print_format: Print Format name of parent document to be sent as attachment.
:param attachments: List of attachments as list of files or JSON string.
:param send_me_a_copy: Send a copy to the sender (default **False**).
"""
is_error_report = (doctype=="User" and name==frappe.session.user and subject=="Error Report")
send_me_a_copy = cint(send_me_a_copy)
if doctype and name and not is_error_report and not frappe.has_permission(doctype, "email", name) and not (flags or {}).get('ignore_doctype_permissions'):
raise frappe.PermissionError("You are not allowed to send emails related to: {doctype} {name}".format(
doctype=doctype, name=name))
if not sender:
sender = get_formatted_email(frappe.session.user)
comm = frappe.get_doc({
"doctype":"Communication",
"subject": subject,
"content": content,
"sender": sender,
"sender_full_name":sender_full_name,
"recipients": recipients,
"cc": cc or None,
"bcc": bcc or None,
"communication_medium": communication_medium,
"sent_or_received": sent_or_received,
"reference_doctype": doctype,
"reference_name": name,
"message_id":get_message_id().strip(" <>"),
"read_receipt":read_receipt,
"has_attachment": 1 if attachments else 0
})
comm.insert(ignore_permissions=True)
if not doctype:
# if no reference given, then send it against the communication
comm.db_set(dict(reference_doctype='Communication', reference_name=comm.name))
if isinstance(attachments, string_types):
attachments = json.loads(attachments)
# if not committed, delayed task doesn't find the communication
if attachments:
add_attachments("Communication", comm.name, attachments)
frappe.db.commit()
if cint(send_email):
frappe.flags.print_letterhead = cint(print_letterhead)
comm.send(print_html, print_format, attachments, send_me_a_copy=send_me_a_copy)
return {
"name": comm.name,
"emails_not_sent_to": ", ".join(comm.emails_not_sent_to) if hasattr(comm, "emails_not_sent_to") else None
}
def validate_email(doc):
"""Validate Email Addresses of Recipients and CC"""
if not (doc.communication_type=="Communication" and doc.communication_medium == "Email") or doc.flags.in_receive:
return
# validate recipients
for email in split_emails(doc.recipients):
validate_email_add(email, throw=True)
# validate CC
for email in split_emails(doc.cc):
validate_email_add(email, throw=True)
for email in split_emails(doc.bcc):
validate_email_add(email, throw=True)
# validate sender
def notify(doc, print_html=None, print_format=None, attachments=None,
recipients=None, cc=None, bcc=None, fetched_from_email_account=False):
"""Calls a delayed task 'sendmail' that enqueus email in Email Queue queue
:param print_html: Send given value as HTML attachment
:param print_format: Attach print format of parent document
:param attachments: A list of filenames that should be attached when sending this email
:param recipients: Email recipients
:param cc: Send email as CC to
:param bcc: Send email as BCC to
:param fetched_from_email_account: True when pulling email, the notification shouldn't go to the main recipient
"""
recipients, cc, bcc = get_recipients_cc_and_bcc(doc, recipients, cc, bcc,
fetched_from_email_account=fetched_from_email_account)
if not recipients and not cc:
return
doc.emails_not_sent_to = set(doc.all_email_addresses) - set(doc.sent_email_addresses)
if frappe.flags.in_test:
# for test cases, run synchronously
doc._notify(print_html=print_html, print_format=print_format, attachments=attachments,
recipients=recipients, cc=cc, bcc=None)
else:
check_email_limit(list(set(doc.sent_email_addresses)))
enqueue(sendmail, queue="default", timeout=300, event="sendmail",
communication_name=doc.name,
print_html=print_html, print_format=print_format, attachments=attachments,
recipients=recipients, cc=cc, bcc=bcc, lang=frappe.local.lang,
session=frappe.local.session, print_letterhead=frappe.flags.print_letterhead)
def _notify(doc, print_html=None, print_format=None, attachments=None,
recipients=None, cc=None, bcc=None):
prepare_to_notify(doc, print_html, print_format, attachments)
if doc.outgoing_email_account.send_unsubscribe_message:
unsubscribe_message = _("Leave this conversation")
else:
unsubscribe_message = ""
frappe.sendmail(
recipients=(recipients or []),
cc=(cc or []),
bcc=(bcc or []),
expose_recipients="header",
sender=doc.sender,
reply_to=doc.incoming_email_account,
subject=doc.subject,
content=doc.content,
reference_doctype=doc.reference_doctype,
reference_name=doc.reference_name,
attachments=doc.attachments,
message_id=doc.message_id,
unsubscribe_message=unsubscribe_message,
delayed=True,
communication=doc.name,
read_receipt=doc.read_receipt,
is_notification=True if doc.sent_or_received =="Received" else False,
print_letterhead=frappe.flags.print_letterhead
)
def update_parent_mins_to_first_response(doc):
"""Update mins_to_first_communication of parent document based on who is replying."""
parent = get_parent_doc(doc)
if not parent:
return
# update parent mins_to_first_communication only if we create the Email communication
# ignore in case of only Comment is added
if doc.communication_type == "Comment":
return
status_field = parent.meta.get_field("status")
if status_field:
options = (status_field.options or '').splitlines()
# if status has a "Replied" option, then update the status for received communication
if ('Replied' in options) and doc.sent_or_received=="Received":
parent.db_set("status", "Open")
else:
# update the modified date for document
parent.update_modified()
update_mins_to_first_communication(parent, doc)
parent.run_method('notify_communication', doc)
parent.notify_update()
def get_recipients_cc_and_bcc(doc, recipients, cc, bcc, fetched_from_email_account=False):
doc.all_email_addresses = []
doc.sent_email_addresses = []
doc.previous_email_sender = None
if not recipients:
recipients = get_recipients(doc, fetched_from_email_account=fetched_from_email_account)
if not cc:
cc = get_cc(doc, recipients, fetched_from_email_account=fetched_from_email_account)
if not bcc:
bcc = get_bcc(doc, recipients, fetched_from_email_account=fetched_from_email_account)
if fetched_from_email_account:
# email was already sent to the original recipient by the sender's email service
original_recipients, recipients = recipients, []
# send email to the sender of the previous email in the thread which this email is a reply to
#provides erratic results and can send external
#if doc.previous_email_sender:
# recipients.append(doc.previous_email_sender)
# cc that was received in the email
original_cc = split_emails(doc.cc)
# don't cc to people who already received the mail from sender's email service
cc = list(set(cc) - set(original_cc) - set(original_recipients))
remove_administrator_from_email_list(cc)
original_bcc = split_emails(doc.bcc)
bcc = list(set(bcc) - set(original_bcc) - set(original_recipients))
remove_administrator_from_email_list(bcc)
remove_administrator_from_email_list(recipients)
return recipients, cc, bcc
def remove_administrator_from_email_list(email_list):
if 'Administrator' in email_list:
email_list.remove('Administrator')
def prepare_to_notify(doc, print_html=None, print_format=None, attachments=None):
"""Prepare to make multipart MIME Email
:param print_html: Send given value as HTML attachment.
:param print_format: Attach print format of parent document."""
view_link = frappe.utils.cint(frappe.db.get_value("Print Settings", "Print Settings", "attach_view_link"))
if print_format and view_link:
doc.content += get_attach_link(doc, print_format)
set_incoming_outgoing_accounts(doc)
if not doc.sender:
doc.sender = doc.outgoing_email_account.email_id
if not doc.sender_full_name:
doc.sender_full_name = doc.outgoing_email_account.name or _("Notification")
if doc.sender:
# combine for sending to get the format 'Jane <[email protected]>'
doc.sender = formataddr([doc.sender_full_name, doc.sender])
doc.attachments = []
if print_html or print_format:
doc.attachments.append({"print_format_attachment":1, "doctype":doc.reference_doctype,
"name":doc.reference_name, "print_format":print_format, "html":print_html})
if attachments:
if isinstance(attachments, string_types):
attachments = json.loads(attachments)
for a in attachments:
if isinstance(a, string_types):
# is it a filename?
try:
# keep this for error handling
file = get_file(a)
# these attachments will be attached on-demand
# and won't be stored in the message
doc.attachments.append({"fid": a})
except IOError:
frappe.throw(_("Unable to find attachment {0}").format(a))
else:
doc.attachments.append(a)
def set_incoming_outgoing_accounts(doc):
doc.incoming_email_account = doc.outgoing_email_account = None
if not doc.incoming_email_account and doc.sender:
doc.incoming_email_account = frappe.db.get_value("Email Account",
{"email_id": doc.sender, "enable_incoming": 1}, "email_id")
if not doc.incoming_email_account and doc.reference_doctype:
doc.incoming_email_account = frappe.db.get_value("Email Account",
{"append_to": doc.reference_doctype, }, "email_id")
doc.outgoing_email_account = frappe.db.get_value("Email Account",
{"append_to": doc.reference_doctype, "enable_outgoing": 1},
["email_id", "always_use_account_email_id_as_sender", "name",
"always_use_account_name_as_sender_name"], as_dict=True)
if not doc.incoming_email_account:
doc.incoming_email_account = frappe.db.get_value("Email Account",
{"default_incoming": 1, "enable_incoming": 1}, "email_id")
if not doc.outgoing_email_account:
# if from address is not the default email account
doc.outgoing_email_account = frappe.db.get_value("Email Account",
{"email_id": doc.sender, "enable_outgoing": 1},
["email_id", "always_use_account_email_id_as_sender", "name",
"send_unsubscribe_message", "always_use_account_name_as_sender_name"], as_dict=True) or frappe._dict()
if not doc.outgoing_email_account:
doc.outgoing_email_account = frappe.db.get_value("Email Account",
{"default_outgoing": 1, "enable_outgoing": 1},
["email_id", "always_use_account_email_id_as_sender", "name",
"send_unsubscribe_message", "always_use_account_name_as_sender_name"],as_dict=True) or frappe._dict()
if doc.sent_or_received == "Sent":
doc.db_set("email_account", doc.outgoing_email_account.name)
def get_recipients(doc, fetched_from_email_account=False):
"""Build a list of email addresses for To"""
# [EDGE CASE] doc.recipients can be None when an email is sent as BCC
recipients = split_emails(doc.recipients)
#if fetched_from_email_account and doc.in_reply_to:
# add sender of previous reply
#doc.previous_email_sender = frappe.db.get_value("Communication", doc.in_reply_to, "sender")
#recipients.append(doc.previous_email_sender)
if recipients:
recipients = filter_email_list(doc, recipients, [])
return recipients
def get_cc(doc, recipients=None, fetched_from_email_account=False):
"""Build a list of email addresses for CC"""
# get a copy of CC list
cc = split_emails(doc.cc)
if doc.reference_doctype and doc.reference_name:
if fetched_from_email_account:
# if it is a fetched email, add follows to CC
cc.append(get_owner_email(doc))
cc += get_assignees(doc)
if getattr(doc, "send_me_a_copy", False) and doc.sender not in cc:
cc.append(doc.sender)
if cc:
# exclude unfollows, recipients and unsubscribes
exclude = [] #added to remove account check
exclude += [d[0] for d in frappe.db.get_all("User", ["email"], {"thread_notify": 0}, as_list=True)]
exclude += [(parse_addr(email)[1] or "").lower() for email in recipients]
if fetched_from_email_account:
# exclude sender when pulling email
exclude += [parse_addr(doc.sender)[1]]
if doc.reference_doctype and doc.reference_name:
exclude += [d[0] for d in frappe.db.get_all("Email Unsubscribe", ["email"],
{"reference_doctype": doc.reference_doctype, "reference_name": doc.reference_name}, as_list=True)]
cc = filter_email_list(doc, cc, exclude, is_cc=True)
return cc
def get_bcc(doc, recipients=None, fetched_from_email_account=False):
"""Build a list of email addresses for BCC"""
bcc = split_emails(doc.bcc)
if bcc:
exclude = []
exclude += [d[0] for d in frappe.db.get_all("User", ["email"], {"thread_notify": 0}, as_list=True)]
exclude += [(parse_addr(email)[1] or "").lower() for email in recipients]
if fetched_from_email_account:
# exclude sender when pulling email
exclude += [parse_addr(doc.sender)[1]]
if doc.reference_doctype and doc.reference_name:
exclude += [d[0] for d in frappe.db.get_all("Email Unsubscribe", ["email"],
{"reference_doctype": doc.reference_doctype, "reference_name": doc.reference_name}, as_list=True)]
bcc = filter_email_list(doc, bcc, exclude, is_bcc=True)
return bcc
def filter_email_list(doc, email_list, exclude, is_cc=False, is_bcc=False):
# temp variables
filtered = []
email_address_list = []
for email in list(set(email_list)):
email_address = (parse_addr(email)[1] or "").lower()
if not email_address:
continue
# this will be used to eventually find email addresses that aren't sent to
doc.all_email_addresses.append(email_address)
if (email in exclude) or (email_address in exclude):
continue
if is_cc:
is_user_enabled = frappe.db.get_value("User", email_address, "enabled")
if is_user_enabled==0:
# don't send to disabled users
continue
if is_bcc:
is_user_enabled = frappe.db.get_value("User", email_address, "enabled")
if is_user_enabled==0:
continue
# make sure of case-insensitive uniqueness of email address
if email_address not in email_address_list:
# append the full email i.e. "Human <[email protected]>"
filtered.append(email)
email_address_list.append(email_address)
doc.sent_email_addresses.extend(email_address_list)
return filtered
def get_owner_email(doc):
owner = get_parent_doc(doc).owner
return get_formatted_email(owner) or owner
def get_assignees(doc):
return [( get_formatted_email(d.owner) or d.owner ) for d in
frappe.db.get_all("ToDo", filters={
"reference_type": doc.reference_doctype,
"reference_name": doc.reference_name,
"status": "Open"
}, fields=["owner"])
]
def get_attach_link(doc, print_format):
"""Returns public link for the attachment via `templates/emails/print_link.html`."""
return frappe.get_template("templates/emails/print_link.html").render({
"url": get_url(),
"doctype": doc.reference_doctype,
"name": doc.reference_name,
"print_format": print_format,
"key": get_parent_doc(doc).get_signature()
})
def sendmail(communication_name, print_html=None, print_format=None, attachments=None,
recipients=None, cc=None, bcc=None, lang=None, session=None, print_letterhead=None):
try:
if lang:
frappe.local.lang = lang
if session:
# hack to enable access to private files in PDF
session['data'] = frappe._dict(session['data'])
frappe.local.session.update(session)
if print_letterhead:
frappe.flags.print_letterhead = print_letterhead
		# up to 3 retries
for i in range(3):
try:
communication = frappe.get_doc("Communication", communication_name)
communication._notify(print_html=print_html, print_format=print_format, attachments=attachments,
recipients=recipients, cc=cc, bcc=bcc)
except pymysql.InternalError as e:
# deadlock, try again
if e.args[0] == ER.LOCK_DEADLOCK:
frappe.db.rollback()
time.sleep(1)
continue
else:
raise
else:
break
except:
traceback = log("frappe.core.doctype.communication.email.sendmail", frappe.as_json({
"communication_name": communication_name,
"print_html": print_html,
"print_format": print_format,
"attachments": attachments,
"recipients": recipients,
"cc": cc,
"bcc": bcc,
"lang": lang
}))
frappe.logger(__name__).error(traceback)
raise
def update_mins_to_first_communication(parent, communication):
if parent.meta.has_field('mins_to_first_response') and not parent.get('mins_to_first_response'):
if frappe.db.get_all('User', filters={'email': communication.sender,
'user_type': 'System User', 'enabled': 1}, limit=1):
first_responded_on = communication.creation
if parent.meta.has_field('first_responded_on'):
parent.db_set('first_responded_on', first_responded_on)
parent.db_set('mins_to_first_response', round(time_diff_in_seconds(first_responded_on, parent.creation) / 60), 2)
@frappe.whitelist(allow_guest=True)
def mark_email_as_seen(name=None):
try:
if name and frappe.db.exists("Communication", name) and not frappe.db.get_value("Communication", name, "read_by_recipient"):
frappe.db.set_value("Communication", name, "read_by_recipient", 1)
frappe.db.set_value("Communication", name, "delivery_status", "Read")
frappe.db.set_value("Communication", name, "read_by_recipient_on", get_datetime())
frappe.db.commit()
except Exception:
frappe.log_error(frappe.get_traceback())
finally:
# Return image as response under all circumstances
from PIL import Image
import io
im = Image.new('RGBA', (1, 1))
im.putdata([(255,255,255,0)])
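		# i.e. a 1x1 fully transparent PNG used as the email read-receipt
		# tracking pixel returned to the mail client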
buffered_obj = io.BytesIO()
im.save(buffered_obj, format="PNG")
frappe.response["type"] = 'binary'
frappe.response["filename"] = "imaginary_pixel.png"
frappe.response["filecontent"] = buffered_obj.getvalue()
| mit | 5,878,466,721,758,545,000 | 34.896104 | 155 | 0.723641 | false | 3.208624 | false | false | false |
apanda/modeling | tests/num_dnode_test.py | 1 | 1303 | import z3
from z3 import is_true, is_false
from examples import *
import time
import mcnet.components as components
import random
import sys
"""Check time as increase in nodes"""
def ResetZ3 ():
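    # Drop and rebuild the global Z3 context so every trial starts from a
    # fresh solver state with a new random seed.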
z3._main_ctx = None
z3.main_ctx()
z3.set_param('auto_config', False)
z3.set_param('smt.mbqi', True)
z3.set_param('model.compact', True)
z3.set_param('smt.pull_nested_quantifiers', True)
z3.set_param('smt.mbqi.max_iterations', 10000)
z3.set_param('smt.random_seed', random.SystemRandom().randint(0, sys.maxint))
iters = 10
bad_in_row = 0
for sz in xrange(2, 200):
times = []
all_bad = True
for it in xrange(0, iters):
ResetZ3()
obj = NumDumbNodesTest (sz)
start = time.time()
# Set timeout to some largish number
obj.check.solver.set(timeout=10000000)
ret = obj.check.CheckIsolationProperty(obj.e_0, obj.e_1)
bad = False
if z3.sat != ret.result:
bad = True
stop = time.time()
if not bad:
times.append(stop - start)
all_bad = False
print "%d %s %s"%(sz, ' '.join(map(str, times)), "bad" if all_bad else "good")
if all_bad:
bad_in_row += 1
else:
bad_in_row = 0
assert bad_in_row <= 5, \
"Too many failures"
| bsd-3-clause | 4,604,562,846,646,764,000 | 29.302326 | 82 | 0.594014 | false | 3.117225 | false | false | false |
h2oloopan/easymerge | EasyMerge/tests/reddit/scripts/migrate/backfill/subreddit_images.py | 1 | 2325 |
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
import urllib2
from pylons import g
from r2.lib.db.operators import desc
from r2.lib.utils import fetch_things2
from r2.lib.media import upload_media
from r2.models.subreddit import Subreddit
from r2.models.wiki import WikiPage, ImagesByWikiPage
all_subreddits = Subreddit._query(sort=desc("_date"))
for sr in fetch_things2(all_subreddits):
images = sr.images.copy()
images.pop("/empties/", None)
if not images:
continue
print 'Processing /r/%s (id36: %s)' % (sr.name, sr._id36)
# upgrade old-style image ids to urls
for name, image_url in images.items():
if not isinstance(image_url, int):
continue
print " upgrading image %r" % image_url
url = "http://%s/%s_%d.png" % (g.s3_old_thumb_bucket,
sr._fullname, image_url)
image_data = urllib2.urlopen(url).read()
new_url = upload_media(image_data, file_type=".png")
images[name] = new_url
# use a timestamp of zero to make sure that we don't overwrite any changes
# from live dual-writes.
rowkey = WikiPage.id_for(sr, "config/stylesheet")
ImagesByWikiPage._cf.insert(rowkey, images, timestamp=0)
| mit | -8,692,393,015,163,003,000 | 37.75 | 79 | 0.68 | false | 3.805237 | false | false | false |
wyoder/pubs | ui/fields.py | 1 | 14935 | '''
Fields that are used in our UI
#.. todo: Make a field specifically for lists
'''
from pubs.ui import *
import pubs.ui.models as models
import pubs.pGraph as pGraph
import pubs.pNode as pNode
class BaseField(QtWidgets.QWidget):
def __init__(self, label, value = None, description = str(), parent = None, attribute = None):
super(BaseField, self).__init__(parent)
self.__label = QtWidgets.QLabel(label)
self.__label.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Fixed)
self.__value = value
#self.__description = self.setAccessibleDescription(description)
self.__attribute= attribute
self.setContentsMargins(0,2,0,2)
def label(self):
return self.__label
def attribute(self):
return self.__attribute
def labelText(self):
return self.__label.text()
def setLabel(self, value):
self.__label.setText(value)
def value(self):
return self.__value
def setValue(self,value):
self.__value = value
if self.__attribute:
self.__attribute.setValue(value)
def setDescription(self, value):
'''
Sets the description of the current field
@param value: String describing the field
@type value: *str* or *QString*
'''
#Check type
if not isinstance(value, basestring) and not isinstance(value, QtCore.QString):
raise TypeError('%s must be a string or QString' % value)
#set values
self.__description = value
self.setDescription(value)
class LineEditField(BaseField):
def __init__(self, *args, **kwargs):
super(LineEditField, self).__init__(*args,**kwargs)
self._layout = QtWidgets.QHBoxLayout()
self._lineEdit = QtWidgets.QLineEdit()
self._lineEdit.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Fixed)
#set text if any value
if self.value():
self.setText(self.value())
#self._lineEdit.setMaximumHeight(20)
self._lineEdit.setMinimumHeight(40)
self._lineEdit.setMinimumWidth(200)
self._lineEdit.textChanged.connect(self.setText)
self._layout.addWidget(self.label())
self._layout.addWidget(self._lineEdit)
self._layout.addStretch()
self.setLayout(self._layout)
def setText(self, value):
'''
Sets the text for the QLineEdit
'''
if not isinstance(value, basestring) and not isinstance(value, QtCore.QString):
raise TypeError('%s must be an string' % value)
#get the souce of the call for setText function
source = self.sender()
#set the value on field
self.setValue(str(value))
#set lineEdit text
if not source == self._lineEdit:
self._lineEdit.setText(value)
def getLineEdit(self):
return self._lineEdit
class DirBrowserField(LineEditField):
def __init__(self, *args, **kwargs):
super(DirBrowserField, self).__init__(*args, **kwargs)
self._dirBrowseButton = QtWidgets.QPushButton(QtGui.QIcon( os.path.join(os.path.dirname( __file__ ), 'icons/folder.png') ),'')
self._dirBrowseButton.clicked.connect(self._getDir)
self._layout.addWidget(self._dirBrowseButton)
self._layout.setContentsMargins(0,0,0,0)
def _getDir(self,index):
dir = QtWidgets.QFileDialog.getExistingDirectory(self, 'open', str(os.getcwd()))
self.setText(str(dir))
class FileBrowserField(LineEditField):
def __init__(self, mode = 'open', filter = "", *args, **kwargs):
super(FileBrowserField, self).__init__(*args, **kwargs)
self.__mode = mode.lower()
self.__filter = filter
self._fileBrowseButton = QtWidgets.QPushButton(QtGui.QIcon( os.path.join(os.path.dirname( __file__ ), 'icons/folder.png') ),'')
self._fileBrowseButton.clicked.connect(self._getFile)
self._layout.addWidget(self._fileBrowseButton)
self._layout.addStretch()
self._layout.setContentsMargins(0,0,0,0)
def _getFile(self,*args):
if self.__mode == 'save':
file = QtWidgets.QFileDialog.getSaveFileName(self, 'save', str(os.getcwd()), self.__filter)[0]
else:
file = QtWidgets.QFileDialog.getOpenFileName(self, 'open', str(os.getcwd()), self.__filter)[0]
if file:
self.setText(str(file))
class ListField(BaseField):
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
self.listGraph = pGraph.PGraph('listGraph')
for value in self.value():
self.listGraph.addNode(value)
self._model = models.LayerGraphModel(self.listGraph)
self._layout = QtWidgets.QHBoxLayout()
self._listView = QtWidgets.QListView()
self._listView.setModel(self._model)
self._listView.setMaximumHeight(100)
#self._listView.setMaximumWidth(100)
self._layout.addWidget(self.label())
self._layout.addWidget(self._listView)
self.setLayout(self._layout)
self._layout.setContentsMargins(0,0,0,0)
self._layout.addStretch()
#CONTEXT MENU
self._listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.connect(self._listView, QtCore.SIGNAL("customContextMenuRequested(const QPoint &)"), self.showCustomContextMenu)
def showCustomContextMenu(self, pos):
'''
Show the context menu at the position of the curser
:param pos: The point where the curser is on the screen
:type pos: QtCore.QPoint
'''
index = self._listView.indexAt(pos)
if not index.isValid():
return
node = self._model.itemFromIndex(index)
#If node is disabled, return
if not node.isActive():
return
#construct menus
mainMenu = QtWidgets.QMenu(self)
#main menu actions
mainMenu.addSeparator()
addNodeAction = mainMenu.addAction('Add Item')
removeNodeAction = mainMenu.addAction('Remove Item')
QtCore.QObject.connect(addNodeAction, QtCore.SIGNAL('triggered()'), self.__addDialog)
QtCore.QObject.connect(removeNodeAction, QtCore.SIGNAL('triggered()'), self._removeSelectedNode)
mainMenu.popup(QtGui.QCursor.pos())
def _removeSelectedNode(self):
index = self._listView.currentIndex()
node = self._selectedNode()
#self._model.removeRows(index.row(), 1, self._model)
if node:
self._model.beginRemoveRows( index.parent(), index.row(), index.row()+1-1 )
self.listGraph.removeNode(node)
self._model.endRemoveRows()
del node
self.setValue(self.listGraph.nodeNames())
def _addNode(self,value):
if not isinstance(self.value(),list):
self.setValue([value])
        else:
            # list.append() returns None, so append first and then store the list
            values = self.value()
            values.append(value)
            self.setValue(values)
self.listGraph.addNode(value)
self._model = models.LayerGraphModel(self.listGraph)
self._listView.setModel(self._model)
def __addDialog(self,*args):
dialog = QtWidgets.QDialog(self)
dialog.exec_()
def _selectedNode(self):
'''
Returns the selected node
'''
index = self._listView.currentIndex()
if not index.isValid():
return None
return self._model.itemFromIndex(index)
class TextEditField(BaseField):
def __init__(self, *args, **kwargs):
super(TextEditField, self).__init__(*args, **kwargs)
self._textEdit = QtWidgets.QPlainTextEdit(self.value())
self._layout = QtWidgets.QVBoxLayout()
self._layout.addWidget(self.label())
self._layout.addWidget(self._textEdit)
self._layout.addStretch()
self.setLayout(self._layout)
self._textEdit.textChanged.connect(self.setText)
self._layout.setContentsMargins(0,0,0,0)
def setText(self):
self.setValue(str(self._textEdit.toPlainText()))
class IntField(BaseField):
def __init__(self, label, value = 0, description = str(), parent = None, min = -100, max = 100, **kwargs):
super(IntField, self).__init__(label, value, description, parent, **kwargs)
self._layout = QtWidgets.QHBoxLayout()
self._intBox = QtWidgets.QSpinBox()
self._intBox.setRange(min,max)
self._layout.addWidget(self.label())
if value:
self._intBox.setValue(value)
self._intBox.valueChanged.connect(self.setValue)
self._layout.addWidget(self._intBox)
self._layout.addStretch()
self.setLayout(self._layout)
self._layout.setContentsMargins(0,0,0,0)
def setValue(self, value):
'''
Sets the text for the QLineEdit
'''
if not isinstance(value, int):
raise TypeError('%s must be an integer' % value)
#get the source of where the function is being called
source = self.sender()
#set field value
super(IntField, self).setValue(value)
#set spinBox value
if not source == self._intBox:
self._intBox.setValue(value)
def value(self):
value = self._intBox.value()
super(IntField, self).setValue(int(value))
return super(IntField,self).value()
class VectorField(BaseField):
def __init__(self, *args, **kwargs):
super(VectorField, self).__init__(*args,**kwargs)
#create layouts
self._layout = QtWidgets.QHBoxLayout()
self._valueLayout = QtWidgets.QVBoxLayout()
#create widgets
self._xField = LineEditField(label = 'X')
self._yField = LineEditField(label = 'Y')
self._zField = LineEditField(label = 'Z')
#set line edit widths
self._xField.getLineEdit().setMaximumWidth(55)
self._xField.getLineEdit().setMinimumWidth(55)
self._xField.getLineEdit().setMaximumHeight(20)
self._xField.getLineEdit().setMinimumHeight(20)
self._yField.getLineEdit().setMaximumWidth(55)
self._yField.getLineEdit().setMinimumWidth(55)
self._yField.getLineEdit().setMaximumHeight(20)
self._yField.getLineEdit().setMinimumHeight(20)
self._zField.getLineEdit().setMaximumWidth(55)
self._zField.getLineEdit().setMinimumWidth(55)
self._zField.getLineEdit().setMaximumHeight(20)
self._zField.getLineEdit().setMinimumHeight(20)
#set validators for line edits
self._xField.getLineEdit().setValidator(QtGui.QDoubleValidator())
self._yField.getLineEdit().setValidator(QtGui.QDoubleValidator())
self._zField.getLineEdit().setValidator(QtGui.QDoubleValidator())
#connect line edits to set value methods
self._xField.getLineEdit().editingFinished.connect(self._setValue)
self._yField.getLineEdit().editingFinished.connect(self._setValue)
self._zField.getLineEdit().editingFinished.connect(self._setValue)
#add widgets to the layout
self._valueLayout.addWidget(self._xField)
self._valueLayout.addWidget(self._yField)
self._valueLayout.addWidget(self._zField)
#self._valueLayout.addStretch()
self._layout.addWidget(self.label())
self._layout.addLayout(self._valueLayout)
self._valueLayout.setContentsMargins(0,0,0,0)
self._layout.setContentsMargins(0,0,0,0)
#set text if any value
if self.value():
if isinstance(self.value(), list) or isinstance(self.value(), tuple):
if len(self.value()) < 3 or len(self.value()) > 3:
raise TypeError('%s must be a list of 3 values' % self.value())
#set the values on the individual fields
self._xField.getLineEdit().setText('%.4f' % float(self.value()[0]))
self._yField.getLineEdit().setText('%.4f' % float(self.value()[1]))
self._zField.getLineEdit().setText('%.4f' % float(self.value()[2]))
else:
raise TypeError('%s must be a list of 3 values' % self.value())
else:
self.setValue(['%.4f' % float(0.0),'%.4f' % float(0.0),'%.4f' % float(0.0)])
self.setLayout(self._layout)
self._layout.addStretch()
def setValue(self, value):
self._xField.getLineEdit().setText('%.4f' % float(value[0]))
self._yField.getLineEdit().setText('%.4f' % float(value[1]))
self._zField.getLineEdit().setText('%.4f' % float(value[2]))
        super(VectorField, self).setValue(value)
def _setValue(self, *args):
sender = self.sender()
if sender == self._xField.getLineEdit():
value = self._xField.getLineEdit().text()
self._xField.getLineEdit().setText('%.4f' % float(value))
super(VectorField, self).setValue((float(value),self.value()[1],self.value()[2]))
if sender == self._yField.getLineEdit():
value = self._yField.getLineEdit().text()
self._yField.getLineEdit().setText('%.4f' % float(value))
super(VectorField, self).setValue((self.value()[0], float(value), self.value()[2]))
if sender == self._zField.getLineEdit():
value = self._zField.getLineEdit().text()
self._zField.getLineEdit().setText('%.4f' % float(value))
super(VectorField, self).setValue((self.value()[0],self.value()[1], float(value)))
class BooleanField(BaseField):
def __init__(self, *args, **kwargs):
super(BooleanField, self).__init__(*args, **kwargs)
self._layout = QtWidgets.QHBoxLayout()
self._checkBox = QtWidgets.QCheckBox()
self._checkBox.toggled.connect(self.setValue)
self._layout.addWidget(self.label())
#self._layout.addStretch()
self._layout.addWidget(self._checkBox)
self._layout.addStretch()
self.setValue(self.value())
self._layout.setContentsMargins(0,0,0,0)
self.setLayout(self._layout)
def setValue(self, value):
super(BooleanField, self).setValue(value)
self._checkBox.blockSignals(True)
if value:
self._checkBox.setCheckState(QtCore.Qt.Checked)
else:
self._checkBox.setCheckState(QtCore.Qt.Unchecked)
self._checkBox.blockSignals(False)
| gpl-3.0 | 696,080,950,196,650,100 | 37.196931 | 135 | 0.600737 | false | 4.061735 | false | false | false |
migonzalvar/threaded-launcher | watchdog.py | 1 | 1246 | #!/bin/env python3.4
from threading import Thread
import os
import subprocess
import time
def watchdog():
"""Launch all the scripts in a folder and wait until completion."""
scripts_processes = []
base_dir = os.path.join(os.path.dirname(__file__), 'modules')
# Launch scripts
for script in os.listdir(base_dir):
script = os.path.join(base_dir, script)
print('** Executing {}'.format(script))
process = subprocess.Popen(['{}'.format(script)], shell=True, stdout=subprocess.PIPE)
scripts_processes.append(process)
# Wait for script completion
while scripts_processes:
time.sleep(1)
for process in scripts_processes:
ret_code = process.poll()
if ret_code is not None:
scripts_processes.remove(process)
print('** {} finished with code {}'.format(process, ret_code))
print('** {} start output'.format(process))
print(process.stdout.read())
print('** {} end output'.format(process))
else:
print('** {} Still running'.format(process))
t = Thread(target=watchdog)
print('## Start watchdog')
t.start()
t.join()
print('## Finish watchdog')
| mit | 5,560,709,927,552,056,000 | 29.390244 | 93 | 0.599518 | false | 4.167224 | false | false | false |
sorki/faf | src/pyfaf/bugtrackers/bugzilla.py | 1 | 23495 | # Copyright (C) 2013 ABRT Team
# Copyright (C) 2013 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import datetime
import bugzilla
from pyfaf import queries
from pyfaf.common import FafError
from pyfaf.utils.decorators import retry
from pyfaf.utils.date import daterange
from pyfaf.storage import column_len
from pyfaf.storage.bugzilla import (BzBug,
BzUser,
BzBugCc,
BzComment,
BzAttachment,
BzBugHistory)
from pyfaf.bugtrackers import BugTracker
from xmlrpclib import Fault
__all__ = ["Bugzilla"]
class Bugzilla(BugTracker):
"""
Proxy over python-bugzilla library handling bug downloading,
creation and updates.
"""
name = "abstract_bugzilla"
report_backref_name = "bz_bugs"
def __init__(self):
"""
Load required configuration based on instance name.
"""
super(Bugzilla, self).__init__()
# load config for corresponding bugzilla (e.g. fedorabz.api_url,
# rhelbz.user, xyzbz.password)
self.load_config_to_self("api_url", "{0}.api_url".format(self.name))
self.load_config_to_self("web_url", "{0}.web_url".format(self.name))
self.load_config_to_self("new_bug_url", "{0}.new_bug_url"
.format(self.name))
self.load_config_to_self("user", "{0}.user".format(self.name))
self.load_config_to_self("password", "{0}.password".format(self.name))
self.connected = False
if not self.api_url:
self.log_error("No api_url specified for '{0}' bugzilla instance".
format(self.name))
return
# url has to be string not unicode due to pycurl
self.api_url = str(self.api_url)
def _connect(self):
if self.connected:
return
self.log_debug("Opening bugzilla connection for '{0}'"
.format(self.name))
self.bz = bugzilla.Bugzilla(url=str(self.api_url), cookiefile=None)
if self.user and self.password:
self.log_debug("Logging into bugzilla '{0}' as '{1}'"
.format(self.name, self.user))
self.bz.login(self.user, self.password)
self.connected = True
def download_bug_to_storage_no_retry(self, db, bug_id):
"""
Download and save single bug identified by `bug_id`.
"""
self.log_debug(u"Downloading bug #{0}".format(bug_id))
self._connect()
try:
bug = self.bz.getbug(bug_id)
except Fault as ex:
if int(ex.faultCode) == 102:
# Access denied to a private bug
raise FafError(ex.faultString)
else:
raise
return self._save_bug(db, bug)
@retry(3, delay=10, backoff=3, verbose=True)
def download_bug_to_storage(self, db, bug_id):
return self.download_bug_to_storage_no_retry(db, bug_id)
def list_bugs(self, from_date=datetime.date.today(),
to_date=datetime.date(2000, 1, 1),
step=7,
stop_after_empty_steps=10,
updated_first=False,
custom_fields=dict()):
"""
Fetch all bugs by creation or modification date
starting `from_date` until we are not able to find more
of them or `to_date` is hit.
Bugs are pulled in date ranges defined by `step`
not to hit bugzilla timeouts.
Number of empty queries required before we stop querying is
controlled by `stop_after_empty_steps`.
If `updated_first` is True, recently modified bugs
are queried first.
`custom_fields` dictionary can be used to create more specific
bugzilla queries.
"""
if not updated_first:
custom_fields.update(dict(chfield="[Bug creation]"))
empty = 0
over_days = list(daterange(from_date, to_date, step, desc=True))
prev = over_days[0]
for current in over_days[1:]:
limit = 100
offset = 0
fetched_per_date_range = 0
while True:
try:
result = self._query_bugs(
prev, current, limit, offset, custom_fields)
except Exception as e:
self.log_error("Exception after multiple attempts: {0}."
" Ignoring".format(e.message))
continue
count = len(result)
fetched_per_date_range += count
self.log_debug("Got {0} bugs".format(count))
for bug in result:
yield bug.bug_id
if not count:
self.log_debug("No more bugs in this date range")
break
offset += limit
if not fetched_per_date_range:
empty += 1
if empty >= stop_after_empty_steps:
break
else:
empty = 0
prev = current - datetime.timedelta(1)
@retry(3, delay=10, backoff=3, verbose=True)
def _query_bugs(self, to_date, from_date,
limit=100, offset=0, custom_fields=dict()):
"""
Perform bugzilla query for bugs since `from_date` to `to_date`.
Use `custom_fields` to perform additional filtering.
"""
target = "bugs modified"
if "chfield" in custom_fields:
target = "bugs created"
self.log_debug("Fetching {0} between "
"{1} and {2}, offset is: {3}".format(target, from_date,
to_date, offset))
que = dict(
chfieldto=to_date.strftime("%Y-%m-%d"),
chfieldfrom=from_date.strftime("%Y-%m-%d"),
query_format="advanced",
limit=limit,
offset=offset,
)
que.update(custom_fields)
self._connect()
return self.bz.query(que)
def _convert_datetime(self, bz_datetime):
"""
Convert `bz_datetime` returned by python-bugzilla
to standard datetime.
"""
return datetime.datetime.fromtimestamp(
time.mktime(bz_datetime.timetuple()))
def _preprocess_bug(self, bug):
"""
Process the bug instance and return
dictionary with fields required by lower logic.
Returns `None` if there are missing fields.
"""
required_fields = [
"bug_id",
"creation_time",
"last_change_time",
"product",
"version",
"component",
"summary",
"status",
"resolution",
"cc",
"status_whiteboard",
"reporter",
"comments",
"attachments",
]
bug_dict = dict()
for field in required_fields:
if not hasattr(bug, field):
self.log_error("Missing bug field {0}".format(field))
return None
bug_dict[field] = getattr(bug, field)
for field in ["creation_time", "last_change_time"]:
bug_dict[field] = self._convert_datetime(bug_dict[field])
history = bug.get_history()
bug_dict["history"] = history["bugs"][0]["history"]
if bug.resolution == "DUPLICATE":
bug_dict["dupe_id"] = bug.dupe_id
return bug_dict
def _save_bug(self, db, bug):
"""
Save bug represented by `bug_dict` to the database.
If bug is marked as duplicate, the duplicate bug is downloaded
as well.
"""
bug_dict = self._preprocess_bug(bug)
if not bug_dict:
self.log_error("Bug pre-processing failed")
return
self.log_debug("Saving bug #{0}: {1}".format(bug_dict["bug_id"],
bug_dict["summary"]))
bug_id = bug_dict["bug_id"]
# check if we already have this bug up-to-date
old_bug = (
db.session.query(BzBug)
.filter(BzBug.id == bug_id)
.filter(BzBug.last_change_time == bug_dict["last_change_time"])
.first())
if old_bug:
self.log_info("Bug already up-to-date")
return old_bug
tracker = queries.get_bugtracker_by_name(db, self.name)
if not tracker:
self.log_error("Tracker with name '{0}' is not installed"
.format(self.name))
return
opsysrelease = queries.get_osrelease(db, bug_dict["product"],
bug_dict["version"])
if not opsysrelease:
self.log_error("Unable to save this bug due to unknown "
"release '{0} {1}'".format(bug_dict["product"],
bug_dict["version"]))
return
relcomponent = queries.get_component_by_name_release(
db, opsysrelease, bug_dict["component"])
if not relcomponent:
self.log_error("Unable to save this bug due to unknown "
"component '{0}'".format(bug_dict["component"]))
return
component = relcomponent.component
reporter = queries.get_bz_user(db, bug_dict["reporter"])
if not reporter:
self.log_debug("Creator {0} not found".format(
bug_dict["reporter"]))
downloaded = self._download_user(bug_dict["reporter"])
if not downloaded:
self.log_error("Unable to download user, skipping.")
return
reporter = self._save_user(db, downloaded)
new_bug = BzBug()
new_bug.id = bug_dict["bug_id"]
new_bug.summary = bug_dict["summary"]
new_bug.status = bug_dict["status"]
new_bug.creation_time = bug_dict["creation_time"]
new_bug.last_change_time = bug_dict["last_change_time"]
if bug_dict["status"] == "CLOSED":
new_bug.resolution = bug_dict["resolution"]
if bug_dict["resolution"] == "DUPLICATE":
if not queries.get_bz_bug(db, bug_dict["dupe_id"]):
self.log_debug("Duplicate #{0} not found".format(
bug_dict["dupe_id"]))
dup = self.download_bug_to_storage(db, bug_dict["dupe_id"])
if dup:
new_bug.duplicate = dup.id
new_bug.tracker_id = tracker.id
new_bug.component_id = component.id
new_bug.opsysrelease_id = opsysrelease.id
new_bug.creator_id = reporter.id
new_bug.whiteboard = bug_dict["status_whiteboard"]
# the bug itself might be downloaded during duplicate processing
# exit in this case - it would cause duplicate database entry
if queries.get_bz_bug(db, bug_dict["bug_id"]):
self.log_debug("Bug #{0} already exists in storage,"
" updating".format(bug_dict["bug_id"]))
bugdict = {}
for col in new_bug.__table__._columns:
bugdict[col.name] = getattr(new_bug, col.name)
(db.session.query(BzBug)
.filter(BzBug.id == bug_id).update(bugdict))
new_bug = queries.get_bz_bug(db, bug_dict["bug_id"])
else:
db.session.add(new_bug)
db.session.flush()
self._save_ccs(db, bug_dict["cc"], new_bug.id)
self._save_history(db, bug_dict["history"], new_bug.id)
self._save_attachments(db, bug_dict["attachments"], new_bug.id)
self._save_comments(db, bug_dict["comments"], new_bug.id)
return new_bug
def _save_ccs(self, db, ccs, new_bug_id):
"""
        Save CC'ed users to the database.
Expects list of emails (`ccs`) and ID of the bug as `new_bug_id`.
"""
total = len(ccs)
for num, user_email in enumerate(ccs):
self.log_debug("Processing CC: {0}/{1}".format(num + 1, total))
cc = (
db.session.query(BzBugCc)
.join(BzUser)
.filter((BzUser.email == user_email) &
(BzBugCc.bug_id == new_bug_id)).first())
if cc:
self.log_debug("CC'ed user {0} already"
" exists".format(user_email))
continue
cced = queries.get_bz_user(db, user_email)
if not cced:
self.log_debug("CC'ed user {0} not found,"
" adding.".format(user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
cced = self._save_user(db, downloaded)
new = BzBugCc()
new.bug_id = new_bug_id
new.user = cced
db.session.add(new)
db.session.flush()
def _save_history(self, db, events, new_bug_id):
"""
Save bug history to the database.
Expects list of `events` and ID of the bug as `new_bug_id`.
"""
total = len(events)
for num, event in enumerate(events):
self.log_debug("Processing history event {0}/{1}".format(num + 1,
total))
user_email = event["who"]
user = queries.get_bz_user(db, user_email)
if not user:
self.log_debug("History changed by unknown user #{0}".format(
user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
user = self._save_user(db, downloaded)
for change in event["changes"]:
chtime = self._convert_datetime(event["when"])
ch = (
db.session.query(BzBugHistory)
.filter((BzBugHistory.user == user) &
(BzBugHistory.time == chtime) &
(BzBugHistory.field == change["field_name"]) &
(BzBugHistory.added == change["added"]) &
(BzBugHistory.removed == change["removed"]))
.first())
if ch:
self.log_debug("Skipping existing history event "
"#{0}".format(ch.id))
continue
new = BzBugHistory()
new.bug_id = new_bug_id
new.user = user
new.time = chtime
new.field = change["field_name"]
new.added = change["added"][:column_len(BzBugHistory, "added")]
new.removed = change["removed"][:column_len(BzBugHistory, "removed")]
db.session.add(new)
db.session.flush()
def _save_attachments(self, db, attachments, new_bug_id):
"""
Save bug attachments to the database.
Expects list of `attachments` and ID of the bug as `new_bug_id`.
"""
total = len(attachments)
for num, attachment in enumerate(attachments):
self.log_debug("Processing attachment {0}/{1}".format(num + 1,
total))
if queries.get_bz_attachment(db, attachment["id"]):
self.log_debug("Skipping existing attachment #{0}".format(
attachment["id"]))
continue
user_email = attachment["attacher"]
user = queries.get_bz_user(db, user_email)
if not user:
self.log_debug("Attachment from unknown user {0}".format(
user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
user = self._save_user(db, downloaded)
new = BzAttachment()
new.id = attachment["id"]
new.bug_id = new_bug_id
new.mimetype = attachment["content_type"]
new.description = attachment["description"]
new.filename = attachment["file_name"]
new.is_private = bool(attachment["is_private"])
new.is_patch = bool(attachment["is_patch"])
new.is_obsolete = bool(attachment["is_obsolete"])
new.creation_time = self._convert_datetime(
attachment["creation_time"])
new.last_change_time = self._convert_datetime(
attachment["last_change_time"])
new.user = user
db.session.add(new)
self._connect()
data = self.bz.openattachment(attachment["id"])
# save_lob is inherited method which cannot be seen by pylint
# because of sqlalchemy magic
# pylint: disable=E1101
new.save_lob("content", data, truncate=True, overwrite=True)
data.close()
db.session.flush()
def _save_comments(self, db, comments, new_bug_id):
"""
Save bug comments to the database.
Expects list of `comments` and ID of the bug as `new_bug_id`.
"""
total = len(comments)
for num, comment in enumerate(comments):
self.log_debug("Processing comment {0}/{1}".format(num + 1,
total))
if queries.get_bz_comment(db, comment["id"]):
self.log_debug("Skipping existing comment #{0}".format(
comment["id"]))
continue
self.log_debug("Downloading comment #{0}".format(comment["id"]))
user_email = comment["creator"]
user = queries.get_bz_user(db, user_email)
if not user:
self.log_debug("History changed by unknown user #{0}".format(
user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
user = self._save_user(db, downloaded)
new = BzComment()
new.id = comment["id"]
new.bug_id = new_bug_id
new.creation_time = self._convert_datetime(comment["time"])
new.is_private = comment["is_private"]
if "attachment_id" in comment:
attachment = queries.get_bz_attachment(
db, comment["attachment_id"])
if attachment:
new.attachment = attachment
else:
self.log_warning("Comment is referencing an attachment"
" which is not accessible.")
new.number = num
new.user = user
db.session.add(new)
if not isinstance(comment["text"], basestring):
comment["text"] = str(comment["text"])
# save_lob is inherited method which cannot
# be seen by pylint because of sqlalchemy magic
# pylint: disable=E1101
new.save_lob("content", comment["text"].encode("utf-8"),
overwrite=True)
db.session.flush()
@retry(3, delay=10, backoff=3, verbose=True)
def _download_user(self, user_email):
"""
Return user with `user_email` downloaded from bugzilla.
"""
self.log_debug("Downloading user {0}".format(user_email))
self._connect()
user = self.bz.getuser(user_email)
return user
def _save_user(self, db, user):
"""
Save bugzilla `user` to the database. Return persisted
BzUser object.
"""
# We need to account for case when user has changed
# the email address.
dbuser = (db.session.query(BzUser)
.filter(BzUser.id == user.userid).first())
if not dbuser:
dbuser = BzUser(id=user.userid)
for field in ["name", "email", "can_login", "real_name"]:
setattr(dbuser, field, getattr(user, field))
db.session.add(dbuser)
db.session.flush()
return dbuser
@retry(3, delay=10, backoff=3, verbose=True)
def create_bug(self, **data):
"""
Create new bugzilla ticket using `data` dictionary.
"""
self._connect()
return self.bz.createbug(**data)
@retry(2, delay=60, backoff=1, verbose=True)
def clone_bug(self, orig_bug_id, new_product, new_version):
self._connect()
origbug = self.bz.getbug(orig_bug_id)
desc = ["+++ This bug was initially created as a clone "
"of Bug #{0} +++".format(orig_bug_id)]
private = False
first = True
for comment in origbug.longdescs:
if comment["is_private"]:
private = True
if not first:
desc.append("--- Additional comment from {0} on {1} ---"
.format(comment["author"], comment["time"]))
if "extra_data" in comment:
desc.append("*** This bug has been marked as a duplicate "
"of bug {0} ***".format(comment["extra_data"]))
else:
desc.append(comment["text"])
first = False
data = {
'product': new_product,
'component': origbug.component,
'version': new_version,
'op_sys': origbug.op_sys,
'platform': origbug.platform,
'summary': origbug.summary,
'description': "\n\n".join(desc),
'comment_is_private': private,
'priority': origbug.priority,
'bug_severity': origbug.bug_severity,
'blocked': origbug.blocked,
'whiteboard': origbug.whiteboard,
'keywords': origbug.keywords,
'cf_clone_of': str(orig_bug_id),
'cf_verified': ['Any'],
'cf_environment': origbug.cf_environment,
'groups': origbug.groups
}
for key in data:
if data[key] is None:
kwargs.pop(key)
newbug = self.bz.createbug(**data)
return newbug
| gpl-3.0 | 7,789,109,973,879,203,000 | 32.805755 | 85 | 0.520962 | false | 4.222682 | false | false | false |
mfsteen/CIQTranslate-Kristian | openpyxl/comments/reader.py | 1 | 1558 | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
import os.path
from openpyxl.comments import Comment
from openpyxl.xml.constants import (
PACKAGE_WORKSHEET_RELS,
COMMENTS_NS,
PACKAGE_XL,
)
from openpyxl.xml.functions import fromstring
from .properties import CommentSheet
def read_comments(ws, xml_source):
"""Given a worksheet and the XML of its comments file, assigns comments to cells"""
root = fromstring(xml_source)
comments = CommentSheet.from_tree(root)
authors = comments.authors.author
for comment in comments.commentList:
author = authors[comment.authorId]
ref = comment.ref
comment = Comment(comment.content, author)
ws.cell(coordinate=ref).comment = comment
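# A minimal usage sketch (not part of the original module); the workbook name and
# archive member path below are assumptions for illustration only:
#
#   from openpyxl import load_workbook
#   wb = load_workbook('report.xlsx')
#   xml_source = open('xl/comments1.xml', 'rb').read()
#   read_comments(wb.active, xml_source)   # referenced cells now carry .comment objects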
def get_comments_file(worksheet_path, archive, valid_files):
"""Returns the XML filename in the archive which contains the comments for
the spreadsheet with codename sheet_codename. Returns None if there is no
such file"""
sheet_codename = os.path.split(worksheet_path)[-1]
rels_file = PACKAGE_WORKSHEET_RELS + '/' + sheet_codename + '.rels'
if rels_file not in valid_files:
return None
rels_source = archive.read(rels_file)
root = fromstring(rels_source)
for i in root:
if i.attrib['Type'] == COMMENTS_NS:
comments_file = os.path.split(i.attrib['Target'])[-1]
comments_file = PACKAGE_XL + '/' + comments_file
if comments_file in valid_files:
return comments_file
return None
| gpl-3.0 | 159,124,037,491,564,830 | 31.458333 | 87 | 0.679076 | false | 3.745192 | false | false | false |
beni55/rinohtype | rinoh/annotation.py | 1 | 1363 | # This file is part of RinohType, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from .util import Decorator
from .text import MixedStyledText
__all__ = ['NamedDestination', 'NamedDestinationLink', 'HyperLink',
'AnnotatedSpan', 'AnnotatedText']
class Annotation(object):
pass
class NamedDestination(Annotation):
type = 'NamedDestination'
def __init__(self, name):
self.name = name
class NamedDestinationLink(Annotation):
type = 'NamedDestinationLink'
def __init__(self, name):
self.name = name
class HyperLink(Annotation):
type = 'URI'
def __init__(self, target):
self.target = target
class AnnotatedSpan(Decorator):
def __init__(self, span, annotation):
super().__init__(span)
self.annotation = annotation
class AnnotatedText(MixedStyledText):
def __init__(self, text_or_items, annotation, style=None, parent=None):
super().__init__(text_or_items, style=style, parent=parent)
self.annotation = annotation
def spans(self, document):
return (AnnotatedSpan(span, self.annotation)
for item in self for span in item.spans(document))
| agpl-3.0 | 4,685,080,658,187,097,000 | 22.912281 | 75 | 0.668379 | false | 3.839437 | false | false | false |
google-research/dreamer | dreamer/models/base.py | 1 | 2449 | # Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from dreamer import tools
class Base(tf.nn.rnn_cell.RNNCell):
def __init__(self, transition_tpl, posterior_tpl, reuse=None):
super(Base, self).__init__(_reuse=reuse)
self._posterior_tpl = posterior_tpl
self._transition_tpl = transition_tpl
self._debug = False
@property
def state_size(self):
raise NotImplementedError
@property
def updates(self):
return []
@property
def losses(self):
return []
@property
def output_size(self):
return (self.state_size, self.state_size)
def zero_state(self, batch_size, dtype):
return tools.nested.map(
lambda size: tf.zeros([batch_size, size], dtype),
self.state_size)
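  # Shape sketch (assumed example, not taken from the original code): if a subclass
  # defines state_size = {'mean': 30, 'stoch': 30}, then zero_state(8, tf.float32)
  # returns {'mean': zeros([8, 30]), 'stoch': zeros([8, 30])}: the same nested
  # structure with one [batch_size, size] tensor per entry.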
def features_from_state(self, state):
raise NotImplementedError
def dist_from_state(self, state, mask=None):
raise NotImplementedError
def divergence_from_states(self, lhs, rhs, mask=None):
lhs = self.dist_from_state(lhs, mask)
rhs = self.dist_from_state(rhs, mask)
divergence = tfd.kl_divergence(lhs, rhs)
if mask is not None:
divergence = tools.mask(divergence, mask)
return divergence
def call(self, inputs, prev_state):
obs, prev_action, use_obs = inputs
if self._debug:
with tf.control_dependencies([tf.assert_equal(use_obs, use_obs[0, 0])]):
use_obs = tf.identity(use_obs)
use_obs = use_obs[0, 0]
zero_obs = tools.nested.map(tf.zeros_like, obs)
prior = self._transition_tpl(prev_state, prev_action, zero_obs)
posterior = tf.cond(
use_obs,
lambda: self._posterior_tpl(prev_state, prev_action, obs),
lambda: prior)
return (prior, posterior), posterior
| apache-2.0 | 3,964,951,076,290,957,300 | 29.6125 | 78 | 0.695794 | false | 3.660688 | false | false | false |
jolyonb/edx-platform | common/lib/xmodule/xmodule/video_module/transcripts_utils.py | 1 | 37083 | """
Utility functions for transcripts.
++++++++++++++++++++++++++++++++++
"""
from __future__ import absolute_import
import copy
import json
import logging
import os
from functools import wraps
import requests
import six
from django.conf import settings
from lxml import etree
from pysrt import SubRipFile, SubRipItem, SubRipTime
from pysrt.srtexc import Error
from six import text_type
from six.moves import range, zip
from six.moves.html_parser import HTMLParser # pylint: disable=import-error
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from .bumper_utils import get_bumper_settings
try:
from edxval import api as edxval_api
except ImportError:
edxval_api = None
log = logging.getLogger(__name__)
NON_EXISTENT_TRANSCRIPT = 'non_existent_dummy_file_name'
class TranscriptException(Exception): # pylint: disable=missing-docstring
pass
class TranscriptsGenerationException(Exception): # pylint: disable=missing-docstring
pass
class GetTranscriptsFromYouTubeException(Exception): # pylint: disable=missing-docstring
pass
class TranscriptsRequestValidationException(Exception): # pylint: disable=missing-docstring
pass
def exception_decorator(func):
"""
Generate NotFoundError for TranscriptsGenerationException, UnicodeDecodeError.
Args:
`func`: Input function
Returns:
'wrapper': Decorated function
"""
@wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except (TranscriptsGenerationException, UnicodeDecodeError) as ex:
log.exception(text_type(ex))
raise NotFoundError
return wrapper
def generate_subs(speed, source_speed, source_subs):
"""
Generate transcripts from one speed to another speed.
Args:
`speed`: float, for this speed subtitles will be generated,
`source_speed`: float, speed of source_subs
`source_subs`: dict, existing subtitles for speed `source_speed`.
Returns:
`subs`: dict, actual subtitles.
"""
if speed == source_speed:
return source_subs
coefficient = 1.0 * speed / source_speed
subs = {
'start': [
int(round(timestamp * coefficient)) for
timestamp in source_subs['start']
],
'end': [
int(round(timestamp * coefficient)) for
timestamp in source_subs['end']
],
'text': source_subs['text']}
return subs
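# Worked example with made-up timestamps: rescaling 1.0-speed subs to 0.5 speed
# halves every timestamp while the text is left untouched.
#
#   source = {'start': [1000, 3000], 'end': [2000, 4000], 'text': ['Hi', 'Bye']}
#   generate_subs(0.5, 1.0, source)
#   # -> {'start': [500, 1500], 'end': [1000, 2000], 'text': ['Hi', 'Bye']}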
def save_to_store(content, name, mime_type, location):
"""
Save named content to store by location.
Returns location of saved content.
"""
content_location = Transcript.asset_location(location, name)
content = StaticContent(content_location, name, mime_type, content)
contentstore().save(content)
return content_location
def save_subs_to_store(subs, subs_id, item, language='en'):
"""
Save transcripts into `StaticContent`.
Args:
`subs_id`: str, subtitles id
`item`: video module instance
`language`: two chars str ('uk'), language of translation of transcripts
Returns: location of saved subtitles.
"""
filedata = json.dumps(subs, indent=2)
filename = subs_filename(subs_id, language)
return save_to_store(filedata, filename, 'application/json', item.location)
def youtube_video_transcript_name(youtube_text_api):
"""
Get the transcript name from available transcripts of video
with respect to language from youtube server
"""
utf8_parser = etree.XMLParser(encoding='utf-8')
transcripts_param = {'type': 'list', 'v': youtube_text_api['params']['v']}
lang = youtube_text_api['params']['lang']
# get list of transcripts of specific video
# url-form
# http://video.google.com/timedtext?type=list&v={VideoId}
youtube_response = requests.get('http://' + youtube_text_api['url'], params=transcripts_param)
if youtube_response.status_code == 200 and youtube_response.text:
youtube_data = etree.fromstring(youtube_response.content, parser=utf8_parser)
# iterate all transcripts information from youtube server
for element in youtube_data:
# search specific language code such as 'en' in transcripts info list
if element.tag == 'track' and element.get('lang_code', '') == lang:
return element.get('name')
return None
def get_transcripts_from_youtube(youtube_id, settings, i18n, youtube_transcript_name=''):
"""
Gets transcripts from youtube for youtube_id.
Parses only utf-8 encoded transcripts.
Other encodings are not supported at the moment.
    Returns a subs dict with 'start', 'end' and 'text' lists.
"""
_ = i18n.ugettext
utf8_parser = etree.XMLParser(encoding='utf-8')
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = youtube_id
# if the transcript name is not empty on youtube server we have to pass
# name param in url in order to get transcript
# example http://video.google.com/timedtext?lang=en&v={VideoId}&name={transcript_name}
youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
if youtube_transcript_name:
youtube_text_api['params']['name'] = youtube_transcript_name
data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
if data.status_code != 200 or not data.text:
msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.").format(
youtube_id=youtube_id,
status_code=data.status_code
)
raise GetTranscriptsFromYouTubeException(msg)
sub_starts, sub_ends, sub_texts = [], [], []
xmltree = etree.fromstring(data.content, parser=utf8_parser)
for element in xmltree:
if element.tag == "text":
start = float(element.get("start"))
duration = float(element.get("dur", 0)) # dur is not mandatory
text = element.text
end = start + duration
if text:
# Start and end should be ints representing the millisecond timestamp.
sub_starts.append(int(start * 1000))
sub_ends.append(int((end + 0.0001) * 1000))
sub_texts.append(text.replace('\n', ' '))
return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
def download_youtube_subs(youtube_id, video_descriptor, settings):
"""
Download transcripts from Youtube.
Args:
youtube_id: str, actual youtube_id of the video.
video_descriptor: video descriptor instance.
We save transcripts for 1.0 speed, as for other speed conversion is done on front-end.
Returns:
Serialized sjson transcript content, if transcripts were successfully downloaded and saved.
Raises:
GetTranscriptsFromYouTubeException, if fails.
"""
i18n = video_descriptor.runtime.service(video_descriptor, "i18n")
_ = i18n.ugettext
subs = get_transcripts_from_youtube(youtube_id, settings, i18n)
return json.dumps(subs, indent=2)
def remove_subs_from_store(subs_id, item, lang='en'):
"""
Remove from store, if transcripts content exists.
"""
filename = subs_filename(subs_id, lang)
Transcript.delete_asset(item.location, filename)
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
"""Generate transcripts from source files (like SubRip format, etc.)
and save them to assets for `item` module.
    We expect the speed of the source subs to equal 1.
:param speed_subs: dictionary {speed: sub_id, ...}
:param subs_type: type of source subs: "srt", ...
    :param subs_filedata: unicode, content of source subs.
:param item: module object.
:param language: str, language of translation of transcripts
:returns: True, if all subs are generated and saved successfully.
"""
_ = item.runtime.service(item, "i18n").ugettext
if subs_type.lower() != 'srt':
raise TranscriptsGenerationException(_("We support only SubRip (*.srt) transcripts format."))
try:
srt_subs_obj = SubRipFile.from_string(subs_filedata)
except Exception as ex:
msg = _("Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}").format(
error_message=text_type(ex)
)
raise TranscriptsGenerationException(msg)
if not srt_subs_obj:
raise TranscriptsGenerationException(_("Something wrong with SubRip transcripts file during parsing."))
sub_starts = []
sub_ends = []
sub_texts = []
for sub in srt_subs_obj:
sub_starts.append(sub.start.ordinal)
sub_ends.append(sub.end.ordinal)
sub_texts.append(sub.text.replace('\n', ' '))
subs = {
'start': sub_starts,
'end': sub_ends,
'text': sub_texts}
for speed, subs_id in six.iteritems(speed_subs):
save_subs_to_store(
generate_subs(speed, 1, subs),
subs_id,
item,
language
)
return subs
def generate_srt_from_sjson(sjson_subs, speed):
"""Generate transcripts with speed = 1.0 from sjson to SubRip (*.srt).
:param sjson_subs: "sjson" subs.
:param speed: speed of `sjson_subs`.
:returns: "srt" subs.
"""
output = ''
equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])
if not equal_len:
return output
sjson_speed_1 = generate_subs(speed, 1, sjson_subs)
for i in range(len(sjson_speed_1['start'])):
item = SubRipItem(
index=i,
start=SubRipTime(milliseconds=sjson_speed_1['start'][i]),
end=SubRipTime(milliseconds=sjson_speed_1['end'][i]),
text=sjson_speed_1['text'][i]
)
output += (six.text_type(item))
output += '\n'
return output
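# Illustrative conversion (sample content, not real course data):
#
#   sjson = {'start': [0], 'end': [1500], 'text': ['Hello']}
#   generate_srt_from_sjson(sjson, speed=1.0)
#   # -> roughly '0\n00:00:00,000 --> 00:00:01,500\nHello\n\n'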
def generate_sjson_from_srt(srt_subs):
"""
Generate transcripts from sjson to SubRip (*.srt).
Arguments:
srt_subs(SubRip): "SRT" subs object
Returns:
Subs converted to "SJSON" format.
"""
sub_starts = []
sub_ends = []
sub_texts = []
for sub in srt_subs:
sub_starts.append(sub.start.ordinal)
sub_ends.append(sub.end.ordinal)
sub_texts.append(sub.text.replace('\n', ' '))
sjson_subs = {
'start': sub_starts,
'end': sub_ends,
'text': sub_texts
}
return sjson_subs
def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=None):
"""
Renames `old_name` transcript file in storage to `new_name`.
If `old_name` is not found in storage, raises `NotFoundError`.
If `delete_old` is True, removes `old_name` files from storage.
"""
filename = u'subs_{0}.srt.sjson'.format(old_name)
content_location = StaticContent.compute_location(item.location.course_key, filename)
transcripts = contentstore().find(content_location).data
save_subs_to_store(json.loads(transcripts), new_name, item)
item.sub = new_name
item.save_with_metadata(user)
if delete_old:
remove_subs_from_store(old_name, item)
def get_html5_ids(html5_sources):
"""
    Helper method to parse out the id from an HTML5 source.
    NOTE: This assumes that '/' does not appear in the filename
"""
html5_ids = [x.split('/')[-1].rsplit('.', 1)[0] for x in html5_sources]
return html5_ids
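# Example with made-up URLs: the id is the basename of each source with its
# extension stripped.
#
#   get_html5_ids(['https://cdn.example.com/video/intro.mp4', '/static/clip.webm'])
#   # -> ['intro', 'clip']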
def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):
"""
Does some specific things, that can be done only on save.
Video player item has some video fields: HTML5 ones and Youtube one.
If value of `sub` field of `new_item` is cleared, transcripts should be removed.
`item` is video module instance with updated values of fields,
but actually have not been saved to store yet.
`old_metadata` contains old values of XFields.
# 1.
If value of `sub` field of `new_item` is different from values of video fields of `new_item`,
and `new_item.sub` file is present, then code in this function creates copies of
    `new_item.sub` file with new names. Those names are equal to the values of the video fields of `new_item`.
After that `sub` field of `new_item` is changed to one of values of video fields.
This whole action ensures that after user changes video fields, proper `sub` files, corresponding
to new values of video fields, will be presented in system.
# 2. convert /static/filename.srt to filename.srt in self.transcripts.
(it is done to allow user to enter both /static/filename.srt and filename.srt)
# 3. Generate transcripts translation only when user clicks `save` button, not while switching tabs.
a) delete sjson translation for those languages, which were removed from `item.transcripts`.
Note: we are not deleting old SRT files to give user more flexibility.
b) For all SRT files in`item.transcripts` regenerate new SJSON files.
(To avoid confusing situation if you attempt to correct a translation by uploading
a new version of the SRT file with same name).
"""
_ = item.runtime.service(item, "i18n").ugettext
# # 1.
# html5_ids = get_html5_ids(item.html5_sources)
# # Youtube transcript source should always have a higher priority than html5 sources. Appending
# # `youtube_id_1_0` at the end helps achieve this when we read transcripts list.
# possible_video_id_list = html5_ids + [item.youtube_id_1_0]
# sub_name = item.sub
# for video_id in possible_video_id_list:
# if not video_id:
# continue
# if not sub_name:
# remove_subs_from_store(video_id, item)
# continue
# # copy_or_rename_transcript changes item.sub of module
# try:
# # updates item.sub with `video_id`, if it is successful.
# copy_or_rename_transcript(video_id, sub_name, item, user=user)
# except NotFoundError:
# # subtitles file `sub_name` is not presented in the system. Nothing to copy or rename.
# log.debug(
# "Copying %s file content to %s name is failed, "
# "original file does not exist.",
# sub_name, video_id
# )
# 2.
if generate_translation:
for lang, filename in item.transcripts.items():
item.transcripts[lang] = os.path.split(filename)[-1]
# 3.
if generate_translation:
old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()
new_langs = set(item.transcripts)
html5_ids = get_html5_ids(item.html5_sources)
possible_video_id_list = html5_ids + [item.youtube_id_1_0]
for lang in old_langs.difference(new_langs): # 3a
for video_id in possible_video_id_list:
if video_id:
remove_subs_from_store(video_id, item, lang)
reraised_message = ''
for lang in new_langs: # 3b
try:
generate_sjson_for_all_speeds(
item,
item.transcripts[lang],
{speed: subs_id for subs_id, speed in six.iteritems(youtube_speed_dict(item))},
lang,
)
except TranscriptException as ex:
                reraised_message += ' ' + text_type(ex)
if reraised_message:
item.save_with_metadata(user)
raise TranscriptException(reraised_message)
def youtube_speed_dict(item):
"""
Returns {speed: youtube_ids, ...} dict for existing youtube_ids
"""
yt_ids = [item.youtube_id_0_75, item.youtube_id_1_0, item.youtube_id_1_25, item.youtube_id_1_5]
yt_speeds = [0.75, 1.00, 1.25, 1.50]
youtube_ids = {p[0]: p[1] for p in zip(yt_ids, yt_speeds) if p[0]}
return youtube_ids
def subs_filename(subs_id, lang='en'):
"""
Generate proper filename for storage.
"""
if lang == 'en':
return u'subs_{0}.srt.sjson'.format(subs_id)
else:
return u'{0}_subs_{1}.srt.sjson'.format(lang, subs_id)
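# Examples of the generated asset names (the subs id below is a placeholder):
#
#   subs_filename('OEoXaMPEzfM')         # -> u'subs_OEoXaMPEzfM.srt.sjson'
#   subs_filename('OEoXaMPEzfM', 'uk')   # -> u'uk_subs_OEoXaMPEzfM.srt.sjson'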
def generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, lang):
"""
Generates sjson from srt for given lang.
`item` is module object.
"""
_ = item.runtime.service(item, "i18n").ugettext
try:
srt_transcripts = contentstore().find(Transcript.asset_location(item.location, user_filename))
except NotFoundError as ex:
raise TranscriptException(_("{exception_message}: Can't find uploaded transcripts: {user_filename}").format(
exception_message=text_type(ex),
user_filename=user_filename
))
if not lang:
lang = item.transcript_language
# Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
generate_subs_from_source(
result_subs_dict,
os.path.splitext(user_filename)[1][1:],
srt_transcripts.data.decode('utf-8-sig'),
item,
lang
)
def get_or_create_sjson(item, transcripts):
"""
Get sjson if already exists, otherwise generate it.
Generate sjson with subs_id name, from user uploaded srt.
Subs_id is extracted from srt filename, which was set by user.
Args:
        transcripts (dict): dictionary of (language: file) pairs.
Raises:
TranscriptException: when srt subtitles do not exist,
and exceptions from generate_subs_from_source.
`item` is module object.
"""
user_filename = transcripts[item.transcript_language]
user_subs_id = os.path.splitext(user_filename)[0]
source_subs_id, result_subs_dict = user_subs_id, {1.0: user_subs_id}
try:
sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
except NotFoundError: # generating sjson from srt
generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, item.transcript_language)
sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
return sjson_transcript
def get_video_ids_info(edx_video_id, youtube_id_1_0, html5_sources):
"""
Returns list internal or external video ids.
Arguments:
edx_video_id (unicode): edx_video_id
youtube_id_1_0 (unicode): youtube id
html5_sources (list): html5 video ids
Returns:
tuple: external or internal, video ids list
"""
clean = lambda item: item.strip() if isinstance(item, six.string_types) else item
external = not bool(clean(edx_video_id))
video_ids = [edx_video_id, youtube_id_1_0] + get_html5_ids(html5_sources)
# video_ids cleanup
video_ids = [item for item in video_ids if bool(clean(item))]
return external, video_ids
def clean_video_id(edx_video_id):
"""
Cleans an edx video ID.
Arguments:
edx_video_id(unicode): edx-val's video identifier
"""
return edx_video_id and edx_video_id.strip()
def get_video_transcript_content(edx_video_id, language_code):
"""
Gets video transcript content, only if the corresponding feature flag is enabled for the given `course_id`.
Arguments:
language_code(unicode): Language code of the requested transcript
edx_video_id(unicode): edx-val's video identifier
Returns:
A dict containing transcript's file name and its sjson content.
"""
transcript = None
edx_video_id = clean_video_id(edx_video_id)
if edxval_api and edx_video_id:
transcript = edxval_api.get_video_transcript_data(edx_video_id, language_code)
return transcript
def get_available_transcript_languages(edx_video_id):
"""
Gets available transcript languages for a video.
Arguments:
edx_video_id(unicode): edx-val's video identifier
Returns:
A list containing distinct transcript language codes against all the passed video ids.
"""
available_languages = []
edx_video_id = clean_video_id(edx_video_id)
if edxval_api and edx_video_id:
available_languages = edxval_api.get_available_transcript_languages(video_id=edx_video_id)
return available_languages
def convert_video_transcript(file_name, content, output_format):
"""
Convert video transcript into desired format
Arguments:
file_name: name of transcript file along with its extension
content: transcript content stream
output_format: the format in which transcript will be converted
Returns:
A dict containing the new transcript filename and the content converted into desired format.
"""
name_and_extension = os.path.splitext(file_name)
basename, input_format = name_and_extension[0], name_and_extension[1][1:]
filename = u'{base_name}.{ext}'.format(base_name=basename, ext=output_format)
converted_transcript = Transcript.convert(content, input_format=input_format, output_format=output_format)
return dict(filename=filename, content=converted_transcript)
class Transcript(object):
"""
Container for transcript methods.
"""
SRT = 'srt'
TXT = 'txt'
SJSON = 'sjson'
mime_types = {
SRT: 'application/x-subrip; charset=utf-8',
TXT: 'text/plain; charset=utf-8',
SJSON: 'application/json',
}
@staticmethod
def convert(content, input_format, output_format):
"""
Convert transcript `content` from `input_format` to `output_format`.
Accepted input formats: sjson, srt.
Accepted output format: srt, txt, sjson.
Raises:
TranscriptsGenerationException: On parsing the invalid srt content during conversion from srt to sjson.
"""
assert input_format in ('srt', 'sjson')
assert output_format in ('txt', 'srt', 'sjson')
if input_format == output_format:
return content
if input_format == 'srt':
if output_format == 'txt':
text = SubRipFile.from_string(content.decode('utf8')).text
return HTMLParser().unescape(text)
elif output_format == 'sjson':
try:
# With error handling (set to 'ERROR_RAISE'), we will be getting
# the exception if something went wrong in parsing the transcript.
srt_subs = SubRipFile.from_string(
# Skip byte order mark(BOM) character
content.decode('utf-8-sig'),
error_handling=SubRipFile.ERROR_RAISE
)
except Error as ex: # Base exception from pysrt
raise TranscriptsGenerationException(text_type(ex))
return json.dumps(generate_sjson_from_srt(srt_subs))
if input_format == 'sjson':
if output_format == 'txt':
text = json.loads(content)['text']
text_without_none = [line if line else '' for line in text]
return HTMLParser().unescape("\n".join(text_without_none))
elif output_format == 'srt':
return generate_srt_from_sjson(json.loads(content), speed=1.0)
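    # Illustrative conversions (sample snippet, not a real transcript):
    #
    #   srt = '1\n00:00:00,000 --> 00:00:01,000\nHello\n'
    #   Transcript.convert(srt, 'srt', 'txt')     # -> u'Hello'
    #   Transcript.convert(srt, 'srt', 'sjson')   # -> JSON like
    #   # {"start": [0], "end": [1000], "text": ["Hello"]}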
@staticmethod
def asset(location, subs_id, lang='en', filename=None):
"""
Get asset from contentstore, asset location is built from subs_id and lang.
`location` is module location.
"""
# HACK Warning! this is temporary and will be removed once edx-val take over the
# transcript module and contentstore will only function as fallback until all the
# data is migrated to edx-val. It will be saving a contentstore hit for a hardcoded
# dummy-non-existent-transcript name.
if NON_EXISTENT_TRANSCRIPT in [subs_id, filename]:
raise NotFoundError
asset_filename = subs_filename(subs_id, lang) if not filename else filename
return Transcript.get_asset(location, asset_filename)
@staticmethod
def get_asset(location, filename):
"""
Return asset by location and filename.
"""
return contentstore().find(Transcript.asset_location(location, filename))
@staticmethod
def asset_location(location, filename):
"""
Return asset location. `location` is module location.
"""
# If user transcript filename is empty, raise `TranscriptException` to avoid `InvalidKeyError`.
if not filename:
raise TranscriptException("Transcript not uploaded yet")
return StaticContent.compute_location(location.course_key, filename)
@staticmethod
def delete_asset(location, filename):
"""
Delete asset by location and filename.
"""
try:
contentstore().delete(Transcript.asset_location(location, filename))
log.info("Transcript asset %s was removed from store.", filename)
except NotFoundError:
pass
return StaticContent.compute_location(location.course_key, filename)
class VideoTranscriptsMixin(object):
"""Mixin class for transcript functionality.
This is necessary for both VideoModule and VideoDescriptor.
"""
def available_translations(self, transcripts, verify_assets=None, is_bumper=False):
"""
Return a list of language codes for which we have transcripts.
Arguments:
verify_assets (boolean): If True, checks to ensure that the transcripts
really exist in the contentstore. If False, we just look at the
VideoDescriptor fields and do not query the contentstore. One reason
we might do this is to avoid slamming contentstore() with queries
when trying to make a listing of videos and their languages.
Defaults to `not FALLBACK_TO_ENGLISH_TRANSCRIPTS`.
transcripts (dict): A dict with all transcripts and a sub.
include_val_transcripts(boolean): If True, adds the edx-val transcript languages as well.
"""
translations = []
if verify_assets is None:
verify_assets = not settings.FEATURES.get('FALLBACK_TO_ENGLISH_TRANSCRIPTS')
sub, other_langs = transcripts["sub"], transcripts["transcripts"]
if verify_assets:
all_langs = dict(**other_langs)
if sub:
all_langs.update({'en': sub})
for language, filename in six.iteritems(all_langs):
try:
# for bumper videos, transcripts are stored in content store only
if is_bumper:
get_transcript_for_video(self.location, filename, filename, language)
else:
get_transcript(self, language)
except NotFoundError:
continue
translations.append(language)
else:
# If we're not verifying the assets, we just trust our field values
translations = list(other_langs)
if not translations or sub:
translations += ['en']
# to clean redundant language codes.
return list(set(translations))
def get_transcript(self, transcripts, transcript_format='srt', lang=None):
"""
Returns transcript, filename and MIME type.
transcripts (dict): A dict with all transcripts and a sub.
Raises:
- NotFoundError if cannot find transcript file in storage.
- ValueError if transcript file is empty or incorrect JSON.
- KeyError if transcript file has incorrect format.
If language is 'en', self.sub should be correct subtitles name.
If language is 'en', but if self.sub is not defined, this means that we
should search for video name in order to get proper transcript (old style courses).
If language is not 'en', give back transcript in proper language and format.
"""
if not lang:
lang = self.get_default_transcript_language(transcripts)
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if lang == 'en':
if sub: # HTML5 case and (Youtube case for new style videos)
transcript_name = sub
elif self.youtube_id_1_0: # old courses
transcript_name = self.youtube_id_1_0
else:
log.debug("No subtitles for 'en' language")
raise ValueError
data = Transcript.asset(self.location, transcript_name, lang).data
filename = u'{}.{}'.format(transcript_name, transcript_format)
content = Transcript.convert(data, 'sjson', transcript_format)
else:
data = Transcript.asset(self.location, None, None, other_lang[lang]).data
filename = u'{}.{}'.format(os.path.splitext(other_lang[lang])[0], transcript_format)
content = Transcript.convert(data, 'srt', transcript_format)
if not content:
log.debug('no subtitles produced in get_transcript')
raise ValueError
return content, filename, Transcript.mime_types[transcript_format]
def get_default_transcript_language(self, transcripts):
"""
Returns the default transcript language for this video module.
Args:
transcripts (dict): A dict with all transcripts and a sub.
"""
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.transcript_language in other_lang:
transcript_language = self.transcript_language
elif sub:
transcript_language = u'en'
elif len(other_lang) > 0:
transcript_language = sorted(other_lang)[0]
else:
transcript_language = u'en'
return transcript_language
def get_transcripts_info(self, is_bumper=False):
"""
Returns a transcript dictionary for the video.
Arguments:
is_bumper(bool): If True, the request is for the bumper transcripts
include_val_transcripts(bool): If True, include edx-val transcripts as well
"""
if is_bumper:
transcripts = copy.deepcopy(get_bumper_settings(self).get('transcripts', {}))
sub = transcripts.pop("en", "")
else:
transcripts = self.transcripts if self.transcripts else {}
sub = self.sub
# Only attach transcripts that are not empty.
transcripts = {
language_code: transcript_file
for language_code, transcript_file in transcripts.items() if transcript_file != ''
}
# bumper transcripts are stored in content store so we don't need to include val transcripts
if not is_bumper:
transcript_languages = get_available_transcript_languages(edx_video_id=self.edx_video_id)
# HACK Warning! this is temporary and will be removed once edx-val take over the
# transcript module and contentstore will only function as fallback until all the
# data is migrated to edx-val.
for language_code in transcript_languages:
if language_code == 'en' and not sub:
sub = NON_EXISTENT_TRANSCRIPT
elif not transcripts.get(language_code):
transcripts[language_code] = NON_EXISTENT_TRANSCRIPT
return {
"sub": sub,
"transcripts": transcripts,
}
@exception_decorator
def get_transcript_from_val(edx_video_id, lang=None, output_format=Transcript.SRT):
"""
Get video transcript from edx-val.
Arguments:
edx_video_id (unicode): video identifier
lang (unicode): transcript language
output_format (unicode): transcript output format
Returns:
tuple containing content, filename, mimetype
"""
transcript = get_video_transcript_content(edx_video_id, lang)
if not transcript:
raise NotFoundError(u'Transcript not found for {}, lang: {}'.format(edx_video_id, lang))
transcript_conversion_props = dict(transcript, output_format=output_format)
transcript = convert_video_transcript(**transcript_conversion_props)
filename = transcript['filename']
content = transcript['content']
mimetype = Transcript.mime_types[output_format]
return content, filename, mimetype
def get_transcript_for_video(video_location, subs_id, file_name, language):
"""
Get video transcript from content store.
NOTE: Transcripts can be searched from content store by two ways:
1. by an id(a.k.a subs_id) which will be used to construct transcript filename
2. by providing transcript filename
Arguments:
video_location (Locator): Video location
subs_id (unicode): id for a transcript in content store
file_name (unicode): file_name for a transcript in content store
language (unicode): transcript language
Returns:
tuple containing transcript input_format, basename, content
"""
try:
if subs_id is None:
raise NotFoundError
content = Transcript.asset(video_location, subs_id, language).data
base_name = subs_id
input_format = Transcript.SJSON
except NotFoundError:
content = Transcript.asset(video_location, None, language, file_name).data
base_name = os.path.splitext(file_name)[0]
input_format = Transcript.SRT
return input_format, base_name, content
@exception_decorator
def get_transcript_from_contentstore(video, language, output_format, transcripts_info, youtube_id=None):
"""
Get video transcript from content store.
Arguments:
video (Video Descriptor): Video descriptor
language (unicode): transcript language
output_format (unicode): transcript output format
transcripts_info (dict): transcript info for a video
youtube_id (unicode): youtube video id
Returns:
tuple containing content, filename, mimetype
"""
input_format, base_name, transcript_content = None, None, None
if output_format not in (Transcript.SRT, Transcript.SJSON, Transcript.TXT):
raise NotFoundError('Invalid transcript format `{output_format}`'.format(output_format=output_format))
sub, other_languages = transcripts_info['sub'], transcripts_info['transcripts']
transcripts = dict(other_languages)
# this is sent in case of a translation dispatch and we need to use it as our subs_id.
possible_sub_ids = [youtube_id, sub, video.youtube_id_1_0] + get_html5_ids(video.html5_sources)
for sub_id in possible_sub_ids:
try:
transcripts[u'en'] = sub_id
input_format, base_name, transcript_content = get_transcript_for_video(
video.location,
subs_id=sub_id,
file_name=transcripts[language],
language=language
)
break
except (KeyError, NotFoundError):
continue
if transcript_content is None:
raise NotFoundError('No transcript for `{lang}` language'.format(
lang=language
))
# add language prefix to transcript file only if language is not None
language_prefix = '{}_'.format(language) if language else ''
transcript_name = u'{}{}.{}'.format(language_prefix, base_name, output_format)
transcript_content = Transcript.convert(transcript_content, input_format=input_format, output_format=output_format)
if not transcript_content.strip():
raise NotFoundError('No transcript content')
if youtube_id:
youtube_ids = youtube_speed_dict(video)
transcript_content = json.dumps(
generate_subs(youtube_ids.get(youtube_id, 1), 1, json.loads(transcript_content))
)
return transcript_content, transcript_name, Transcript.mime_types[output_format]
def get_transcript(video, lang=None, output_format=Transcript.SRT, youtube_id=None):
"""
Get video transcript from edx-val or content store.
Arguments:
video (Video Descriptor): Video Descriptor
lang (unicode): transcript language
output_format (unicode): transcript output format
youtube_id (unicode): youtube video id
Returns:
tuple containing content, filename, mimetype
"""
transcripts_info = video.get_transcripts_info()
if not lang:
lang = video.get_default_transcript_language(transcripts_info)
try:
edx_video_id = clean_video_id(video.edx_video_id)
if not edx_video_id:
raise NotFoundError
return get_transcript_from_val(edx_video_id, lang, output_format)
except NotFoundError:
return get_transcript_from_contentstore(
video,
lang,
youtube_id=youtube_id,
output_format=output_format,
transcripts_info=transcripts_info
)
| agpl-3.0 | -4,293,292,588,554,137,600 | 35.178537 | 120 | 0.641183 | false | 4.063445 | false | false | false |
mlk/thefuck | thefuck/rules/apt_get.py | 1 | 1174 | from thefuck.specific.apt import apt_available
from thefuck.utils import memoize, which
from thefuck.shells import shell
try:
from CommandNotFound import CommandNotFound
command_not_found = CommandNotFound()
enabled_by_default = apt_available
except ImportError:
enabled_by_default = False
def _get_executable(command):
if command.script_parts[0] == 'sudo':
return command.script_parts[1]
else:
return command.script_parts[0]
@memoize
def get_package(executable):
try:
packages = command_not_found.getPackages(executable)
return packages[0][0]
except IndexError:
# IndexError is thrown when no matching package is found
return None
def match(command):
if 'not found' in command.stderr or 'not installed' in command.stderr:
executable = _get_executable(command)
return not which(executable) and get_package(executable)
else:
return False
def get_new_command(command):
executable = _get_executable(command)
name = get_package(executable)
formatme = shell.and_('sudo apt-get install {}', '{}')
return formatme.format(name, command.script)
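# Rough example of the rule in action (command and package names are illustrative):
# running `vim` on a system without it prints "vim: command not found"; match()
# succeeds because CommandNotFound suggests the `vim` package, and for a bash-like
# shell get_new_command() rewrites the command to `sudo apt-get install vim && vim`.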
| mit | -3,930,856,329,572,652,500 | 26.302326 | 74 | 0.69506 | false | 3.939597 | false | false | false |
samuelmaudo/yepes | yepes/view_mixins/cache.py | 1 | 2980 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import hashlib
from django.contrib import messages
from django.http import HttpResponsePermanentRedirect
from django.utils.encoding import force_bytes
from yepes.cache import MintCache
from yepes.conf import settings
from yepes.utils.minifier import minify_html_response
class CacheMixin(object):
"""
Provides the ability to cache the response to save resources in
further requests.
By default, it only caches responses for GET and HEAD requests,
and only if the response status code is 200, 301 or 404. However,
it is highly customizable.
"""
cache_alias = None
cached_methods = ('GET', 'HEAD')
cached_statuses = (200, 301, 404)
delay = None
timeout = None
use_cache = True
def __init__(self, *args, **kwargs):
super(CacheMixin, self).__init__(*args, **kwargs)
self._cache = MintCache(
self.cache_alias or settings.VIEW_CACHE_ALIAS,
timeout=self.timeout or settings.VIEW_CACHE_SECONDS,
delay=self.delay or settings.VIEW_CACHE_DELAY_SECONDS)
def get_cache_hash(self, request):
return '{0}://{1}{2}'.format(
'https' if request.is_secure() else 'http',
request.get_host(),
request.path)
def get_cache_key(self, request):
class_name = self.__class__.__name__
hash = hashlib.md5(force_bytes(self.get_cache_hash(request)))
return 'yepes.views.{0}.{1}'.format(class_name, hash.hexdigest())
def dispatch(self, request, *args, **kwargs):
super_dispatch = super(CacheMixin, self).dispatch
self.request = request
self.args = args
self.kwargs = kwargs
if (settings.VIEW_CACHE_AVAILABLE
and self.get_use_cache(request)):
key = self.get_cache_key(request)
response = self._cache.get(key)
if response is None:
response = super_dispatch(request, *args, **kwargs)
if response.status_code not in self.cached_statuses:
return response
if (hasattr(response, 'render')
and callable(response.render)):
def update_cache(resp):
resp = minify_html_response(resp)
return self._cache.set(key, resp)
response.add_post_render_callback(update_cache)
else:
self._cache.set(key, minify_html_response(response))
return response
else:
return super_dispatch(request, *args, **kwargs)
def get_use_cache(self, request):
if not self.use_cache:
return False
if request.method.upper() not in self.cached_methods:
return False
try:
return not request.user.is_staff
except AttributeError:
return True
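# Hypothetical usage sketch (the view and model names are invented): the mixin is
# meant to be combined with an ordinary class-based view so whole responses are
# cached and re-served until the mint timeout expires.
#
#   class ArticleDetailView(CacheMixin, DetailView):
#       model = Article
#       timeout = 600              # keep cached responses for ten minutes
#       cached_methods = ('GET',)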
| bsd-3-clause | -7,693,372,715,246,965,000 | 31.391304 | 73 | 0.591611 | false | 4.257143 | false | false | false |
rohit01/sethji | sethji/util.py | 1 | 2148 | import datetime
import calendar
import re
def validate_email(email, email_regex_csv):
regex_list = [e.strip() for e in email_regex_csv.split(',')]
for user_regex in regex_list:
## Only * is allowed in user email regex
match_regex = re.escape(user_regex)
match_regex = "^%s$" % match_regex.replace('\\*', '.*')
if re.match(match_regex, email):
return True
return False
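# Example with made-up addresses: only `*` is treated as a wildcard, everything
# else in the comma-separated regex list is matched literally.
#
#   validate_email('dev1@example.com', '*@example.com, admin@other.org')   # True
#   validate_email('dev1@sample.com', '*@example.com, admin@other.org')    # False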
def convert_none_into_blank_values(details):
for k, v in details.items():
if v == None:
details[k] = ''
return details
def pretty_date(time_object=False):
"""
    Get a datetime object or an int() epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
"""
now = datetime.datetime.now()
if type(time_object) is int:
diff = now - datetime.datetime.fromtimestamp(time_object)
elif isinstance(time_object, datetime.datetime):
diff = now - time_object
elif not time_object:
return ''
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 10:
return "just now"
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(second_diff / 60) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(second_diff / 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
if day_diff < 31:
return str(day_diff / 7) + " weeks ago"
if day_diff < 365:
return str(day_diff / 30) + " months ago"
return str(day_diff / 365) + " years ago"
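# Examples (relative to the moment of the call; inputs are illustrative):
#
#   pretty_date(datetime.datetime.now() - datetime.timedelta(seconds=30))  # '30 seconds ago'
#   pretty_date(datetime.datetime.now() - datetime.timedelta(days=2))      # '2 days ago'
#   An int() epoch timestamp is accepted as well and is compared against now.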
def get_current_month_day_count():
now = datetime.datetime.now()
return calendar.monthrange(now.year, now.month)[1]
def get_current_month_and_year():
now = datetime.datetime.now()
return now.strftime("%B"), now.year
| mit | 1,566,480,901,819,199,500 | 28.833333 | 66 | 0.582868 | false | 3.628378 | false | false | false |
jmaher/randomtools | wget/wget_helper.py | 1 | 4423 | import subprocess
import re
import os
filename = ''  # set to the saved HTML page whose asset links should be rewritten to local copies
def findAndGet(url, lines):
retVal = None
fname, root, query = getFilename(url)
resrc = re.compile('.*src="%s(.*)".*' % url)
rebg = re.compile('.*\(%s(.*)\).*' % url)
for line in lines:
match = resrc.match(line)
if match:
retVal = url + match.group(1).split('"')[0]
break
match = rebg.match(line)
if match:
retVal = url + match.group(1).split('"')[0]
break
if retVal:
retVal = retVal.replace("&", "&")
return retVal
def findEscapedUrl(url, lines):
#look for the \/ version of the url
retVal = None
fname, root, query = getFilename(url)
refname = re.compile('.*[=:]"https:(.*)%s(.*)".*' % fname)
refname2 = re.compile('.*src=https:(.*)%s(.*)".*' % fname)
for line in lines:
match = refname.match(line)
if match:
first = match.group(1).split('"')[-1]
if first.startswith('files/'):
break
retVal = 'https:' + first + fname + match.group(2).split('"')[0]
print "matched on refname: %s" % retVal
break
match = refname2.match(line)
if match:
first = match.group(1).split('"')[-1]
if first.startswith('files/'):
break
retVal = 'https:' + first + fname + match.group(2).split('"')[0]
print "matched on refname2: %s" % retVal
break
if retVal:
retVal = retVal.replace("&", "&")
return retVal
def getFilename(url):
parts = url.split('?')
query = ""
if len(parts) > 1:
query = '?'.join(parts[1:])
dirparts = parts[0].split('/')
root = '/'.join(dirparts[:-1])
fname = dirparts[-1]
return fname, root, query
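# Example (the URL is illustrative): the helper splits a URL into basename,
# directory part and query string.
#
#   getFilename('https://example.com/a/b/logo.png?v=2')
#   # -> ('logo.png', 'https://example.com/a/b', 'v=2')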
def wgetFile(filename, url):
try:
url.index('&')
url = '"%s"' % url
except:
pass
if os.path.exists('files/%s' % filename):
stats = os.stat('files/%s' % filename)
if stats.st_size > 0:
return ""
url = url.replace('\/', '/')
cmd = 'wget --user-agent=Firefox -O files/%s %s' % (filename, url)
print cmd
# NOTE: using subprocess fails for wget as it has a scheme error
os.system('%s > wget.out' % cmd)
with open('wget.out', 'r') as fHandle:
stderr = fHandle.read()
if os.path.exists('files/%s' % filename):
stats = os.stat('files/%s' % filename)
if stats.st_size <= 0:
stderr = "%s\nERROR: file %s is size 0" % (stderr, filename)
os.system('rm files/%s' % filename)
return stderr
def replaceLines(query, root, lines):
newlines = []
newline = ""
for line in lines:
if query:
newline = line.replace('%s' % query, '')
else:
newline = line
newline = newline.replace('%s' % root, 'files')
newlines.append(newline)
return newlines
with open('f.txt', 'r') as fHandle:
urls = fHandle.readlines()
with open(filename, 'r') as fHandle:
lines = fHandle.readlines()
redo = []
for url in urls:
url = url.split(' ')[0]
url = url.strip('\n')
if url.strip(' ') == "":
continue
if url.startswith('file://'):
continue
fname, root, query = getFilename(url)
stderr = wgetFile(fname, url)
replace = True
rewget = re.compile('.*ERROR.*', re.MULTILINE|re.DOTALL)
if rewget.match(stderr):
found = findAndGet(url, lines)
if not found:
redo.append(url)
replace = False
else:
url = found
fname, root, query = getFilename(url)
stderr = wgetFile(fname, url)
if rewget.match(stderr):
redo.append(url)
replace = False
if replace:
lines = replaceLines(query, root, lines)
# Handle second pass for escaped urls
found = findEscapedUrl(url, lines)
if found:
fname, root, query = getFilename(found)
stderr = wgetFile(fname, found)
if rewget.match(stderr):
            if url not in redo:
                redo.append(url)
else:
lines = replaceLines(query, root, lines)
with open(filename, 'w') as fHandle:
for line in lines:
fHandle.write(line)
print "\n\n:Files that didn't work out so well:"
for r in redo:
print r
| mpl-2.0 | 6,678,670,892,547,276,000 | 25.644578 | 76 | 0.530861 | false | 3.631363 | false | false | false |
googleads/google-ads-python | google/ads/googleads/v6/enums/types/matching_function_context_type.py | 1 | 1242 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"MatchingFunctionContextTypeEnum",},
)
class MatchingFunctionContextTypeEnum(proto.Message):
r"""Container for context types for an operand in a matching
function.
"""
class MatchingFunctionContextType(proto.Enum):
r"""Possible context types for an operand in a matching function."""
UNSPECIFIED = 0
UNKNOWN = 1
FEED_ITEM_ID = 2
DEVICE_NAME = 3
FEED_ITEM_SET_ID = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,692,842,941,226,915,300 | 28.571429 | 76 | 0.699678 | false | 3.980769 | false | false | false |
grouan/udata | udata/tests/site/test_site_api.py | 1 | 3123 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from flask import url_for
from udata.core.site.models import Site
from udata.core.site.metrics import SiteMetric
from udata.core.site.views import current_site
from udata.models import db, WithMetrics
from udata.tests.api import APITestCase
from udata.tests.factories import (
AdminFactory, VisibleDatasetFactory, VisibleReuseFactory, SiteFactory
)
class FakeModel(db.Document, WithMetrics):
name = db.StringField()
class FakeSiteMetric(SiteMetric):
name = 'fake-site-metric'
display_name = 'Fake site metric'
default = 0
def get_value(self):
return 2
class MetricsAPITest(APITestCase):
def test_get_metrics_for_site(self):
        '''It should fetch site metrics on GET'''
with self.app.app_context():
FakeSiteMetric.update()
response = self.get(url_for('api.metrics', id='site'))
self.assert200(response)
data = response.json[0]
self.assertEqual(data['level'], 'daily')
self.assertEqual(data['date'], date.today().isoformat())
self.assertIn('fake-site-metric', data['values'])
self.assertEqual(data['values']['fake-site-metric'], 2)
class SiteAPITest(APITestCase):
def test_get_site(self):
response = self.get(url_for('api.site'))
self.assert200(response)
def test_get_home_datasets(self):
site = SiteFactory.create(
id=self.app.config['SITE_ID'],
settings__home_datasets=VisibleDatasetFactory.create_batch(3)
)
current_site.reload()
self.login(AdminFactory())
response = self.get(url_for('api.home_datasets'))
self.assert200(response)
self.assertEqual(len(response.json), len(site.settings.home_datasets))
def test_get_home_reuses(self):
site = SiteFactory.create(
id=self.app.config['SITE_ID'],
settings__home_reuses=VisibleReuseFactory.create_batch(3)
)
current_site.reload()
self.login(AdminFactory())
response = self.get(url_for('api.home_reuses'))
self.assert200(response)
self.assertEqual(len(response.json), len(site.settings.home_reuses))
def test_set_home_datasets(self):
ids = [d.id for d in VisibleDatasetFactory.create_batch(3)]
self.login(AdminFactory())
response = self.put(url_for('api.home_datasets'), ids)
self.assert200(response)
self.assertEqual(len(response.json), len(ids))
site = Site.objects.get(id=self.app.config['SITE_ID'])
self.assertEqual([d.id for d in site.settings.home_datasets], ids)
def test_set_home_reuses(self):
ids = [r.id for r in VisibleReuseFactory.create_batch(3)]
self.login(AdminFactory())
response = self.put(url_for('api.home_reuses'), ids)
self.assert200(response)
self.assertEqual(len(response.json), len(ids))
site = Site.objects.get(id=self.app.config['SITE_ID'])
self.assertEqual([r.id for r in site.settings.home_reuses], ids)
| agpl-3.0 | 5,911,655,990,735,081,000 | 29.320388 | 78 | 0.651937 | false | 3.606236 | true | false | false |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/models/_models_py3.py | 1 | 39940 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_file_storage_enums import *
class AccessPolicy(msrest.serialization.Model):
"""An Access policy.
:param start: The date-time the policy is active.
:type start: str
:param expiry: The date-time the policy expires.
:type expiry: str
:param permission: The permissions for the ACL policy.
:type permission: str
"""
_attribute_map = {
'start': {'key': 'Start', 'type': 'str'},
'expiry': {'key': 'Expiry', 'type': 'str'},
'permission': {'key': 'Permission', 'type': 'str'},
}
def __init__(
self,
*,
start: Optional[str] = None,
expiry: Optional[str] = None,
permission: Optional[str] = None,
**kwargs
):
super(AccessPolicy, self).__init__(**kwargs)
self.start = start
self.expiry = expiry
self.permission = permission
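# Illustrative use (not part of the generated file): an access policy for a share's
# signed identifier could be constructed as, e.g.,
#   AccessPolicy(start="2021-01-01T00:00:00Z", expiry="2021-01-02T00:00:00Z", permission="rwd")
# where the concrete values here are assumptions, not values taken from this module.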
class ClearRange(msrest.serialization.Model):
"""ClearRange.
All required parameters must be populated in order to send to Azure.
:param start: Required.
:type start: long
:param end: Required.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
}
_xml_map = {
'name': 'ClearRange'
}
def __init__(
self,
*,
start: int,
end: int,
**kwargs
):
super(ClearRange, self).__init__(**kwargs)
self.start = start
self.end = end
class CopyFileSmbInfo(msrest.serialization.Model):
"""Parameter group.
:param file_permission_copy_mode: Specifies the option to copy file security descriptor from
source file or to set it using the value which is defined by the header value of x-ms-file-
permission or x-ms-file-permission-key. Possible values include: "source", "override".
:type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType
:param ignore_read_only: Specifies the option to overwrite the target file if it already exists
and has read-only attribute set.
:type ignore_read_only: bool
:param file_attributes: Specifies either the option to copy file attributes from a source
file(source) to a target file or a list of attributes to set on a target file.
:type file_attributes: str
:param file_creation_time: Specifies either the option to copy file creation time from a source
file(source) to a target file or a time value in ISO 8601 format to set as creation time on a
target file.
:type file_creation_time: str
:param file_last_write_time: Specifies either the option to copy file last write time from a
source file(source) to a target file or a time value in ISO 8601 format to set as last write
time on a target file.
:type file_last_write_time: str
:param set_archive_attribute: Specifies the option to set archive attribute on a target file.
True means archive attribute will be set on a target file despite attribute overrides or a
source file state.
:type set_archive_attribute: bool
"""
_attribute_map = {
'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'},
'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'},
'file_attributes': {'key': 'fileAttributes', 'type': 'str'},
'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'},
'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'},
'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'},
}
def __init__(
self,
*,
file_permission_copy_mode: Optional[Union[str, "PermissionCopyModeType"]] = None,
ignore_read_only: Optional[bool] = None,
file_attributes: Optional[str] = None,
file_creation_time: Optional[str] = None,
file_last_write_time: Optional[str] = None,
set_archive_attribute: Optional[bool] = None,
**kwargs
):
super(CopyFileSmbInfo, self).__init__(**kwargs)
self.file_permission_copy_mode = file_permission_copy_mode
self.ignore_read_only = ignore_read_only
self.file_attributes = file_attributes
self.file_creation_time = file_creation_time
self.file_last_write_time = file_last_write_time
self.set_archive_attribute = set_archive_attribute
class CorsRule(msrest.serialization.Model):
"""CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param allowed_origins: Required. The origin domains that are permitted to make a request
against the storage service via CORS. The origin domain is the domain from which the request
originates. Note that the origin must be an exact case-sensitive match with the origin that the
    user agent sends to the service. You can also use the wildcard character '*' to allow all origin
domains to make requests via CORS.
:type allowed_origins: str
:param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
use for a CORS request. (comma separated).
:type allowed_methods: str
:param allowed_headers: Required. The request headers that the origin domain may specify on the
CORS request.
:type allowed_headers: str
:param exposed_headers: Required. The response headers that may be sent in the response to the
CORS request and exposed by the browser to the request issuer.
:type exposed_headers: str
:param max_age_in_seconds: Required. The maximum amount time that a browser should cache the
preflight OPTIONS request.
:type max_age_in_seconds: int
"""
_validation = {
'allowed_origins': {'required': True},
'allowed_methods': {'required': True},
'allowed_headers': {'required': True},
'exposed_headers': {'required': True},
'max_age_in_seconds': {'required': True, 'minimum': 0},
}
_attribute_map = {
'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
}
def __init__(
self,
*,
allowed_origins: str,
allowed_methods: str,
allowed_headers: str,
exposed_headers: str,
max_age_in_seconds: int,
**kwargs
):
super(CorsRule, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
self.allowed_methods = allowed_methods
self.allowed_headers = allowed_headers
self.exposed_headers = exposed_headers
self.max_age_in_seconds = max_age_in_seconds
class DirectoryItem(msrest.serialization.Model):
"""A listed directory item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
}
_xml_map = {
'name': 'Directory'
}
def __init__(
self,
*,
name: str,
**kwargs
):
super(DirectoryItem, self).__init__(**kwargs)
self.name = name
class FileHTTPHeaders(msrest.serialization.Model):
"""Parameter group.
:param file_content_type: Sets the MIME content type of the file. The default type is
'application/octet-stream'.
:type file_content_type: str
:param file_content_encoding: Specifies which content encodings have been applied to the file.
:type file_content_encoding: str
:param file_content_language: Specifies the natural languages used by this resource.
:type file_content_language: str
:param file_cache_control: Sets the file's cache control. The File service stores this value
but does not use or modify it.
:type file_cache_control: str
:param file_content_md5: Sets the file's MD5 hash.
:type file_content_md5: bytearray
:param file_content_disposition: Sets the file's Content-Disposition header.
:type file_content_disposition: str
"""
_attribute_map = {
'file_content_type': {'key': 'fileContentType', 'type': 'str'},
'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'},
'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'},
'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'},
'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'},
'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'},
}
def __init__(
self,
*,
file_content_type: Optional[str] = None,
file_content_encoding: Optional[str] = None,
file_content_language: Optional[str] = None,
file_cache_control: Optional[str] = None,
file_content_md5: Optional[bytearray] = None,
file_content_disposition: Optional[str] = None,
**kwargs
):
super(FileHTTPHeaders, self).__init__(**kwargs)
self.file_content_type = file_content_type
self.file_content_encoding = file_content_encoding
self.file_content_language = file_content_language
self.file_cache_control = file_cache_control
self.file_content_md5 = file_content_md5
self.file_content_disposition = file_content_disposition
class FileItem(msrest.serialization.Model):
"""A listed file item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param properties: Required. File properties.
:type properties: ~azure.storage.fileshare.models.FileProperty
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'properties': {'key': 'Properties', 'type': 'FileProperty'},
}
_xml_map = {
'name': 'File'
}
def __init__(
self,
*,
name: str,
properties: "FileProperty",
**kwargs
):
super(FileItem, self).__init__(**kwargs)
self.name = name
self.properties = properties
class FileProperty(msrest.serialization.Model):
"""File properties.
All required parameters must be populated in order to send to Azure.
:param content_length: Required. Content length of the file. This value may not be up-to-date
since an SMB client may have modified the file locally. The value of Content-Length may not
reflect that fact until the handle is closed or the op-lock is broken. To retrieve current
property values, call Get File Properties.
:type content_length: long
"""
_validation = {
'content_length': {'required': True},
}
_attribute_map = {
'content_length': {'key': 'Content-Length', 'type': 'long'},
}
def __init__(
self,
*,
content_length: int,
**kwargs
):
super(FileProperty, self).__init__(**kwargs)
self.content_length = content_length
class FileRange(msrest.serialization.Model):
"""An Azure Storage file range.
All required parameters must be populated in order to send to Azure.
:param start: Required. Start of the range.
:type start: long
:param end: Required. End of the range.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long'},
'end': {'key': 'End', 'type': 'long'},
}
_xml_map = {
'name': 'Range'
}
def __init__(
self,
*,
start: int,
end: int,
**kwargs
):
super(FileRange, self).__init__(**kwargs)
self.start = start
self.end = end
class FilesAndDirectoriesListSegment(msrest.serialization.Model):
"""Abstract for entries that can be listed from Directory.
All required parameters must be populated in order to send to Azure.
:param directory_items: Required.
:type directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
:param file_items: Required.
:type file_items: list[~azure.storage.fileshare.models.FileItem]
"""
_validation = {
'directory_items': {'required': True},
'file_items': {'required': True},
}
_attribute_map = {
'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'},
'file_items': {'key': 'FileItems', 'type': '[FileItem]'},
}
_xml_map = {
'name': 'Entries'
}
def __init__(
self,
*,
directory_items: List["DirectoryItem"],
file_items: List["FileItem"],
**kwargs
):
super(FilesAndDirectoriesListSegment, self).__init__(**kwargs)
self.directory_items = directory_items
self.file_items = file_items
class HandleItem(msrest.serialization.Model):
"""A listed Azure Storage handle item.
All required parameters must be populated in order to send to Azure.
:param handle_id: Required. XSMB service handle ID.
:type handle_id: str
:param path: Required. File or directory name including full path starting from share root.
:type path: str
:param file_id: Required. FileId uniquely identifies the file or directory.
:type file_id: str
:param parent_id: ParentId uniquely identifies the parent directory of the object.
:type parent_id: str
:param session_id: Required. SMB session ID in context of which the file handle was opened.
:type session_id: str
:param client_ip: Required. Client IP that opened the handle.
:type client_ip: str
:param open_time: Required. Time when the session that previously opened the handle has last
been reconnected. (UTC).
:type open_time: ~datetime.datetime
:param last_reconnect_time: Time handle was last connected to (UTC).
:type last_reconnect_time: ~datetime.datetime
"""
_validation = {
'handle_id': {'required': True},
'path': {'required': True},
'file_id': {'required': True},
'session_id': {'required': True},
'client_ip': {'required': True},
'open_time': {'required': True},
}
_attribute_map = {
'handle_id': {'key': 'HandleId', 'type': 'str'},
'path': {'key': 'Path', 'type': 'str'},
'file_id': {'key': 'FileId', 'type': 'str'},
'parent_id': {'key': 'ParentId', 'type': 'str'},
'session_id': {'key': 'SessionId', 'type': 'str'},
'client_ip': {'key': 'ClientIp', 'type': 'str'},
'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'},
'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'},
}
_xml_map = {
'name': 'Handle'
}
def __init__(
self,
*,
handle_id: str,
path: str,
file_id: str,
session_id: str,
client_ip: str,
open_time: datetime.datetime,
parent_id: Optional[str] = None,
last_reconnect_time: Optional[datetime.datetime] = None,
**kwargs
):
super(HandleItem, self).__init__(**kwargs)
self.handle_id = handle_id
self.path = path
self.file_id = file_id
self.parent_id = parent_id
self.session_id = session_id
self.client_ip = client_ip
self.open_time = open_time
self.last_reconnect_time = last_reconnect_time
class LeaseAccessConditions(msrest.serialization.Model):
"""Parameter group.
:param lease_id: If specified, the operation only succeeds if the resource's lease is active
and matches this ID.
:type lease_id: str
"""
_attribute_map = {
'lease_id': {'key': 'leaseId', 'type': 'str'},
}
def __init__(
self,
*,
lease_id: Optional[str] = None,
**kwargs
):
super(LeaseAccessConditions, self).__init__(**kwargs)
self.lease_id = lease_id
class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model):
"""An enumeration of directories and files.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param share_name: Required.
:type share_name: str
:param share_snapshot:
:type share_snapshot: str
:param directory_path: Required.
:type directory_path: str
:param prefix: Required.
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param segment: Required. Abstract for entries that can be listed from Directory.
:type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'share_name': {'required': True},
'directory_path': {'required': True},
'prefix': {'required': True},
'segment': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}},
'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}},
'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str'},
'marker': {'key': 'Marker', 'type': 'str'},
'max_results': {'key': 'MaxResults', 'type': 'int'},
'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
service_endpoint: str,
share_name: str,
directory_path: str,
prefix: str,
segment: "FilesAndDirectoriesListSegment",
next_marker: str,
share_snapshot: Optional[str] = None,
marker: Optional[str] = None,
max_results: Optional[int] = None,
**kwargs
):
super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.share_name = share_name
self.share_snapshot = share_snapshot
self.directory_path = directory_path
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.segment = segment
self.next_marker = next_marker
class ListHandlesResponse(msrest.serialization.Model):
"""An enumeration of handles.
All required parameters must be populated in order to send to Azure.
:param handle_list:
:type handle_list: list[~azure.storage.fileshare.models.HandleItem]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'next_marker': {'required': True},
}
_attribute_map = {
'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
next_marker: str,
handle_list: Optional[List["HandleItem"]] = None,
**kwargs
):
super(ListHandlesResponse, self).__init__(**kwargs)
self.handle_list = handle_list
self.next_marker = next_marker
class ListSharesResponse(msrest.serialization.Model):
"""An enumeration of shares.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param prefix:
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param share_items:
:type share_items: list[~azure.storage.fileshare.models.ShareItemInternal]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str'},
'marker': {'key': 'Marker', 'type': 'str'},
'max_results': {'key': 'MaxResults', 'type': 'int'},
'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
service_endpoint: str,
next_marker: str,
prefix: Optional[str] = None,
marker: Optional[str] = None,
max_results: Optional[int] = None,
share_items: Optional[List["ShareItemInternal"]] = None,
**kwargs
):
super(ListSharesResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.share_items = share_items
self.next_marker = next_marker
class Metrics(msrest.serialization.Model):
"""Storage Analytics metrics for file service.
All required parameters must be populated in order to send to Azure.
:param version: Required. The version of Storage Analytics to configure.
:type version: str
:param enabled: Required. Indicates whether metrics are enabled for the File service.
:type enabled: bool
:param include_apis: Indicates whether metrics should generate summary statistics for called
API operations.
:type include_apis: bool
:param retention_policy: The retention policy.
:type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
"""
_validation = {
'version': {'required': True},
'enabled': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str'},
'enabled': {'key': 'Enabled', 'type': 'bool'},
'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(
self,
*,
version: str,
enabled: bool,
include_apis: Optional[bool] = None,
retention_policy: Optional["RetentionPolicy"] = None,
**kwargs
):
super(Metrics, self).__init__(**kwargs)
self.version = version
self.enabled = enabled
self.include_apis = include_apis
self.retention_policy = retention_policy
class RetentionPolicy(msrest.serialization.Model):
"""The retention policy.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Indicates whether a retention policy is enabled for the File service.
If false, metrics data is retained, and the user is responsible for deleting it.
:type enabled: bool
:param days: Indicates the number of days that metrics data should be retained. All data older
than this value will be deleted. Metrics data is deleted on a best-effort basis after the
retention period expires.
:type days: int
"""
_validation = {
'enabled': {'required': True},
'days': {'maximum': 365, 'minimum': 1},
}
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool'},
'days': {'key': 'Days', 'type': 'int'},
}
def __init__(
self,
*,
enabled: bool,
days: Optional[int] = None,
**kwargs
):
super(RetentionPolicy, self).__init__(**kwargs)
self.enabled = enabled
self.days = days
class ShareFileRangeList(msrest.serialization.Model):
"""The list of file ranges.
:param ranges:
:type ranges: list[~azure.storage.fileshare.models.FileRange]
:param clear_ranges:
:type clear_ranges: list[~azure.storage.fileshare.models.ClearRange]
"""
_attribute_map = {
'ranges': {'key': 'Ranges', 'type': '[FileRange]'},
'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'},
}
def __init__(
self,
*,
ranges: Optional[List["FileRange"]] = None,
clear_ranges: Optional[List["ClearRange"]] = None,
**kwargs
):
super(ShareFileRangeList, self).__init__(**kwargs)
self.ranges = ranges
self.clear_ranges = clear_ranges
class ShareItemInternal(msrest.serialization.Model):
"""A listed Azure Storage share item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param snapshot:
:type snapshot: str
:param deleted:
:type deleted: bool
:param version:
:type version: str
:param properties: Required. Properties of a share.
:type properties: ~azure.storage.fileshare.models.SharePropertiesInternal
:param metadata: Dictionary of :code:`<string>`.
:type metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'snapshot': {'key': 'Snapshot', 'type': 'str'},
'deleted': {'key': 'Deleted', 'type': 'bool'},
'version': {'key': 'Version', 'type': 'str'},
'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'},
'metadata': {'key': 'Metadata', 'type': '{str}'},
}
_xml_map = {
'name': 'Share'
}
def __init__(
self,
*,
name: str,
properties: "SharePropertiesInternal",
snapshot: Optional[str] = None,
deleted: Optional[bool] = None,
version: Optional[str] = None,
metadata: Optional[Dict[str, str]] = None,
**kwargs
):
super(ShareItemInternal, self).__init__(**kwargs)
self.name = name
self.snapshot = snapshot
self.deleted = deleted
self.version = version
self.properties = properties
self.metadata = metadata
class SharePermission(msrest.serialization.Model):
"""A permission (a security descriptor) at the share level.
All required parameters must be populated in order to send to Azure.
:param permission: Required. The permission in the Security Descriptor Definition Language
(SDDL).
:type permission: str
"""
_validation = {
'permission': {'required': True},
}
_attribute_map = {
'permission': {'key': 'permission', 'type': 'str'},
}
def __init__(
self,
*,
permission: str,
**kwargs
):
super(SharePermission, self).__init__(**kwargs)
self.permission = permission
class SharePropertiesInternal(msrest.serialization.Model):
"""Properties of a share.
All required parameters must be populated in order to send to Azure.
:param last_modified: Required.
:type last_modified: ~datetime.datetime
:param etag: Required.
:type etag: str
:param quota: Required.
:type quota: int
:param provisioned_iops:
:type provisioned_iops: int
:param provisioned_ingress_m_bps:
:type provisioned_ingress_m_bps: int
:param provisioned_egress_m_bps:
:type provisioned_egress_m_bps: int
:param next_allowed_quota_downgrade_time:
:type next_allowed_quota_downgrade_time: ~datetime.datetime
:param deleted_time:
:type deleted_time: ~datetime.datetime
:param remaining_retention_days:
:type remaining_retention_days: int
:param access_tier:
:type access_tier: str
:param access_tier_change_time:
:type access_tier_change_time: ~datetime.datetime
:param access_tier_transition_state:
:type access_tier_transition_state: str
:param lease_status: The current lease status of the share. Possible values include: "locked",
"unlocked".
:type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType
:param lease_state: Lease state of the share. Possible values include: "available", "leased",
"expired", "breaking", "broken".
:type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType
:param lease_duration: When a share is leased, specifies whether the lease is of infinite or
fixed duration. Possible values include: "infinite", "fixed".
:type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType
:param enabled_protocols:
:type enabled_protocols: str
:param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash".
:type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
'quota': {'required': True},
}
_attribute_map = {
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
'etag': {'key': 'Etag', 'type': 'str'},
'quota': {'key': 'Quota', 'type': 'int'},
'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'},
'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'},
'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'},
'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'},
'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
'access_tier': {'key': 'AccessTier', 'type': 'str'},
'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'},
'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'},
'lease_status': {'key': 'LeaseStatus', 'type': 'str'},
'lease_state': {'key': 'LeaseState', 'type': 'str'},
'lease_duration': {'key': 'LeaseDuration', 'type': 'str'},
'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'},
'root_squash': {'key': 'RootSquash', 'type': 'str'},
}
def __init__(
self,
*,
last_modified: datetime.datetime,
etag: str,
quota: int,
provisioned_iops: Optional[int] = None,
provisioned_ingress_m_bps: Optional[int] = None,
provisioned_egress_m_bps: Optional[int] = None,
next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None,
deleted_time: Optional[datetime.datetime] = None,
remaining_retention_days: Optional[int] = None,
access_tier: Optional[str] = None,
access_tier_change_time: Optional[datetime.datetime] = None,
access_tier_transition_state: Optional[str] = None,
lease_status: Optional[Union[str, "LeaseStatusType"]] = None,
lease_state: Optional[Union[str, "LeaseStateType"]] = None,
lease_duration: Optional[Union[str, "LeaseDurationType"]] = None,
enabled_protocols: Optional[str] = None,
root_squash: Optional[Union[str, "ShareRootSquash"]] = None,
**kwargs
):
super(SharePropertiesInternal, self).__init__(**kwargs)
self.last_modified = last_modified
self.etag = etag
self.quota = quota
self.provisioned_iops = provisioned_iops
self.provisioned_ingress_m_bps = provisioned_ingress_m_bps
self.provisioned_egress_m_bps = provisioned_egress_m_bps
self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time
self.deleted_time = deleted_time
self.remaining_retention_days = remaining_retention_days
self.access_tier = access_tier
self.access_tier_change_time = access_tier_change_time
self.access_tier_transition_state = access_tier_transition_state
self.lease_status = lease_status
self.lease_state = lease_state
self.lease_duration = lease_duration
self.enabled_protocols = enabled_protocols
self.root_squash = root_squash
class ShareProtocolSettings(msrest.serialization.Model):
"""Protocol settings.
:param smb: Settings for SMB protocol.
:type smb: ~azure.storage.fileshare.models.ShareSmbSettings
"""
_attribute_map = {
'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}},
}
def __init__(
self,
*,
smb: Optional["ShareSmbSettings"] = None,
**kwargs
):
super(ShareProtocolSettings, self).__init__(**kwargs)
self.smb = smb
class ShareSmbSettings(msrest.serialization.Model):
"""Settings for SMB protocol.
:param multichannel: Settings for SMB Multichannel.
:type multichannel: ~azure.storage.fileshare.models.SmbMultichannel
"""
_attribute_map = {
'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'},
}
def __init__(
self,
*,
multichannel: Optional["SmbMultichannel"] = None,
**kwargs
):
super(ShareSmbSettings, self).__init__(**kwargs)
self.multichannel = multichannel
class ShareStats(msrest.serialization.Model):
"""Stats for the share.
All required parameters must be populated in order to send to Azure.
:param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that
this value may not include all recently created or recently resized files.
:type share_usage_bytes: int
"""
_validation = {
'share_usage_bytes': {'required': True},
}
_attribute_map = {
'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'},
}
def __init__(
self,
*,
share_usage_bytes: int,
**kwargs
):
super(ShareStats, self).__init__(**kwargs)
self.share_usage_bytes = share_usage_bytes
class SignedIdentifier(msrest.serialization.Model):
"""Signed identifier.
All required parameters must be populated in order to send to Azure.
:param id: Required. A unique id.
:type id: str
:param access_policy: The access policy.
:type access_policy: ~azure.storage.fileshare.models.AccessPolicy
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'},
}
def __init__(
self,
*,
id: str,
access_policy: Optional["AccessPolicy"] = None,
**kwargs
):
super(SignedIdentifier, self).__init__(**kwargs)
self.id = id
self.access_policy = access_policy
class SmbMultichannel(msrest.serialization.Model):
"""Settings for SMB multichannel.
:param enabled: If SMB multichannel is enabled.
:type enabled: bool
"""
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool'},
}
_xml_map = {
'name': 'Multichannel'
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
**kwargs
):
super(SmbMultichannel, self).__init__(**kwargs)
self.enabled = enabled
class SourceModifiedAccessConditions(msrest.serialization.Model):
"""Parameter group.
:param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching
crc64 checksum.
:type source_if_match_crc64: bytearray
:param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a
matching crc64 checksum.
:type source_if_none_match_crc64: bytearray
"""
_attribute_map = {
'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'},
'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'},
}
def __init__(
self,
*,
source_if_match_crc64: Optional[bytearray] = None,
source_if_none_match_crc64: Optional[bytearray] = None,
**kwargs
):
super(SourceModifiedAccessConditions, self).__init__(**kwargs)
self.source_if_match_crc64 = source_if_match_crc64
self.source_if_none_match_crc64 = source_if_none_match_crc64
class StorageError(msrest.serialization.Model):
"""StorageError.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str'},
}
def __init__(
self,
*,
message: Optional[str] = None,
**kwargs
):
super(StorageError, self).__init__(**kwargs)
self.message = message
class StorageServiceProperties(msrest.serialization.Model):
"""Storage service properties.
:param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for
files.
:type hour_metrics: ~azure.storage.fileshare.models.Metrics
:param minute_metrics: A summary of request statistics grouped by API in minute aggregates for
files.
:type minute_metrics: ~azure.storage.fileshare.models.Metrics
:param cors: The set of CORS rules.
:type cors: list[~azure.storage.fileshare.models.CorsRule]
:param protocol: Protocol settings.
:type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
"""
_attribute_map = {
'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'},
'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'},
'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}},
'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
}
def __init__(
self,
*,
hour_metrics: Optional["Metrics"] = None,
minute_metrics: Optional["Metrics"] = None,
cors: Optional[List["CorsRule"]] = None,
protocol: Optional["ShareProtocolSettings"] = None,
**kwargs
):
super(StorageServiceProperties, self).__init__(**kwargs)
self.hour_metrics = hour_metrics
self.minute_metrics = minute_metrics
self.cors = cors
self.protocol = protocol
| mit | 229,342,826,065,661,280 | 32.647852 | 364 | 0.611017 | false | 3.927237 | false | false | false |
maikito26/script.foscam | resources/lib/utils.py | 1 | 6718 | import os
import time
import glob
import xbmc
import xbmcaddon
import xbmcgui
import requests
__addon__ = xbmcaddon.Addon()
__id__ = __addon__.getAddonInfo('id')
__icon__ = __addon__.getAddonInfo('icon').decode("utf-8")
__version__ = __addon__.getAddonInfo('version')
addon_name = __addon__.getLocalizedString(32000)
TEXTURE_FMT = os.path.join(__addon__.getAddonInfo('path'), 'resources', 'media', '{0}.png')
ACTION_PREVIOUS_MENU = 10
ACTION_BACKSPACE = 110
ACTION_NAV_BACK = 92
ACTION_STOP = 13
ACTION_SELECT_ITEM = 7
INVALID_PASSWORD_CHARS = ('{', '}', ':', ';', '!', '?', '@', '\\', '/')
INVALID_USER_CHARS = ('@',)
def log(message, level=xbmc.LOGNOTICE):
xbmc.log("{0} v{1}: {2}".format(__id__, __version__, message), level=level)
def log_normal(message):
if int(__addon__.getSetting('debug')) > 0:
log(message)
def log_verbose(message):
if int(__addon__.getSetting('debug')) == 2:
log(message)
def log_error(message):
log(message, xbmc.LOGERROR)
def notify(msg, time=10000):
xbmcgui.Dialog().notification(addon_name, msg, __icon__, time)
def addon_info(info):
return __addon__.getAddonInfo(info)
def get_string(ident):
return __addon__.getLocalizedString(ident)
def get_setting(ident):
return __addon__.getSetting(ident)
def get_bool_setting(ident):
return get_setting(ident) == "true"
def get_int_setting(ident):
try:
return int(get_setting(ident))
except ValueError:
return None
def get_float_setting(ident):
return float(get_setting(ident))
def set_setting(ident, value):
__addon__.setSetting(ident, value)
def open_settings(callback=None):
if callback is not None:
callback()
__addon__.openSettings()
def invalid_char(credential, chars, stringid, show_dialog):
for char in chars:
if char in credential:
if show_dialog:
xbmcgui.Dialog().ok(get_string(32000), get_string(stringid),
" ", " ".join(chars))
return char
return False
def invalid_password_char(password, show_dialog=False):
return invalid_char(password, INVALID_PASSWORD_CHARS, 32105, show_dialog)
def invalid_user_char(user, show_dialog=False):
return invalid_char(user, INVALID_USER_CHARS, 32106, show_dialog)
def error_dialog(msg):
xbmcgui.Dialog().ok(get_string(32000), msg, " ", get_string(32102))
open_settings()
class SnapShot(object):
def __init__(self, path, interval, get_data):
self.time = time.time()
self.interval = interval
self.filename = os.path.join(path, "{0}.jpg".format(self.time))
self.get_data = get_data
def __enter__(self):
return self
def save(self):
with open(self.filename, 'wb') as output:
log_verbose("Snapshot {0}".format(self.filename))
data = self.get_data()
if data:
output.write(data)
return self.filename
else:
return ""
def __exit__(self, exc_type, exc_value, traceback):
current_time = time.time()
elapsed = current_time - self.time
log_verbose("Retrieving snapshot took {0:.2f} seconds".format(elapsed))
remaining = int(self.interval - elapsed*1000)
sleep = max(200, remaining)
log_verbose("Sleeping for {0} milliseconds".format(sleep))
xbmc.sleep(sleep)
try:
os.remove(self.filename)
except:
pass
else:
log_verbose("Deleted {0}".format(self.filename))
def get_mjpeg_frame(stream):
content_length = ""
try:
while not "Length" in content_length:
content_length = stream.readline()
log_verbose("Stream Readline: " + content_length)
bytes = int(content_length.split(':')[-1])
log_verbose("Stream JPEG Read Size: " + str(bytes))
content_length = stream.readline()
log_verbose("Stream Readline: " + content_length)
return stream.read(bytes)
except requests.RequestException as e:
        log_error(str(e))
return None
class ExtractMJPEGFrames(object):
def __init__(self, path, duration, stream, callback, *args):
self.path = path
self.duration = duration
self.stream = stream
self.callback = callback
self.callback_args = args
self._stop = False
def __enter__(self):
return self
def stop(self):
self._stop = True
def start(self):
start_time = time.time()
current_time = start_time
frames = 0
while current_time < start_time + self.duration and not self._stop:
xbmc.sleep(1)
frame = get_mjpeg_frame(self.stream)
if frame:
filename = os.path.join(self.path, "snapshot.{0}.jpg".format(time.time()))
with open(filename, 'wb') as jpeg_file:
jpeg_file.write(frame)
self.callback(filename, *self.callback_args)
log_verbose("Snapshot {0}".format(filename))
current_time = time.time()
frames += 1
duration = current_time - start_time
log_normal("Average fps: {0:.2f}".format(frames / duration))
return int(duration)
def __exit__(self, exc_type, exc_value, traceback):
self.stream.close()
for jpg in glob.glob(os.path.join(self.path, "snapshot.*.jpg")):
try:
os.remove(jpg)
except:
log_verbose("Unable to delete {0}".format(jpg))
else:
log_verbose("Deleted {0}".format(jpg))
class Monitor(xbmc.Monitor):
def __init__(self, updated_settings_callback):
xbmc.Monitor.__init__(self)
self.updated_settings_callback = updated_settings_callback
def onSettingsChanged(self):
self.updated_settings_callback()
class StopResumePlayer(xbmc.Player):
def maybe_stop_current(self):
if self.isPlaying():
self.resume_time = self.getTime()
self.previous_file = self.getPlayingFile()
self.stop()
log_normal("Stopped {0}".format(self.previous_file))
else:
self.previous_file = None
def maybe_resume_previous(self):
if self.previous_file is not None:
resume_time_str = "{0:.1f}".format(self.resume_time - 10.)
log_normal("Resuming {0} at {1}".format(self.previous_file, resume_time_str))
listitem = xbmcgui.ListItem()
listitem.setProperty('StartOffset', resume_time_str)
self.play(self.previous_file, listitem)
| gpl-3.0 | 8,232,375,941,994,584,000 | 28.991071 | 91 | 0.591694 | false | 3.761478 | false | false | false |
sid5432/pyOTDR | pyotdr/main.py | 1 | 1399 | import os
import logging
import argparse
from pyotdr.dump import tofile, ExportDataType
from pyotdr.read import sorparse
logging.basicConfig(format="%(message)s")
logger = logging.getLogger(__name__)
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger.setLevel(LOG_LEVEL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("SOR_file", type=str, help="Name of the sor file to transform")
parser.add_argument(
"format",
type=ExportDataType,
choices=list(ExportDataType),
default=ExportDataType.JSON,
help="Output format : JSON or XML",
nargs="?",
)
args = parser.parse_args()
logging.basicConfig(format="%(message)s")
root_logger = logging.getLogger("pyotdr")
root_logger.setLevel(LOG_LEVEL)
filename = args.SOR_file
opformat = ExportDataType(args.format)
_, results, tracedata = sorparse(filename)
# construct data file name to dump results
fn_strip, _ = os.path.splitext(os.path.basename(filename))
datafile = fn_strip + "-dump." + str(opformat).lower()
with open(datafile, "w") as output:
tofile(results, output, format=opformat)
# construct data file name
fn_strip, _ = os.path.splitext(os.path.basename(filename))
opfile = fn_strip + "-trace.dat"
with open(opfile, "w") as output:
for xy in tracedata:
output.write(xy)
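# Illustrative invocation (the command name is an assumption; main() must be wired
# up as a console entry point, e.g. via setup.py, or called explicitly):
#
#   pyotdr mytrace.sor JSON
#
# which writes a dump file (mytrace-dump.<format>) and a trace file
# (mytrace-trace.dat) into the current directory.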
| gpl-3.0 | -2,556,162,915,401,240,000 | 28.145833 | 87 | 0.664046 | false | 3.652742 | false | false | false |
zhouyao1994/incubator-superset | superset/utils/dashboard_import_export.py | 1 | 1863 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import json
import logging
import time
from superset.models.core import Dashboard
from superset.utils.core import decode_dashboards
def import_dashboards(session, data_stream, import_time=None):
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(data_stream.read(), object_hook=decode_dashboards)
# TODO: import DRUID datasources
for table in data["datasources"]:
type(table).import_obj(table, import_time=import_time)
session.commit()
for dashboard in data["dashboards"]:
Dashboard.import_obj(dashboard, import_time=import_time)
session.commit()
def export_dashboards(session):
"""Returns all dashboards metadata as a json dump"""
logging.info("Starting export")
dashboards = session.query(Dashboard)
dashboard_ids = []
for dashboard in dashboards:
dashboard_ids.append(dashboard.id)
data = Dashboard.export_dashboards(dashboard_ids)
return data
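# Illustrative round trip (assumes a SQLAlchemy session such as superset's db.session
# and a previously exported JSON file on disk):
#
#   with open("dashboards.json", "rb") as data_stream:
#       import_dashboards(db.session, data_stream)
#   exported = export_dashboards(db.session)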
| apache-2.0 | 5,545,386,890,963,763,000 | 37.8125 | 72 | 0.741278 | false | 4.032468 | false | false | false |
inorton/junit2html | junit2htmlreport/matrix.py | 1 | 7952 | """
Handle multiple parsed junit reports
"""
from __future__ import unicode_literals
import os
from . import parser
from .common import ReportContainer
from .parser import SKIPPED, FAILED, PASSED, ABSENT
from .render import HTMLMatrix, HTMLReport
UNTESTED = "untested"
PARTIAL_PASS = "partial pass"
PARTIAL_FAIL = "partial failure"
TOTAL_FAIL = "total failure"
class ReportMatrix(ReportContainer):
"""
Load and handle several report files
"""
def __init__(self):
super(ReportMatrix, self).__init__()
self.cases = {}
self.classes = {}
self.casenames = {}
self.result_stats = {}
self.case_results = {}
def add_case_result(self, case):
testclass = case.testclass.name
casename = case.name
if testclass not in self.case_results:
self.case_results[testclass] = {}
if casename not in self.case_results[testclass]:
self.case_results[testclass][casename] = []
self.case_results[testclass][casename].append(case.outcome())
def report_order(self):
return sorted(self.reports.keys())
def short_outcome(self, outcome):
if outcome == PASSED:
return "/"
elif outcome == SKIPPED:
return "s"
elif outcome == FAILED:
return "f"
elif outcome == TOTAL_FAIL:
return "F"
elif outcome == PARTIAL_PASS:
return "%"
elif outcome == PARTIAL_FAIL:
return "X"
elif outcome == UNTESTED:
return "U"
return "?"
def add_report(self, filename):
"""
Load a report into the matrix
:param filename:
:return:
"""
parsed = parser.Junit(filename=filename)
filename = os.path.basename(filename)
self.reports[filename] = parsed
for suite in parsed.suites:
for testclass in suite.classes:
if testclass not in self.classes:
self.classes[testclass] = {}
if testclass not in self.casenames:
self.casenames[testclass] = list()
self.classes[testclass][filename] = suite.classes[testclass]
for testcase in self.classes[testclass][filename].cases:
name = testcase.name.strip()
if name not in self.casenames[testclass]:
self.casenames[testclass].append(name)
if testclass not in self.cases:
self.cases[testclass] = {}
if name not in self.cases[testclass]:
self.cases[testclass][name] = {}
self.cases[testclass][name][filename] = testcase
outcome = testcase.outcome()
self.add_case_result(testcase)
self.result_stats[outcome] = 1 + self.result_stats.get(
outcome, 0)
def summary(self):
"""
Render a summary of the matrix
:return:
"""
raise NotImplementedError()
def combined_result_list(self, classname, casename):
"""
        Combine the results of all instances of the given case
:param classname:
:param casename:
:return:
"""
if classname in self.case_results:
if casename in self.case_results[classname]:
results = self.case_results[classname][casename]
return self.combined_result(results)
return " ", ""
def combined_result(self, results):
"""
Given a list of results, produce a "combined" overall result
:param results:
:return:
"""
if results:
if PASSED in results:
if FAILED in results:
return self.short_outcome(PARTIAL_FAIL), PARTIAL_FAIL.title()
return self.short_outcome(PASSED), PASSED.title()
if FAILED in results:
return self.short_outcome(FAILED), FAILED.title()
if SKIPPED in results:
return self.short_outcome(UNTESTED), UNTESTED.title()
return " ", ""
class HtmlReportMatrix(ReportMatrix):
"""
Render a matrix report as html
"""
def __init__(self, outdir):
super(HtmlReportMatrix, self).__init__()
self.outdir = outdir
def add_report(self, filename):
"""
Load a report
"""
super(HtmlReportMatrix, self).add_report(filename)
basename = os.path.basename(filename)
# make the individual report too
report = self.reports[basename].html()
if self.outdir != "" and not os.path.exists(self.outdir):
os.makedirs(self.outdir)
with open(
os.path.join(self.outdir, basename) + ".html", "wb") as filehandle:
filehandle.write(report.encode("utf-8"))
def short_outcome(self, outcome):
if outcome == PASSED:
return "ok"
return super(HtmlReportMatrix, self).short_outcome(outcome)
def short_axis(self, axis):
if axis.endswith(".xml"):
return axis[:-4]
return axis
def summary(self):
"""
Render the html
:return:
"""
html_matrix = HTMLMatrix(self)
return str(html_matrix)
class TextReportMatrix(ReportMatrix):
"""
Render a matrix report as text
"""
def summary(self):
"""
Render as a string
:return:
"""
output = "\nMatrix Test Report\n"
output += "===================\n"
axis = list(self.reports.keys())
axis.sort()
# find the longest classname or test case name
left_indent = 0
for classname in self.classes:
left_indent = max(len(classname), left_indent)
for casename in self.casenames[classname]:
left_indent = max(len(casename), left_indent)
# render the axis headings in a stepped tree
treelines = ""
for filename in self.report_order():
output += "{} {}{}\n".format(" " * left_indent, treelines,
filename)
treelines += "| "
output += "{} {}\n".format(" " * left_indent, treelines)
# render in groups of the same class
for classname in self.classes:
# new class
output += "{} \n".format(classname)
# print the case name
for casename in sorted(set(self.casenames[classname])):
output += "- {}{} ".format(casename,
" " * (left_indent - len(casename)))
# print each test and its result for each axis
case_data = ""
for axis in self.report_order():
if axis not in self.cases[classname][casename]:
case_data += " "
else:
testcase = self.cases[classname][casename][axis]
if testcase.skipped:
case_data += "s "
elif testcase.failure:
case_data += "f "
else:
case_data += "/ "
combined, combined_name = self.combined_result(
self.case_results[classname][testcase.name])
output += case_data
output += " {} {}\n".format(combined, combined_name)
# print the result stats
output += "\n"
output += "-" * 79
output += "\n"
output += "Test Results:\n"
for outcome in sorted(self.result_stats):
output += " {:<12} : {:>6}\n".format(
outcome.title(),
self.result_stats[outcome])
return output
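# Illustrative use of the matrix classes (report file names are assumptions):
#
#   matrix = TextReportMatrix()
#   for path in ("results-run1.xml", "results-run2.xml"):
#       matrix.add_report(path)
#   print(matrix.summary())
#
# HtmlReportMatrix(outdir) works the same way, but also writes a per-report HTML
# file into `outdir` and renders the summary as HTML.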
| mit | -5,829,537,178,853,907,000 | 30.43083 | 83 | 0.522384 | false | 4.513053 | true | false | false |
Andy-hpliu/AirtestX | atx/__init__.py | 1 | 1668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module is to make mobile test more easily
"""
from __future__ import absolute_import
import os
import sys
import signal
import pkg_resources
try:
version = pkg_resources.get_distribution("atx").version
except pkg_resources.DistributionNotFound:
version = 'unknown'
from atx.consts import *
from atx.errors import *
from atx.device import Pattern, Bounds
def connect(*args, **kwargs):
"""Connect to a device, and return its object
Args:
platform: string one of <android|ios|windows>
Returns:
        A platform-specific device object
Raises:
SyntaxError, EnvironmentError
"""
platform = kwargs.pop('platform', os.getenv('ATX_PLATFORM') or 'android')
cls = None
if platform == 'android':
os.environ['JSONRPC_TIMEOUT'] = "10" # default is 90s which is too long.
devcls = __import__('atx.device.android')
cls = devcls.device.android.AndroidDevice
elif platform == 'windows':
devcls = __import__('atx.device.windows')
cls = devcls.device.windows.WindowsDevice
elif platform == 'ios':
devcls = __import__('atx.device.ios_webdriveragent')
cls = devcls.device.ios_webdriveragent.IOSDevice
elif platform == 'dummy': # for py.test use
devcls = __import__('atx.device.dummy')
cls = devcls.device.dummy.DummyDevice
if cls is None:
raise SyntaxError('Platform: %s not exists' % platform)
c = cls(*args, **kwargs)
return c
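# Illustrative usage (any device address/serial passed through *args is an assumption):
#
#   import atx
#   d = atx.connect(platform='android')   # omit platform to fall back to ATX_PLATFORM / 'android'
#   # `d` is then an AndroidDevice / WindowsDevice / IOSDevice instance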
# def _sig_handler(signum, frame):
# print >>sys.stderr, 'Signal INT catched !!!'
# sys.exit(1)
# signal.signal(signal.SIGINT, _sig_handler)
| apache-2.0 | -3,891,371,900,709,590,500 | 26.344262 | 80 | 0.641487 | false | 3.698448 | false | false | false |
aburnap/JMD2015-When-Crowdsourcing-Fails | human_crowds/model.py | 1 | 4057 | #-----------------------------------------------------------------------------
#
# Paper: When Crowdsourcing Fails: A Study of Expertise on Crowdsourced
# Design Evaluation
# Author: Alex Burnap - [email protected]
# Date: October 10, 2014
# License: Apache v2
# Description: Model definition for creating Bayesian network crowd
# consensus model
#
#-----------------------------------------------------------------------------
import numpy as np
import pymc
import scipy.stats as stats
def create_model(evaluation_matrix, num_participants, num_designs):
"""
    Function creates the Bayesian network model definition as a dict for PyMC, called
    by simulation_X.py.
    Input: evaluation_matrix - num_participants x num_designs array of observed ratings (0 = not evaluated)
    Output: dict of PyMC nodes defining the Bayesian network
Note: Current hyperparameters are hard coded as in paper
"""
#--------------- Data Manipulation of Evaluation Matrix-------------------
indices = np.nonzero(evaluation_matrix)
participant_indices, design_indices = indices[0], indices[1]
observed_evaluations = evaluation_matrix.reshape(num_participants*num_designs)
observed_evaluations = np.ma.masked_equal(observed_evaluations,0).compressed()
observed_evaluations = (observed_evaluations-1)/4.0
#--- 1st Level --- Hyperparameters of Priors -----------------------------
ability_mu_prior = 0.5
ability_tau_prior = 0.1
logistic_scale_mu = 0.07
logistic_scale_tau = 1.0
criteria_score_mu_prior = 0.5
criteria_score_tau_prior = 0.1
#--- 2nd Level --- Ability, Difficulty, Logistic Scale, Inv-Wishart Var --
"""
Currently only each participant has it's own node, there is common node
for difficulty, logistic scale, and inv_wishart_var
"""
ability_vector = pymc.TruncatedNormal('ability', mu=ability_mu_prior,
tau=ability_tau_prior, a=0, b=1, value=.5*np.ones(num_participants))
design_difficulty_num = pymc.TruncatedNormal('design_difficulty',
mu=0.5, tau=1.0, a=0.3, b=0.7, value=0.5)
logistic_scale_num = pymc.TruncatedNormal('logistic_scale', mu=logistic_scale_mu,
tau=logistic_scale_tau, a=.01, b=.2, value=.07)#, value=.1*np.ones(num_participants))
inv_gamma_var = .01 # turn this to density later
#--- 3rd Level ---- Logistic, Alpha, Beta Deterministic ------------------
@pymc.deterministic
def logistic_det(ability=ability_vector, difficulty=design_difficulty_num, scale=logistic_scale_num):
sigma = np.array(1 - stats.logistic.cdf(ability-difficulty,0,scale)).clip(
np.spacing(1)*10, 1e6) #this is done to prevent dividing by 0
return sigma
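    # Moment matching: an InverseGamma(alpha_det, beta_det) distribution has mean E and variance V.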
@pymc.deterministic
def alpha_det(E=logistic_det, V=inv_gamma_var):
return (E**2)/V + 2
@pymc.deterministic
def beta_det(E=logistic_det, V=inv_gamma_var):
return (E*((E**2)/V + 1))
#--- 4th Level --- Inverse-Gamma and True Score --------------------------
criteria_score_vector = pymc.TruncatedNormal('criteria_score', mu=criteria_score_mu_prior,
tau=criteria_score_tau_prior, a=0, b=1, value=.5*np.ones(num_designs))
inverse_gamma_vector = pymc.InverseGamma('inverse_gamma', alpha=alpha_det, beta=beta_det,
value=0.5*np.ones(num_participants))
#--- 5th Level ---- Evaluations -------------------------------
y = pymc.TruncatedNormal('y', mu=criteria_score_vector[design_indices],
tau=1/(inverse_gamma_vector[participant_indices]**2),
a=0, b=1, value=observed_evaluations, observed=True)
#--- Return All MCMC Objects ---------------------------------------------
return {'y':y ,
'criteria_score_vector': criteria_score_vector,
'inverse_gamma_vector': inverse_gamma_vector,
'alpha_det': alpha_det,
'beta_det': beta_det,
'logistic_det': logistic_det,
'logistic_scale_num': logistic_scale_num,
'ability_vector':ability_vector,
'design_difficulty_num':design_difficulty_num}
| mit | 5,495,789,919,888,849,000 | 41.705263 | 106 | 0.597486 | false | 3.691538 | false | false | false |
myDevicesIoT/Cayenne-Agent | myDevices/devices/digital/__init__.py | 1 | 4181 | # Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from myDevices.decorators.rest import request, response
from myDevices.utils.types import M_JSON
class GPIOPort():
IN = 0
OUT = 1
LOW = False
HIGH = True
def __init__(self, channelCount):
self.digitalChannelCount = channelCount
def checkDigitalChannel(self, channel):
if not 0 <= channel < self.digitalChannelCount:
raise ValueError("Channel %d out of range [%d..%d]" % (channel, 0, self.digitalChannelCount-1))
def checkDigitalValue(self, value):
if not (value == 0 or value == 1):
raise ValueError("Value %d not in {0, 1}")
@response("%d")
def digitalCount(self):
return self.digitalChannelCount
def __family__(self):
return "GPIOPort"
def __getFunction__(self, channel):
raise NotImplementedError
def __setFunction__(self, channel, func):
raise NotImplementedError
def __digitalRead__(self, channel):
raise NotImplementedError
def __portRead__(self):
raise NotImplementedError
def __digitalWrite__(self, channel, value):
raise NotImplementedError
def __portWrite__(self, value):
raise NotImplementedError
def getFunction(self, channel):
self.checkDigitalChannel(channel)
return self.__getFunction__(channel)
def getFunctionString(self, channel):
func = self.getFunction(channel)
if func == self.IN:
return "IN"
elif func == self.OUT:
return "OUT"
# elif func == GPIO.PWM:
# return "PWM"
else:
return "UNKNOWN"
def setFunction(self, channel, value):
self.checkDigitalChannel(channel)
self.__setFunction__(channel, value)
return self.__getFunction__(channel)
def setFunctionString(self, channel, value):
value = value.lower()
if value == "in":
self.setFunction(channel, self.IN)
elif value == "out":
self.setFunction(channel, self.OUT)
# elif value == "pwm":
# self.setFunction(channel, GPIO.PWM)
else:
raise ValueError("Bad Function")
return self.getFunctionString(channel)
@response("%d")
def digitalRead(self, channel):
self.checkDigitalChannel(channel)
return self.__digitalRead__(channel)
@response(contentType=M_JSON)
def wildcard(self, compact=False):
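        # Returns a snapshot of every digital channel: {index: {"function": ..., "value": ...}},
        # with single-letter keys when compact=True.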
if compact:
f = "f"
v = "v"
else:
f = "function"
v = "value"
values = {}
for i in range(self.digitalChannelCount):
if compact:
func = self.__getFunction__(i)
else:
func = self.getFunctionString(i)
values[i] = {f: func, v: int(self.digitalRead(i))}
return values
@response("%d")
def portRead(self):
return self.__portRead__()
@response("%d")
def digitalWrite(self, channel, value):
self.checkDigitalChannel(channel)
self.checkDigitalValue(value)
self.__digitalWrite__(channel, value)
return self.digitalRead(channel)
@response("%d")
def portWrite(self, value):
self.__portWrite__(value)
return self.portRead()
DRIVERS = {}
DRIVERS["helper"] = ["DigitalSensor", "DigitalActuator", "LightSwitch", "MotorSwitch", "RelaySwitch", "ValveSwitch", "MotionSensor"]
DRIVERS["pcf8574" ] = ["PCF8574", "PCF8574A"]
DRIVERS["ds2408" ] = ["DS2408"]
| mit | -4,353,876,563,491,724,300 | 29.97037 | 132 | 0.60177 | false | 4.09902 | false | false | false |
palful/yambopy | yambopy/dbs/wfdb.py | 1 | 3127 | from yambopy import *
import numpy as np
import shutil
import os
from netCDF4 import Dataset
def abs2(x):
return x.real**2 + x.imag**2
class YamboWFDB():
def __init__(self,savedb,path=None,save='SAVE',filename='ns.wf'):
"""
load wavefunction from yambo
"""
if path is None:
self.path = save
else:
self.path = path+'/SAVE'
self.filename = filename
#take some data from savedb
self.savedb = savedb
self.wfcgrid = savedb.wfcgrid
self.gvectors = savedb.gvectors
self.kpoints = savedb.kpts_car
self.lat = savedb.lat
self.rlat = savedb.rlat
#read wf
self.read()
self.nkpoints, self.nspin, self.ng, self.nbands = self.wf.shape
def read(self):
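        """Read the wavefunction coefficients from the SAVE folder, one ns.wf fragment file per k-point."""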
path = self.path
filename = self.filename
wf = []
nk = 1
while True:
try:
fname = "%s/%s_fragments_%d_1"%(path,filename,nk)
database = Dataset(fname)
re = database.variables['WF_REAL_COMPONENTS_@_K%d_BAND_GRP_1'%nk][:]
im = database.variables['WF_IM_COMPONENTS_@_K%d_BAND_GRP_1'%nk][:]
a = re+1j*im
wf.append(a)
nk+=1
except:
if nk==1:
raise IOError('Could not read %s'%fname)
break
self.wf = np.array(wf)
self.nkpoints, self.nspin, self.ng, self.nbands = self.wf.shape
def get_wf_gvecs(self,kpoint=0):
"""
        Get the g-vectors of the wavefunction at the given k-point
"""
#create array for fft
indexes = self.wfcgrid[kpoint]
        indexes = indexes[indexes > 0] # remove components that do not belong
gvecs = self.gvectors[indexes]
return gvecs
def write(self,path):
"""
write the wavefunctions in new files
"""
if os.path.isdir(path): shutil.rmtree(path)
os.mkdir(path)
#copy all the files
oldpath = self.path
filename = self.filename
shutil.copyfile("%s/%s"%(oldpath,filename),"%s/%s"%(path,filename))
for nk in xrange(self.nkpoints):
fname = "%s_fragments_%d_1"%(filename,nk+1)
shutil.copyfile("%s/%s"%(oldpath,fname),"%s/%s"%(path,fname))
#edit with the new wfs
wf = self.wf
for nk in xrange(self.nkpoints):
fname = "%s_fragments_%d_1"%(filename,nk+1)
database = Dataset("%s/%s"%(path,fname),'r+')
database.variables['WF_REAL_COMPONENTS_@_K%d_BAND_GRP_1'%(nk+1)][:] = wf[nk].real
database.variables['WF_IM_COMPONENTS_@_K%d_BAND_GRP_1'%(nk+1)][:] = wf[nk].imag
            database.close()
print 'new wavefunctions written in %s'%path
def __str__(self):
s = ""
s += "nkpoints: %4d\n"%self.nkpoints
s += "nspin: %4d\n"%self.nspin
s += "nbands: %4d\n"%self.nbands
s += "ng: %4d\n"%self.ng
return s
if __name__ == "__main__":
ywf = YamboWFDB(path='database')
| bsd-3-clause | -842,594,899,618,820,100 | 29.656863 | 93 | 0.521906 | false | 3.455249 | false | false | false |
jath03/projects | local/Python server-client/MyServer.py | 1 | 2622 | from http.server import BaseHTTPRequestHandler, HTTPServer
import threading, pickle, re, subprocess
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
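        # Serve static files from the Google test signin folder, picking the MIME type
        # from the extension; .py paths are executed with python3 and their output returned.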
f_type_map = {'.html': 'text/html', '.css': 'text/css', '.ico': 'image/x-icon', '.jpg': 'image/jpeg', '.png': 'image/png', '.gif': 'image/gif', '.js': 'text/javascript', '.py': 'test/python'}
        t_type = re.compile(r'/|(\.\w*)')
r_file = self.path.split('?')
requested_type = t_type.findall(self.path)
print(requested_type)
ex = requested_type[-1]
if ex != '.py':
try:
self.send_response(200)
self.send_header('Content-type', f_type_map[ex])
self.send_header('Content-Encoding', 'utf-8')
self.end_headers()
try:
with open('C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google%s'% r_file[0]) as file:
f = file.read()
#f = 'This is my secret message'
self.wfile.write(bytes(f, 'utf8'))
except UnicodeDecodeError:
with open('C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google%s'% r_file[0], 'rb') as f:
file = f.read()
self.wfile.write(file)
except IOError:
self.send_response(404, 'File Not Found')
self.wfile.write(bytes('404 file not found', 'utf8'))
except KeyError:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
with open('C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google\index.html') as file:
f = 'This is my secret message'
#f = file.read()
self.wfile.write(bytes(f, 'utf8'))
return
        else:
            result = subprocess.run(
                ['python3', r'C:\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Python server-client\cgi\firstcgi.py'],
                stdout=subprocess.PIPE)
            self.wfile.write(result.stdout)
server_address = ('192.168.1.233', 6789)
def run():
print('starting server ...')
httpd = HTTPServer(server_address, MyHandler)
httpd.serve_forever()
bg_server= threading.Thread(target = run)
###Uncomment the next line if you want to have the server start when the file is run###
bg_server.start()
print('\nserver started at %s:%s'% server_address)
| mit | 8,561,099,485,359,479,000 | 47.555556 | 199 | 0.553776 | false | 3.677419 | false | false | false |
alirizakeles/zato | code/zato-web-admin/src/zato/admin/web/views/channel/amqp.py | 1 | 3812 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2011 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
from traceback import format_exc
# Django
from django.http import HttpResponse, HttpResponseServerError
# anyjson
from anyjson import dumps
# Zato
from zato.admin.web.forms.channel.amqp import CreateForm, EditForm
from zato.admin.web.views import Delete as _Delete, get_definition_list, \
Index as _Index, method_allowed
from zato.common.odb.model import ChannelAMQP
logger = logging.getLogger(__name__)
def _get_edit_create_message(params, prefix=''):
""" Creates a base dictionary which can be used by both 'edit' and 'create' actions.
"""
return {
'id': params.get('id'),
'cluster_id': params['cluster_id'],
'name': params[prefix + 'name'],
'is_active': bool(params.get(prefix + 'is_active')),
'def_id': params[prefix + 'def_id'],
'queue': params[prefix + 'queue'],
'consumer_tag_prefix': params[prefix + 'consumer_tag_prefix'],
'service': params[prefix + 'service'],
'data_format': params.get(prefix + 'data_format'),
}
def _edit_create_response(client, verb, id, name, def_id, cluster_id):
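    """ Builds the JSON response returned after an AMQP channel has been created or updated.
    """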
response = client.invoke('zato.definition.amqp.get-by-id', {'id':def_id, 'cluster_id':cluster_id})
return_data = {'id': id,
'message': 'Successfully {0} the AMQP channel [{1}]'.format(verb, name),
'def_name': response.data.name
}
return HttpResponse(dumps(return_data), content_type='application/javascript')
class Index(_Index):
method_allowed = 'GET'
url_name = 'channel-amqp'
template = 'zato/channel/amqp.html'
service_name = 'zato.channel.amqp.get-list'
output_class = ChannelAMQP
paginate = True
class SimpleIO(_Index.SimpleIO):
input_required = ('cluster_id',)
output_required = ('id', 'name', 'is_active', 'queue', 'consumer_tag_prefix',
'def_name', 'def_id', 'service_name', 'data_format')
output_repeated = True
def handle(self):
create_form = CreateForm(req=self.req)
edit_form = EditForm(prefix='edit', req=self.req)
if self.req.zato.cluster_id:
def_ids = get_definition_list(self.req.zato.client, self.req.zato.cluster, 'amqp')
create_form.set_def_id(def_ids)
edit_form.set_def_id(def_ids)
return {
'create_form': create_form,
'edit_form': edit_form,
}
@method_allowed('POST')
def create(req):
try:
response = req.zato.client.invoke('zato.channel.amqp.create', _get_edit_create_message(req.POST))
return _edit_create_response(req.zato.client, 'created', response.data.id,
req.POST['name'], req.POST['def_id'], req.POST['cluster_id'])
except Exception, e:
msg = 'Could not create an AMQP channel, e:[{e}]'.format(e=format_exc(e))
logger.error(msg)
return HttpResponseServerError(msg)
@method_allowed('POST')
def edit(req):
try:
req.zato.client.invoke('zato.channel.amqp.edit', _get_edit_create_message(req.POST, 'edit-'))
return _edit_create_response(req.zato.client, 'updated', req.POST['id'], req.POST['edit-name'],
req.POST['edit-def_id'], req.POST['cluster_id'])
except Exception, e:
msg = 'Could not update the AMQP channel, e:[{e}]'.format(e=format_exc(e))
logger.error(msg)
return HttpResponseServerError(msg)
class Delete(_Delete):
url_name = 'channel-amqp-delete'
error_message = 'Could not delete the AMQP channel'
service_name = 'zato.channel.amqp.delete'
| gpl-3.0 | 1,374,120,287,783,542,800 | 34.626168 | 105 | 0.633788 | false | 3.367491 | false | false | false |
brunobord/critica | apps/archives/views.py | 1 | 2991 | # -*- coding: utf-8 -*-
"""
Views of ``critica.apps.archives`` application.
"""
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from django.core.paginator import InvalidPage
from django.core.paginator import EmptyPage
from critica.apps.front.views import home
from critica.apps.front.views import category
from critica.apps.front.views import regions
from critica.apps.front.views import voyages
from critica.apps.front.views import epicurien
from critica.apps.front.views import anger
from critica.apps.issues.models import Issue
from critica.apps.issues.views import _get_current_issue
def archives(request):
"""
Displays archive list.
"""
issue = _get_current_issue()
context = {}
context['issue'] = issue
context['is_current'] = True
try:
item_list = Issue.objects.filter(is_published=True).order_by('-publication_date')
except ObjectDoesNotExist:
item_list = None
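    # paginate the archive, 30 issues per page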
paginator = Paginator(item_list, 30)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
context['items'] = paginator.page(page)
except (EmptyPage, InvalidPage):
context['items'] = paginator.page(paginator.num_pages)
return render_to_response(
'archives/archives.html',
context,
context_instance=RequestContext(request)
)
def issuearchive_home(request, issue_number):
"""
Displays home archive of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return home(request, issue=issue, is_archive=True)
def issuearchive_category(request, issue_number, category_slug):
"""
Displays category archive of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return category(request, category_slug, issue=issue, is_archive=True)
def issuearchive_regions(request, issue_number):
"""
Displays "Regions" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return regions(request, issue=issue, is_archive=True)
def issuearchive_voyages(request, issue_number):
"""
Displays "Voyages" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return voyages(request, issue=issue, is_archive=True)
def issuearchive_epicurien(request, issue_number):
"""
Displays "Epicurien" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return epicurien(request, issue=issue, is_archive=True)
def issuearchive_anger(request, issue_number):
"""
Displays "Anger" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return anger(request, issue=issue, is_archive=True)
| gpl-3.0 | 3,696,300,011,208,231,400 | 25.945946 | 89 | 0.684721 | false | 3.805344 | false | false | false |
aequitas/home-assistant | homeassistant/components/zwave/__init__.py | 1 | 43527 | """Support for Z-Wave."""
import asyncio
import copy
from importlib import import_module
import logging
from pprint import pprint
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback, CoreState
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.const import (
ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.event import async_track_time_change
from homeassistant.util import convert
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from . import const
from . import config_flow # noqa pylint: disable=unused-import
from .const import (
CONF_AUTOHEAL, CONF_DEBUG, CONF_POLLING_INTERVAL,
CONF_USB_STICK_PATH, CONF_CONFIG_PATH, CONF_NETWORK_KEY,
DEFAULT_CONF_AUTOHEAL, DEFAULT_CONF_USB_STICK_PATH,
DEFAULT_POLLING_INTERVAL, DEFAULT_DEBUG, DOMAIN,
DATA_DEVICES, DATA_NETWORK, DATA_ENTITY_VALUES)
from .node_entity import ZWaveBaseEntity, ZWaveNodeEntity
from . import workaround
from .discovery_schemas import DISCOVERY_SCHEMAS
from .util import (check_node_schema, check_value_schema, node_name,
check_has_unique_id, is_node_parsed)
_LOGGER = logging.getLogger(__name__)
CLASS_ID = 'class_id'
ATTR_POWER = 'power_consumption'
CONF_POLLING_INTENSITY = 'polling_intensity'
CONF_IGNORED = 'ignored'
CONF_INVERT_OPENCLOSE_BUTTONS = 'invert_openclose_buttons'
CONF_REFRESH_VALUE = 'refresh_value'
CONF_REFRESH_DELAY = 'delay'
CONF_DEVICE_CONFIG = 'device_config'
CONF_DEVICE_CONFIG_GLOB = 'device_config_glob'
CONF_DEVICE_CONFIG_DOMAIN = 'device_config_domain'
DATA_ZWAVE_CONFIG = 'zwave_config'
DEFAULT_CONF_IGNORED = False
DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS = False
DEFAULT_CONF_REFRESH_VALUE = False
DEFAULT_CONF_REFRESH_DELAY = 5
SUPPORTED_PLATFORMS = ['binary_sensor', 'climate', 'cover', 'fan',
'lock', 'light', 'sensor', 'switch']
RENAME_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_NAME): cv.string,
})
RENAME_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_NAME): cv.string,
})
SET_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(vol.Coerce(int), cv.string),
vol.Optional(const.ATTR_CONFIG_SIZE, default=2): vol.Coerce(int)
})
SET_NODE_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Coerce(int)
})
REFRESH_NODE_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int)
})
SET_POLL_INTENSITY_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_POLL_INTENSITY): vol.Coerce(int),
})
PRINT_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
})
NODE_SERVICE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
})
REFRESH_ENTITY_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
})
RESET_NODE_METERS_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=1): vol.Coerce(int)
})
CHANGE_ASSOCIATION_SCHEMA = vol.Schema({
vol.Required(const.ATTR_ASSOCIATION): cv.string,
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_TARGET_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_GROUP): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=0x00): vol.Coerce(int)
})
SET_WAKEUP_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE):
vol.All(vol.Coerce(int), cv.positive_int),
})
HEAL_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_RETURN_ROUTES, default=False): cv.boolean,
})
TEST_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_MESSAGES, default=1): cv.positive_int,
})
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema({
vol.Optional(CONF_POLLING_INTENSITY): cv.positive_int,
vol.Optional(CONF_IGNORED, default=DEFAULT_CONF_IGNORED): cv.boolean,
vol.Optional(CONF_INVERT_OPENCLOSE_BUTTONS,
default=DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS): cv.boolean,
vol.Optional(CONF_REFRESH_VALUE, default=DEFAULT_CONF_REFRESH_VALUE):
cv.boolean,
vol.Optional(CONF_REFRESH_DELAY, default=DEFAULT_CONF_REFRESH_DELAY):
cv.positive_int
})
SIGNAL_REFRESH_ENTITY_FORMAT = 'zwave_refresh_entity_{}'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_AUTOHEAL, default=DEFAULT_CONF_AUTOHEAL): cv.boolean,
vol.Optional(CONF_CONFIG_PATH): cv.string,
vol.Optional(CONF_NETWORK_KEY):
vol.All(cv.string, vol.Match(r'(0x\w\w,\s?){15}0x\w\w')),
vol.Optional(CONF_DEVICE_CONFIG, default={}):
vol.Schema({cv.entity_id: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEVICE_CONFIG_GLOB, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEVICE_CONFIG_DOMAIN, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
vol.Optional(CONF_POLLING_INTERVAL, default=DEFAULT_POLLING_INTERVAL):
cv.positive_int,
vol.Optional(CONF_USB_STICK_PATH): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {key: getattr(obj, key) for key
in dir(obj)
if key[0] != '_' and not callable(getattr(obj, key))}
def _value_name(value):
"""Return the name of the value."""
return '{} {}'.format(node_name(value.node), value.label).strip()
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict['values'] = {value_id: _obj_to_dict(value)
for value_id, value in node.values.items()}
_LOGGER.info("FOUND NODE %s \n"
"%s", node.product_name, node_dict)
def get_config_value(node, value_index, tries=5):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
if (value.command_class == const.COMMAND_CLASS_CONFIGURATION
and value.index == value_index):
return value.data
except RuntimeError:
# If we get a runtime error the dict has changed while
# we was looking for a value, just do it again
return None if tries <= 0 else get_config_value(
node, value_index, tries=tries - 1)
return None
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Z-Wave platform (generic part)."""
if discovery_info is None or DATA_NETWORK not in hass.data:
return False
device = hass.data[DATA_DEVICES].get(
discovery_info[const.DISCOVERY_DEVICE], None)
if device is None:
return False
async_add_entities([device])
return True
async def async_setup(hass, config):
"""Set up Z-Wave components."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.data[DATA_ZWAVE_CONFIG] = conf
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data={
CONF_USB_STICK_PATH: conf.get(
CONF_USB_STICK_PATH, DEFAULT_CONF_USB_STICK_PATH),
CONF_NETWORK_KEY: conf.get(CONF_NETWORK_KEY),
}
))
return True
async def async_setup_entry(hass, config_entry):
"""Set up Z-Wave from a config entry.
Will automatically load components to support devices found on the network.
"""
from pydispatch import dispatcher
# pylint: disable=import-error
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
from openzwave.group import ZWaveGroup
config = {}
if DATA_ZWAVE_CONFIG in hass.data:
config = hass.data[DATA_ZWAVE_CONFIG]
# Load configuration
use_debug = config.get(CONF_DEBUG, DEFAULT_DEBUG)
autoheal = config.get(CONF_AUTOHEAL,
DEFAULT_CONF_AUTOHEAL)
device_config = EntityValues(
config.get(CONF_DEVICE_CONFIG),
config.get(CONF_DEVICE_CONFIG_DOMAIN),
config.get(CONF_DEVICE_CONFIG_GLOB))
usb_path = config.get(
CONF_USB_STICK_PATH, config_entry.data[CONF_USB_STICK_PATH])
_LOGGER.info('Z-Wave USB path is %s', usb_path)
# Setup options
options = ZWaveOption(
usb_path,
user_path=hass.config.config_dir,
config_path=config.get(CONF_CONFIG_PATH))
options.set_console_output(use_debug)
if config_entry.data.get(CONF_NETWORK_KEY):
options.addOption("NetworkKey", config_entry.data[CONF_NETWORK_KEY])
await hass.async_add_executor_job(options.lock)
network = hass.data[DATA_NETWORK] = ZWaveNetwork(options, autostart=False)
hass.data[DATA_DEVICES] = {}
hass.data[DATA_ENTITY_VALUES] = []
if use_debug: # pragma: no cover
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED,
ZWaveNetwork.SIGNAL_SCENE_EVENT,
ZWaveNetwork.SIGNAL_NODE_EVENT,
ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED,
ZWaveNetwork
.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Handle new added value to a node on the network."""
# Check if this value should be tracked by an existing entity
for values in hass.data[DATA_ENTITY_VALUES]:
values.check_value(value)
for schema in DISCOVERY_SCHEMAS:
if not check_node_schema(node, schema):
continue
if not check_value_schema(
value,
schema[const.DISC_VALUES][const.DISC_PRIMARY]):
continue
values = ZWaveDeviceEntityValues(
hass, schema, value, config, device_config, registry)
# We create a new list and update the reference here so that
# the list can be safely iterated over in the main thread
new_values = hass.data[DATA_ENTITY_VALUES] + [values]
hass.data[DATA_ENTITY_VALUES] = new_values
component = EntityComponent(_LOGGER, DOMAIN, hass)
registry = await async_get_registry(hass)
def node_added(node):
"""Handle a new node on the network."""
entity = ZWaveNodeEntity(node, network)
def _add_node_to_component():
if hass.data[DATA_DEVICES].get(entity.unique_id):
return
name = node_name(node)
generated_id = generate_entity_id(DOMAIN + '.{}', name, [])
node_config = device_config.get(generated_id)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring node entity %s due to device settings",
generated_id)
return
hass.data[DATA_DEVICES][entity.unique_id] = entity
component.add_entities([entity])
if entity.unique_id:
_add_node_to_component()
return
@callback
def _on_ready(sec):
_LOGGER.info("Z-Wave node %d ready after %d seconds",
entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave node %d not ready after %d seconds, "
"continuing anyway",
entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
hass.add_job(check_has_unique_id, entity, _on_ready, _on_timeout,
hass.loop)
def node_removed(node):
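        """Handle the removal of a node from the network."""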
node_id = node.node_id
node_key = 'node-{}'.format(node_id)
_LOGGER.info("Node Removed: %s",
hass.data[DATA_DEVICES][node_key])
for key in list(hass.data[DATA_DEVICES]):
if not key.startswith('{}-'.format(node_id)):
continue
entity = hass.data[DATA_DEVICES][key]
_LOGGER.info('Removing Entity - value: %s - entity_id: %s',
key, entity.entity_id)
hass.add_job(entity.node_removed())
del hass.data[DATA_DEVICES][key]
entity = hass.data[DATA_DEVICES][node_key]
hass.add_job(entity.node_removed())
del hass.data[DATA_DEVICES][node_key]
def network_ready():
"""Handle the query of all awake nodes."""
_LOGGER.info("Z-Wave network is ready for use. All awake nodes "
"have been queried. Sleeping nodes will be "
"queried when they awake.")
hass.bus.fire(const.EVENT_NETWORK_READY)
def network_complete():
"""Handle the querying of all nodes on network."""
_LOGGER.info("Z-Wave network is complete. All nodes on the network "
"have been queried")
hass.bus.fire(const.EVENT_NETWORK_COMPLETE)
def network_complete_some_dead():
"""Handle the querying of all nodes on network."""
_LOGGER.info("Z-Wave network is complete. All nodes on the network "
"have been queried, but some nodes are marked dead")
hass.bus.fire(const.EVENT_NETWORK_COMPLETE_SOME_DEAD)
dispatcher.connect(
value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(
node_added, ZWaveNetwork.SIGNAL_NODE_ADDED, weak=False)
dispatcher.connect(
node_removed, ZWaveNetwork.SIGNAL_NODE_REMOVED, weak=False)
dispatcher.connect(
network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False)
dispatcher.connect(
network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False)
dispatcher.connect(
network_complete_some_dead,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD, weak=False)
def add_node(service):
"""Switch into inclusion mode."""
_LOGGER.info("Z-Wave add_node have been initialized")
network.controller.add_node()
def add_node_secure(service):
"""Switch into secure inclusion mode."""
_LOGGER.info("Z-Wave add_node_secure have been initialized")
network.controller.add_node(True)
def remove_node(service):
"""Switch into exclusion mode."""
_LOGGER.info("Z-Wave remove_node have been initialized")
network.controller.remove_node()
def cancel_command(service):
"""Cancel a running controller command."""
_LOGGER.info("Cancel running Z-Wave command")
network.controller.cancel_command()
def heal_network(service):
"""Heal the network."""
_LOGGER.info("Z-Wave heal running")
network.heal()
def soft_reset(service):
"""Soft reset the controller."""
_LOGGER.info("Z-Wave soft_reset have been initialized")
network.controller.soft_reset()
def update_config(service):
"""Update the config from git."""
_LOGGER.info("Configuration update has been initialized")
network.controller.update_ozw_config()
def test_network(service):
"""Test the network by sending commands to all the nodes."""
_LOGGER.info("Z-Wave test_network have been initialized")
network.test()
def stop_network(_service_or_event):
"""Stop Z-Wave network."""
_LOGGER.info("Stopping Z-Wave network")
network.stop()
if hass.state == CoreState.running:
hass.bus.fire(const.EVENT_NETWORK_STOP)
def rename_node(service):
"""Rename a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
name = service.data.get(const.ATTR_NAME)
node.name = name
_LOGGER.info(
"Renamed Z-Wave node %d to %s", node_id, name)
def rename_value(service):
"""Rename a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
value = node.values[value_id]
name = service.data.get(const.ATTR_NAME)
value.label = name
_LOGGER.info(
"Renamed Z-Wave value (Node %d Value %d) to %s",
node_id, value_id, name)
def set_poll_intensity(service):
"""Set the polling intensity of a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
value = node.values[value_id]
intensity = service.data.get(const.ATTR_POLL_INTENSITY)
if intensity == 0:
if value.disable_poll():
_LOGGER.info("Polling disabled (Node %d Value %d)",
node_id, value_id)
return
_LOGGER.info("Polling disabled failed (Node %d Value %d)",
node_id, value_id)
else:
if value.enable_poll(intensity):
_LOGGER.info(
"Set polling intensity (Node %d Value %d) to %s",
node_id, value_id, intensity)
return
_LOGGER.info("Set polling intensity failed (Node %d Value %d)",
node_id, value_id)
def remove_failed_node(service):
"""Remove failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to remove zwave node %d", node_id)
network.controller.remove_failed_node(node_id)
def replace_failed_node(service):
"""Replace failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to replace zwave node %d", node_id)
network.controller.replace_failed_node(node_id)
def set_config_parameter(service):
"""Set a config parameter to a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
selection = service.data.get(const.ATTR_CONFIG_VALUE)
size = service.data.get(const.ATTR_CONFIG_SIZE)
for value in (
node.get_values(class_id=const.COMMAND_CLASS_CONFIGURATION)
.values()):
if value.index != param:
continue
if value.type == const.TYPE_BOOL:
value.data = int(selection == 'True')
_LOGGER.info("Setting config parameter %s on Node %s "
"with bool selection %s", param, node_id,
str(selection))
return
if value.type == const.TYPE_LIST:
value.data = str(selection)
_LOGGER.info("Setting config parameter %s on Node %s "
"with list selection %s", param, node_id,
str(selection))
return
if value.type == const.TYPE_BUTTON:
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info("Setting config parameter %s on Node %s "
"with button selection %s", param, node_id,
selection)
return
value.data = int(selection)
_LOGGER.info("Setting config parameter %s on Node %s "
"with selection %s", param, node_id,
selection)
return
node.set_config_param(param, selection, size)
_LOGGER.info("Setting unknown config parameter %s on Node %s "
"with selection %s", param, node_id,
selection)
def refresh_node_value(service):
"""Refresh the specified value from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
node.values[value_id].refresh()
_LOGGER.info("Node %s value %s refreshed", node_id, value_id)
def set_node_value(service):
"""Set the specified value on a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
value = service.data.get(const.ATTR_CONFIG_VALUE)
node = network.nodes[node_id]
node.values[value_id].data = value
_LOGGER.info("Node %s value %s set to %s", node_id, value_id, value)
def print_config_parameter(service):
"""Print a config parameter from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
_LOGGER.info("Config parameter %s on Node %s: %s",
param, node_id, get_config_value(node, param))
def print_node(service):
"""Print all information about z-wave node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
nice_print_node(node)
def set_wakeup(service):
"""Set wake-up interval of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
value = service.data.get(const.ATTR_CONFIG_VALUE)
if node.can_wake_up():
for value_id in node.get_values(
class_id=const.COMMAND_CLASS_WAKE_UP):
node.values[value_id].data = value
_LOGGER.info("Node %s wake-up set to %d", node_id, value)
else:
_LOGGER.info("Node %s is not wakeable", node_id)
def change_association(service):
"""Change an association in the zwave network."""
association_type = service.data.get(const.ATTR_ASSOCIATION)
node_id = service.data.get(const.ATTR_NODE_ID)
target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
group = service.data.get(const.ATTR_GROUP)
instance = service.data.get(const.ATTR_INSTANCE)
node = ZWaveGroup(group, network, node_id)
if association_type == 'add':
node.add_association(target_node_id, instance)
_LOGGER.info("Adding association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
if association_type == 'remove':
node.remove_association(target_node_id, instance)
_LOGGER.info("Removing association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
async def async_refresh_entity(service):
"""Refresh values that specific entity depends on."""
entity_id = service.data.get(ATTR_ENTITY_ID)
async_dispatcher_send(
hass, SIGNAL_REFRESH_ENTITY_FORMAT.format(entity_id))
def refresh_node(service):
"""Refresh all node info."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
node.refresh_info()
def reset_node_meters(service):
"""Reset meter counters of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
instance = service.data.get(const.ATTR_INSTANCE)
node = network.nodes[node_id]
for value in (
node.get_values(class_id=const.COMMAND_CLASS_METER)
.values()):
if value.index != const.INDEX_METER_RESET:
continue
if value.instance != instance:
continue
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info("Resetting meters on node %s instance %s....",
node_id, instance)
return
_LOGGER.info("Node %s on instance %s does not have resettable "
"meters.", node_id, instance)
def heal_node(service):
"""Heal a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
update_return_routes = service.data.get(const.ATTR_RETURN_ROUTES)
node = network.nodes[node_id]
_LOGGER.info("Z-Wave node heal running for node %s", node_id)
node.heal(update_return_routes)
def test_node(service):
"""Send test messages to a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
messages = service.data.get(const.ATTR_MESSAGES)
node = network.nodes[node_id]
_LOGGER.info("Sending %s test-messages to node %s.", messages, node_id)
node.test(messages)
def start_zwave(_service_or_event):
"""Startup Z-Wave network."""
_LOGGER.info("Starting Z-Wave network...")
network.start()
hass.bus.fire(const.EVENT_NETWORK_START)
async def _check_awaked():
"""Wait for Z-wave awaked state (or timeout) and finalize start."""
_LOGGER.debug(
"network state: %d %s", network.state,
network.state_str)
start_time = dt_util.utcnow()
while True:
waited = int((dt_util.utcnow()-start_time).total_seconds())
if network.state >= network.STATE_AWAKED:
# Need to be in STATE_AWAKED before talking to nodes.
_LOGGER.info("Z-Wave ready after %d seconds", waited)
break
elif waited >= const.NETWORK_READY_WAIT_SECS:
# Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
# network to be ready.
_LOGGER.warning(
"Z-Wave not ready after %d seconds, continuing anyway",
waited)
_LOGGER.info(
"final network state: %d %s", network.state,
network.state_str)
break
else:
await asyncio.sleep(1)
hass.async_add_job(_finalize_start)
hass.add_job(_check_awaked)
def _finalize_start():
"""Perform final initializations after Z-Wave network is awaked."""
polling_interval = convert(
config.get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
network.set_poll_interval(polling_interval, False)
poll_interval = network.get_poll_interval()
_LOGGER.info("Z-Wave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_network)
# Register node services for Z-Wave network
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node)
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE,
add_node_secure)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node)
hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND,
cancel_command)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK,
heal_network)
hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset)
hass.services.register(DOMAIN, const.SERVICE_UPDATE_CONFIG,
update_config)
hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK,
test_network)
hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK,
stop_network)
hass.services.register(DOMAIN, const.SERVICE_RENAME_NODE, rename_node,
schema=RENAME_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_RENAME_VALUE,
rename_value,
schema=RENAME_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_CONFIG_PARAMETER,
set_config_parameter,
schema=SET_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_NODE_VALUE,
set_node_value,
schema=SET_NODE_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE_VALUE,
refresh_node_value,
schema=REFRESH_NODE_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_CONFIG_PARAMETER,
print_config_parameter,
schema=PRINT_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_FAILED_NODE,
remove_failed_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REPLACE_FAILED_NODE,
replace_failed_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_CHANGE_ASSOCIATION,
change_association,
schema=CHANGE_ASSOCIATION_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_WAKEUP,
set_wakeup,
schema=SET_WAKEUP_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_NODE,
print_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_ENTITY,
async_refresh_entity,
schema=REFRESH_ENTITY_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE,
refresh_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_RESET_NODE_METERS,
reset_node_meters,
schema=RESET_NODE_METERS_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_POLL_INTENSITY,
set_poll_intensity,
schema=SET_POLL_INTENSITY_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NODE,
heal_node,
schema=HEAL_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_TEST_NODE,
test_node,
schema=TEST_NODE_SCHEMA)
# Setup autoheal
if autoheal:
_LOGGER.info("Z-Wave network autoheal is enabled")
async_track_time_change(hass, heal_network, hour=0, minute=0, second=0)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
hass.services.async_register(DOMAIN, const.SERVICE_START_NETWORK,
start_zwave)
for entry_component in SUPPORTED_PLATFORMS:
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
config_entry, entry_component))
return True
class ZWaveDeviceEntityValues():
"""Manages entity access to the underlying zwave value objects."""
def __init__(self, hass, schema, primary_value, zwave_config,
device_config, registry):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._zwave_config = zwave_config
self._device_config = device_config
self._schema = copy.deepcopy(schema)
self._values = {}
self._entity = None
self._workaround_ignore = False
self._registry = registry
for name in self._schema[const.DISC_VALUES].keys():
self._values[name] = None
self._schema[const.DISC_VALUES][name][const.DISC_INSTANCE] = \
[primary_value.instance]
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
# Check values that have already been discovered for node
for value in self._node.values.values():
self.check_value(value)
self._check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values[name]
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
if not check_node_schema(value.node, self._schema):
return
for name in self._values:
if self._values[name] is not None:
continue
if not check_value_schema(
value, self._schema[const.DISC_VALUES][name]):
continue
self._values[name] = value
if self._entity:
self._entity.value_added()
self._entity.value_changed()
self._check_entity_ready()
def _check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
if self._workaround_ignore:
return
if self._entity is not None:
return
for name in self._schema[const.DISC_VALUES]:
if self._values[name] is None and \
not self._schema[const.DISC_VALUES][name].get(
const.DISC_OPTIONAL):
return
component = self._schema[const.DISC_COMPONENT]
workaround_component = workaround.get_device_component_mapping(
self.primary)
if workaround_component and workaround_component != component:
if workaround_component == workaround.WORKAROUND_IGNORE:
_LOGGER.info("Ignoring Node %d Value %d due to workaround.",
self.primary.node.node_id, self.primary.value_id)
# No entity will be created for this value
self._workaround_ignore = True
return
_LOGGER.debug("Using %s instead of %s",
workaround_component, component)
component = workaround_component
entity_id = self._registry.async_get_entity_id(
component, DOMAIN,
compute_value_unique_id(self._node, self.primary))
if entity_id is None:
value_name = _value_name(self.primary)
entity_id = generate_entity_id(component + '.{}', value_name, [])
node_config = self._device_config.get(entity_id)
# Configure node
_LOGGER.debug("Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Value type=%s, "
"Genre=%s as %s", self._node.node_id,
self._node.generic, self._node.specific,
self.primary.command_class, self.primary.type,
self.primary.genre, component)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring entity %s due to device settings", entity_id)
# No entity will be created for this value
self._workaround_ignore = True
return
polling_intensity = convert(
node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
self.primary.enable_poll(polling_intensity)
platform = import_module('.{}'.format(component),
__name__)
device = platform.get_device(
node=self._node, values=self,
node_config=node_config, hass=self._hass)
if device is None:
# No entity will be created for this value
self._workaround_ignore = True
return
self._entity = device
@callback
def _on_ready(sec):
_LOGGER.info(
"Z-Wave entity %s (node_id: %d) ready after %d seconds",
device.name, self._node.node_id, sec)
self._hass.async_add_job(discover_device, component, device)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave entity %s (node_id: %d) not ready after %d seconds, "
"continuing anyway",
device.name, self._node.node_id, sec)
self._hass.async_add_job(discover_device, component, device)
async def discover_device(component, device):
"""Put device in a dictionary and call discovery on it."""
if self._hass.data[DATA_DEVICES].get(device.unique_id):
return
self._hass.data[DATA_DEVICES][device.unique_id] = device
if component in SUPPORTED_PLATFORMS:
async_dispatcher_send(
self._hass, 'zwave_new_{}'.format(component), device)
else:
await discovery.async_load_platform(
self._hass, component, DOMAIN,
{const.DISCOVERY_DEVICE: device.unique_id},
self._zwave_config)
if device.unique_id:
self._hass.add_job(discover_device, component, device)
else:
self._hass.add_job(check_has_unique_id, device, _on_ready,
_on_timeout, self._hass.loop)
class ZWaveDeviceEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node entity."""
def __init__(self, values, domain):
"""Initialize the z-Wave device."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self.values = values
self.node = values.primary.node
self.values.primary.set_change_verified(False)
self._name = _value_name(self.values.primary)
self._unique_id = self._compute_unique_id()
self._update_attributes()
dispatcher.connect(
self.network_value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
def network_value_changed(self, value):
"""Handle a value change on the network."""
if value.value_id in [v.value_id for v in self.values if v]:
return self.value_changed()
def value_added(self):
"""Handle a new value of this entity."""
pass
def value_changed(self):
"""Handle a changed value for this entity's node."""
self._update_attributes()
self.update_properties()
self.maybe_schedule_update()
async def async_added_to_hass(self):
"""Add device to dict."""
async_dispatcher_connect(
self.hass,
SIGNAL_REFRESH_ENTITY_FORMAT.format(self.entity_id),
self.refresh_from_network)
def _update_attributes(self):
"""Update the node attributes. May only be used inside callback."""
self.node_id = self.node.node_id
self._name = _value_name(self.values.primary)
if not self._unique_id:
self._unique_id = self._compute_unique_id()
if self._unique_id:
self.try_remove_and_add()
if self.values.power:
self.power_consumption = round(
self.values.power.data, self.values.power.precision)
else:
self.power_consumption = None
def update_properties(self):
"""Update on data changes for node values."""
pass
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {
'identifiers': {
(DOMAIN, self.node_id)
},
'manufacturer': self.node.manufacturer_name,
'model': self.node.product_name,
'name': node_name(self.node),
}
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
const.ATTR_NODE_ID: self.node_id,
const.ATTR_VALUE_INDEX: self.values.primary.index,
const.ATTR_VALUE_INSTANCE: self.values.primary.instance,
const.ATTR_VALUE_ID: str(self.values.primary.value_id),
}
if self.power_consumption is not None:
attrs[ATTR_POWER] = self.power_consumption
return attrs
def refresh_from_network(self):
"""Refresh all dependent values from zwave network."""
for value in self.values:
if value is not None:
self.node.refresh_value(value.value_id)
def _compute_unique_id(self):
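        """Compute the unique ID once the node is parsed or ready, otherwise return None."""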
if (is_node_parsed(self.node) and
self.values.primary.label != "Unknown") or \
self.node.is_ready:
return compute_value_unique_id(self.node, self.values.primary)
return None
def compute_value_unique_id(node, value):
"""Compute unique_id a value would get if it were to get one."""
return "{}-{}".format(node.node_id, value.object_id)
| apache-2.0 | -1,543,759,490,632,505,300 | 38.39095 | 79 | 0.59099 | false | 3.964569 | true | false | false |
Luindil/Glassure | glassure/gui/widgets/control/transfer.py | 1 | 3814 | # -*- coding: utf-8 -*-
from ...qt import QtWidgets, QtCore
from ..custom import FlatButton, HorizontalLine, LabelAlignRight
class TransferFunctionWidget(QtWidgets.QWidget):
def __init__(self, *args):
super(TransferFunctionWidget, self).__init__(*args)
self.create_widgets()
self.create_layout()
self.style_widgets()
self.create_signals()
def create_widgets(self):
self.load_std_btn = FlatButton("Load Std")
self.load_std_bkg_btn = FlatButton("Load Std Bkg")
self.load_sample_btn = FlatButton("Load Sample")
self.load_sample_bkg_btn = FlatButton("Load Sample Bkg")
self.std_filename_lbl = LabelAlignRight('')
self.std_bkg_filename_lbl = LabelAlignRight("")
self.sample_filename_lbl = LabelAlignRight("")
self.sample_bkg_filename_lbl = LabelAlignRight("")
self.std_bkg_scaling_sb = QtWidgets.QDoubleSpinBox()
self.std_bkg_scaling_sb.setValue(1.0)
self.std_bkg_scaling_sb.setSingleStep(0.01)
self.sample_bkg_scaling_sb = QtWidgets.QDoubleSpinBox()
self.sample_bkg_scaling_sb.setValue(1.0)
self.sample_bkg_scaling_sb.setSingleStep(0.01)
self.smooth_sb = QtWidgets.QDoubleSpinBox()
self.smooth_sb.setValue(1.0)
self.smooth_sb.setSingleStep(0.1)
def create_layout(self):
self.main_layout = QtWidgets.QVBoxLayout()
self.activate_cb = QtWidgets.QCheckBox("activate")
self.main_layout.addWidget(self.activate_cb)
self.main_layout.addWidget(HorizontalLine())
self.transfer_layout = QtWidgets.QGridLayout()
self.transfer_layout.addWidget(self.load_sample_btn, 0, 0)
self.transfer_layout.addWidget(self.sample_filename_lbl, 0, 1)
self.transfer_layout.addWidget(self.load_sample_bkg_btn, 1, 0)
self.transfer_layout.addWidget(self.sample_bkg_filename_lbl, 1, 1)
self.transfer_layout.addWidget(self.load_std_btn, 2, 0)
self.transfer_layout.addWidget(self.std_filename_lbl, 2, 1)
self.transfer_layout.addWidget(self.load_std_bkg_btn, 3, 0)
self.transfer_layout.addWidget(self.std_bkg_filename_lbl, 3, 1)
self.scaling_gb = QtWidgets.QGroupBox("")
self.scaling_layout = QtWidgets.QGridLayout()
self.scaling_layout.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.Fixed), 0, 0)
self.scaling_layout.addWidget(LabelAlignRight("Sample bkg scaling:"), 0, 1)
self.scaling_layout.addWidget(self.sample_bkg_scaling_sb, 0, 2)
self.scaling_layout.addWidget(LabelAlignRight("Std bkg scaling:"), 1, 1)
self.scaling_layout.addWidget(self.std_bkg_scaling_sb, 1, 2)
self.scaling_layout.addWidget(LabelAlignRight("Smoothing:"), 2, 1)
self.scaling_layout.addWidget(self.smooth_sb, 2, 2)
self.scaling_gb.setLayout(self.scaling_layout)
self.transfer_layout.addWidget(self.scaling_gb, 4, 0, 1, 2)
self.main_layout.addLayout(self.transfer_layout)
self.setLayout(self.main_layout)
def style_widgets(self):
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.setSpacing(5)
self.transfer_layout.setContentsMargins(5, 5, 5, 5)
self.sample_bkg_scaling_sb.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.std_bkg_scaling_sb.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.smooth_sb.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.sample_bkg_scaling_sb.setMinimumWidth(75)
self.std_bkg_scaling_sb.setMinimumWidth(75)
self.smooth_sb.setMinimumWidth(75)
def create_signals(self):
pass
| mit | 7,296,712,362,812,249,000 | 41.377778 | 103 | 0.667016 | false | 3.451584 | false | false | false |
alexgorban/models | official/recommendation/ncf_input_pipeline.py | 1 | 7065 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF model input pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import data_pipeline
NUM_SHARDS = 16
def create_dataset_from_tf_record_files(input_file_pattern,
pre_batch_size,
batch_size,
is_training=True):
"""Creates dataset from (tf)records files for training/evaluation."""
files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)
def make_dataset(files_dataset, shard_index):
"""Returns dataset for sharded tf record files."""
if pre_batch_size != batch_size:
raise ValueError("Pre-batch ({}) size is not equal to batch "
"size ({})".format(pre_batch_size, batch_size))
files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)
dataset = files_dataset.interleave(tf.data.TFRecordDataset)
decode_fn = functools.partial(
data_pipeline.DatasetManager.deserialize,
batch_size=pre_batch_size,
is_training=is_training)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
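  # Read the shards in parallel: each of the NUM_SHARDS branches reads its own
  # subset of the (optionally shuffled) file list via TFRecordDataset.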
dataset = tf.data.Dataset.range(NUM_SHARDS)
map_fn = functools.partial(make_dataset, files)
dataset = dataset.interleave(
map_fn,
cycle_length=NUM_SHARDS,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
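# Illustrative usage sketch (added for clarity; not part of the upstream pipeline).
# The file pattern and batch size below are hypothetical; the only hard requirement
# checked above is that pre_batch_size equals batch_size.
#
#   train_ds = create_dataset_from_tf_record_files(
#       input_file_pattern="/data/ncf/train-*.tfrecord",  # hypothetical path
#       pre_batch_size=16384,
#       batch_size=16384,
#       is_training=True)
#   print(train_ds.element_spec)  # inspect the deserialized feature structure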
def create_dataset_from_data_producer(producer, params):
"""Return dataset online-generating data."""
def preprocess_train_input(features, labels):
"""Pre-process the training data.
This is needed because
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for DUPLICATE_MASK in training data.
Args:
features: Dictionary of features for training.
labels: Training labels.
Returns:
Processed training features.
"""
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
train_input_fn = producer.make_input_fn(is_training=True)
train_input_dataset = train_input_fn(params).map(preprocess_train_input)
def preprocess_eval_input(features):
"""Pre-process the eval data.
This is needed because:
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for VALID_PT_MASK in eval data.
Args:
features: Dictionary of features for evaluation.
Returns:
Processed evaluation features.
"""
labels = tf.cast(tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
fake_valid_pt_mask = tf.cast(
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
eval_input_fn = producer.make_input_fn(is_training=False)
eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)
return train_input_dataset, eval_input_dataset
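# Editorial note: both map functions above pad the feature dicts so that training and
# evaluation batches expose the same keys (DUPLICATE_MASK, VALID_POINT_MASK,
# TRAIN_LABEL_KEY), letting one model input signature serve both phases. A rough
# usage sketch, assuming a producer built by data_pipeline elsewhere in this package:
#
#   train_ds, eval_ds = create_dataset_from_data_producer(producer, params)
#   features = next(iter(train_ds))   # dict of batched tensors (ids, masks, labels)
#   print(sorted(features.keys()))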
def create_ncf_input_data(params,
producer=None,
input_meta_data=None,
strategy=None):
"""Creates NCF training/evaluation dataset.
Args:
params: Dictionary containing parameters for train/evaluation data.
producer: Instance of BaseDataConstructor that generates data online. Must
not be None when params['train_dataset_path'] or
params['eval_dataset_path'] is not specified.
input_meta_data: A dictionary of input metadata to be used when reading data
      from tf record files. Must be specified when params["train_dataset_path"]
is specified.
strategy: Distribution strategy used for distributed training. If specified,
used to assert that evaluation batch size is correctly a multiple of
total number of devices used.
Returns:
(training dataset, evaluation dataset, train steps per epoch,
eval steps per epoch)
Raises:
ValueError: If data is being generated online for when using TPU's.
"""
# NCF evaluation metric calculation logic assumes that evaluation data
  # sample sizes are in multiples of (1 + number of negative samples in
# evaluation) for each device. As so, evaluation batch size must be a
# multiple of (number of replicas * (1 + number of negative samples)).
num_devices = strategy.num_replicas_in_sync if strategy else 1
if (params["eval_batch_size"] % (num_devices *
(1 + rconst.NUM_EVAL_NEGATIVES))):
raise ValueError("Evaluation batch size must be divisible by {} "
"times {}".format(num_devices,
(1 + rconst.NUM_EVAL_NEGATIVES)))
if params["train_dataset_path"]:
assert params["eval_dataset_path"]
train_dataset = create_dataset_from_tf_record_files(
params["train_dataset_path"],
input_meta_data["train_prebatch_size"],
params["batch_size"],
is_training=True)
eval_dataset = create_dataset_from_tf_record_files(
params["eval_dataset_path"],
input_meta_data["eval_prebatch_size"],
params["eval_batch_size"],
is_training=False)
num_train_steps = int(input_meta_data["num_train_steps"])
num_eval_steps = int(input_meta_data["num_eval_steps"])
else:
if params["use_tpu"]:
raise ValueError("TPU training does not support data producer yet. "
"Use pre-processed data.")
assert producer
# Start retrieving data from producer.
train_dataset, eval_dataset = create_dataset_from_data_producer(
producer, params)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return train_dataset, eval_dataset, num_train_steps, num_eval_steps
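# Minimal call sketch (illustrative only; the paths and sizes are placeholders, and
# the metadata keys mirror the ones read above). The prebatch sizes must equal the
# corresponding batch sizes, and eval_batch_size must be divisible by
# (1 + rconst.NUM_EVAL_NEGATIVES) per replica.
#
#   params = {"train_dataset_path": "/data/ncf/train*",  # hypothetical
#             "eval_dataset_path": "/data/ncf/eval*",    # hypothetical
#             "batch_size": 16384, "eval_batch_size": 160000, "use_tpu": False}
#   input_meta_data = {"train_prebatch_size": 16384, "eval_prebatch_size": 160000,
#                      "num_train_steps": 100, "num_eval_steps": 10}
#   train_ds, eval_ds, train_steps, eval_steps = create_ncf_input_data(
#       params, producer=None, input_meta_data=input_meta_data)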
| apache-2.0 | 7,662,879,708,559,198,000 | 37.396739 | 80 | 0.675442 | false | 4.055683 | false | false | false |
protwis/protwis | construct/tool.py | 1 | 81937 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.db.models import Min, Count, Max
from django.conf import settings
from django.views.decorators.cache import cache_page
from django import forms
from construct.models import *
from structure.models import Structure
from protein.models import ProteinConformation, Protein, ProteinSegment, ProteinFamily
from alignment.models import AlignmentConsensus
from common.definitions import AMINO_ACIDS, AMINO_ACID_GROUPS, STRUCTURAL_RULES, STRUCTURAL_SWITCHES
import json
from collections import OrderedDict
import re
import xlrd
import yaml
import os
import time
import pickle
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
class FileUploadForm(forms.Form):
file_source = forms.FileField()
def parse_excel(path):
workbook = xlrd.open_workbook(path)
worksheets = workbook.sheet_names()
d = {}
for worksheet_name in worksheets:
if worksheet_name in d:
print('Error, worksheet with this name already loaded')
continue
d[worksheet_name] = {}
#d[worksheet_name] = OrderedDict()
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols
curr_row = 0 #skip first, otherwise -1
headers = []
for i in range(num_cells):
h = worksheet.cell_value(0, i)
if h=="":
h = "i_"+str(i)
if h in headers:
h += "_"+str(i)
            headers.append(h)  # use the cleaned header name (empty/duplicate-safe)
for curr_row in range(1,num_rows+1):
row = worksheet.row(curr_row)
key = worksheet.cell_value(curr_row, 0)
if key=='':
continue
if key not in d[worksheet_name]:
d[worksheet_name][key] = []
temprow = OrderedDict()
for curr_cell in range(num_cells):
cell_value = worksheet.cell_value(curr_row, curr_cell)
if headers[curr_cell] not in temprow:
temprow[headers[curr_cell]] = cell_value
d[worksheet_name][key].append(temprow)
return d
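# Shape of the value returned by parse_excel (editorial sketch; sheet, header and key
# names are invented):
#
#   d = parse_excel("termo.xlsx")
#   # d == {"A": {"adrb2_human": [OrderedDict([("UniProt", "adrb2_human"),
#   #                                          ("GN", "2x46"), ("MUT", "A"), ...]),
#   #                             ...]},
#   #       ...}
#   # i.e. sheet name -> first-column value -> list of row dicts keyed by header.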
def compare_family_slug(a,b):
a = a.split("_")
b = b.split("_")
if a[0]!=b[0]:
if a[0] == '001':
            # class A doesn't borrow from other classes
return -1, 'ignore'
elif a[0] == '002':
# if Class B1 only use if B2
if b[0]!= '003':
return -1, 'ignore'
elif a[0] == '003':
            # Class B2 only borrows from Class B1
if b[0]!= '002':
return -1, 'ignore'
elif a[0] == '004':
# if Class C ignore others
return -1, 'ignore'
elif a[0] == '006':
# if Class F ignore others
return -1, 'ignore'
elif a[0] == '007':
            # Class Taste2 only borrows from Class A
if b[0]!= '001':
return -1, 'ignore'
return 0,"Different Class"
elif a[1]!=b[1]:
return 1,"Class"
elif a[2]!=b[2]:
# return 2,"Ligand Type" Ignore Ligand Type level for construct Design purposes.
return 1,"Class"
elif a[3]!=b[3]:
return 3,"Receptor Family"
else:
return 4,"Receptor"
def new_tool(request):
simple_selection = request.session.get('selection', False)
if simple_selection == False or not simple_selection.targets:
return redirect("/construct/design")
proteins = []
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
context = {}
context['target'] = proteins[0]
level = proteins[0].family.slug
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
states = list(Structure.objects.filter(protein_conformation__protein__family__slug__startswith=level.split("_")[0]).all().values_list('state__slug', flat = True).distinct())
if 'active' in states:
active_xtals = True
else:
active_xtals = False
rs = Residue.objects.filter(protein_conformation__protein=proteins[0]).prefetch_related('protein_segment','display_generic_number','generic_number')
residues = {}
residues_gn = {}
residues_pos = {}
for r in rs:
segment = r.protein_segment.slug
segment = segment.replace("-","")
if segment not in residues:
residues[segment] = []
residues[segment].append(r)
label = ''
if r.generic_number:
residues_gn[r.generic_number.label] = r
label = r.display_generic_number.label
residues_pos[r.sequence_number] = [r.amino_acid,r.protein_segment.slug,label]
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
inserts = {}
inserts['fusions'] = []
inserts['other'] = {}
for ins in ConstructInsertionType.objects.all().order_by('name','subtype'):
# print(ins.name,ins.subtype,ins.sequence)
if ins.name == 'fusion':
inserts['fusions'].append(ins.subtype)
else:
if ins.name not in inserts['other']:
inserts['other'][ins.name] = []
if ins.subtype not in inserts['other'][ins.name]:
inserts['other'][ins.name].append(ins.subtype)
# fusion, f_results = c.fusion()
# if fusion:
# f_protein = f_results[0][2]
# if f_protein not in inserts['fusions']:
# inserts['fusions'].append(f_protein)
# else:
# for ins in c.insertions.all():
# print(ins)
context['ICL_max'] = {'ICL2': residues['ICL2'][-1].sequence_number, 'ICL3': residues['ICL3'][-1].sequence_number}
context['ICL_min'] = {'ICL2': residues['ICL2'][0].sequence_number,'ICL3': residues['ICL3'][0].sequence_number}
context['residues'] = residues
context['residues_gn'] = residues_gn
context['residues_pos'] = residues_pos
context['class'] = c_level
context['active_xtals'] = active_xtals
context['inserts'] = inserts
context['form'] = FileUploadForm
context['signal_p'] = None
path_to_signal_p = os.sep.join([settings.BASE_DIR, "construct","signal_p.txt"])
with open(path_to_signal_p, "r", encoding='UTF-8') as signal_p:
for row in signal_p:
r = row.split()
if r[0]==proteins[0].entry_name:
context['signal_p'] = r[4]
print(row.split())
#print(residues)
return render(request,'new_tool.html',context)
def tool(request):
simple_selection = request.session.get('selection', False)
proteins = []
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
print(proteins)
context = {}
context['target'] = proteins[0]
level = proteins[0].family.slug
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
states = list(Structure.objects.filter(protein_conformation__protein__family__slug__startswith=level.split("_")[0]).all().values_list('state__slug', flat = True).distinct())
if 'active' in states:
active_xtals = True
else:
active_xtals = False
rs = Residue.objects.filter(protein_conformation__protein=proteins[0]).prefetch_related('protein_segment','display_generic_number','generic_number')
residues = {}
residues_gn = {}
residues_pos = {}
for r in rs:
segment = r.protein_segment.slug
segment = segment.replace("-","")
if segment not in residues:
residues[segment] = []
residues[segment].append(r)
label = ''
if r.generic_number:
residues_gn[r.generic_number.label] = r
label = r.display_generic_number.label
residues_pos[r.sequence_number] = [r.amino_acid,r.protein_segment.slug,label]
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
inserts = {}
inserts['fusions'] = []
inserts['other'] = {}
for ins in ConstructInsertionType.objects.all().order_by('name','subtype'):
# print(ins.name,ins.subtype,ins.sequence)
if ins.name == 'fusion':
inserts['fusions'].append(ins.subtype)
else:
if ins.name not in inserts['other']:
inserts['other'][ins.name] = []
if ins.subtype not in inserts['other'][ins.name]:
inserts['other'][ins.name].append(ins.subtype)
# fusion, f_results = c.fusion()
# if fusion:
# f_protein = f_results[0][2]
# if f_protein not in inserts['fusions']:
# inserts['fusions'].append(f_protein)
# else:
# for ins in c.insertions.all():
# print(ins)
print(inserts)
context['residues'] = residues
context['residues_gn'] = residues_gn
context['residues_pos'] = residues_pos
context['class'] = c_level
context['active_xtals'] = active_xtals
context['inserts'] = inserts
context['form'] = FileUploadForm
#print(residues)
return render(request,'tool.html',context)
@cache_page(60 * 60 * 24 * 7)
def json_fusion(request, slug, **response_kwargs):
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
#proteins = Construct.objects.all().values_list('protein', flat = True)
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions')
jsondata = "glyco"
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_palmi(request, slug, **response_kwargs):
start_time = time.time()
seq = Protein.objects.filter(entry_name=slug).values_list('sequence', flat = True).get()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug,protein_segment__slug__in=['H8','C-term']).order_by('sequence_number').prefetch_related('protein_segment')
residues = {}
seq = ''
end_h8 = 0
start_h8 = 0
for r in rs:
if not start_h8 and r.protein_segment.slug == 'H8':
start_h8 = r.sequence_number
if not end_h8 and r.protein_segment.slug == 'C-term':
end_h8 = r.sequence_number-1 #end_h8 was prev residue
elif end_h8 and r.sequence_number-10>end_h8:
continue
seq += r.amino_acid
residues[r.sequence_number] = r.protein_segment.slug
    # scan for cysteines in H8 / proximal C-term (candidate palmitoylation sites)
p = re.compile("C")
#print('all')
mutations_all = []
for m in p.finditer(seq):
mutations_all.append([m.start()+start_h8,"A",'','',m.group(),residues[m.start()+start_h8]])
palmi = OrderedDict()
palmi['']= mutations_all
jsondata = palmi
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("palmi",diff)
return HttpResponse(jsondata, **response_kwargs)
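# Editorial note on the JSON emitted above: a single empty key maps to a list of
# [position, "A", "", "", "C", segment] entries, one per cysteine found in helix 8 or
# the first ~10 residues of the C-terminus, e.g. (invented receptor)
#
#   {"": [[341, "A", "", "", "C", "H8"], [345, "A", "", "", "C", "C-term"]]}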
@cache_page(60 * 60 * 24 * 7)
def json_glyco(request, slug, **response_kwargs):
start_time = time.time()
seq = Protein.objects.filter(entry_name=slug).values_list('sequence', flat = True).get()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment')
residues = {}
for r in rs:
residues[r.sequence_number] = r.protein_segment.slug
#No proline!
p = re.compile("N[^P][TS]")
#print('all')
mutations_all = []
matches = re.finditer(r'(?=([N][^P][TS]))',seq)
matches_seq = re.findall(r'(?=([N][^P][TS]))',seq)
#{"all": [[39, "Q", "", "", "NTS", "N-term"], [203, "Q", "", "", "NNT", "ECL2"]], "mammalian": [[205, "V", 206, "V", "TTCVLNDPN", "ECL2"]]}
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
#print(m.start(), m.group())
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
mutations_all.append([m.start()+1,"Q",'','',matches_seq[i],residues[m.start()+1]])
#print('mamalian')
#p = re.compile("[TS]{2}[A-Z]{1,11}[N]", overlapped=True)
matches = re.finditer(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
matches_seq = re.findall(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
#matches = re.findall(r'(?=(\w\w))', seq)
#print(matches)
mutations_mammalian = []
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
if matches_seq[i][0]=="T":
pos0 = "V"
if matches_seq[i][1]=="T":
pos1 = "V"
if matches_seq[i][0]=="S":
pos0 = "A"
if matches_seq[i][1]=="S":
pos1 = "A"
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
mutations_mammalian.append([m.start()+1,pos0,m.start()+2,pos1,matches_seq[i],residues[m.start()+1]])
glyco = OrderedDict()
glyco['n-linked']= mutations_all
glyco['o-linked'] = mutations_mammalian
jsondata = glyco
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("glyco",diff)
return HttpResponse(jsondata, **response_kwargs)
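# Editorial sketch of the two sequon scans above on a toy sequence (1-based positions
# come from the residue table; the sequence below is invented):
#
#   seq = "MNATSSTTPLNGS"
#   re.findall(r'(?=([N][^P][TS]))', seq)
#   # -> ['NAT', 'NGS']                              N-linked candidates (N-X-S/T, X != P)
#   re.findall(r'(?=([TS]{2}[A-Z]{1,10}[N]))', seq)
#   # -> ['TSSTTPLN', 'SSTTPLN', 'STTPLN', 'TTPLN']  overlapping O-linked candidates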
@cache_page(60 * 60 * 24 * 7)
def json_icl3(request, slug, **response_kwargs):
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
tm5_start = {}
tm5_end = {}
tm6_start = {}
tm6_end = {}
tm5_50 = {}
tm6_50 = {}
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM5').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm5_start[pc.protein.entry_name] = pc.start
tm5_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM6').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm6_start[pc.protein.entry_name] = pc.start
tm6_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__generic_number__label__in=['5x50','6x50']).annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm5_50[pc.protein.entry_name] = pc.start
tm6_50[pc.protein.entry_name] = pc.end
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
states = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
if pdb not in states:
states[pdb] = state
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
#print(pdb,deletion.start,deletion.end)
if deletion.start > tm5_start[entry_name] and deletion.start < tm6_end[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
#deletions[entry_name][pdb] = [tm5_end[entry_name],tm6_start[entry_name],deletion.start,deletion.end,deletion.start-tm5_end[entry_name],tm6_start[entry_name]-deletion.end]
deletions[d_level_name][entry_name][pdb] = [deletion.start-tm5_50[entry_name]-1,tm6_50[entry_name]-deletion.end-1,state,str(fusion),f_protein]
# if (str(fusion)=='icl3'):
# print(entry_name,pdb,50+deletion.start-tm5_50[entry_name],50-(tm6_50[entry_name]-deletion.end-1),str(fusion),f_protein)
# for pdb,state in sorted(states.items()):
# print(pdb,"\t",state)
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("icl3",diff)
return HttpResponse(jsondata, **response_kwargs)
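# Editorial worked example of the offsets stored above: for a construct whose ICL3
# deletion spans residues 231-262, with 5x50 at residue 225 and 6x50 at residue 290
# (all numbers invented), the entry becomes
#
#   [231 - 225 - 1, 290 - 262 - 1, state, fusion_flag, fusion_protein]
#   == [5, 27, "inactive", "icl3", "BRIL"]
#
# i.e. how many residues are retained after 5x50 and before 6x50.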
@cache_page(60 * 60 * 24 * 7)
def json_icl2(request, slug, **response_kwargs):
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
tm3_start = {}
tm3_end = {}
tm4_start = {}
tm4_end = {}
tm3_50 = {}
tm4_50 = {}
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM3').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm3_start[pc.protein.entry_name] = pc.start
tm3_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM4').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm4_start[pc.protein.entry_name] = pc.start
tm4_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__generic_number__label__in=['3x50','4x50']).annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm3_50[pc.protein.entry_name] = pc.start
tm4_50[pc.protein.entry_name] = pc.end
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
states = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
if pdb not in states:
states[pdb] = state
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
#print(pdb,deletion.start,deletion.end)
if deletion.start > tm3_start[entry_name] and deletion.start < tm4_end[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
#deletions[entry_name][pdb] = [tm5_end[entry_name],tm6_start[entry_name],deletion.start,deletion.end,deletion.start-tm5_end[entry_name],tm6_start[entry_name]-deletion.end]
deletions[d_level_name][entry_name][pdb] = [deletion.start-tm3_50[entry_name]-1,tm4_50[entry_name]-deletion.end-1,state,str(fusion),f_protein]
# for pdb,state in sorted(states.items()):
# print(pdb,"\t",state)
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("icl2",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_nterm(request, slug, **response_kwargs):
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM1').annotate(start=Min('residue__sequence_number'))
#pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__generic_number__label__in=['1x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
tm1_start = {}
for pc in pconfs:
tm1_start[pc.protein.entry_name] = pc.start
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
if deletion.start < tm1_start[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
deletions[d_level_name][entry_name][pdb] = [deletion.start,deletion.end-1, tm1_start[entry_name]-deletion.end-1,state,str(fusion),f_protein]
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("nterm",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_cterm(request, slug, **response_kwargs):
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
# pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number'))
# cterm_start = {}
# for pc in pconfs:
# cterm_start[pc.protein.entry_name] = pc.start
# pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number')).values_list('protein__entry_name','start','residue__generic_number__label')
#pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__generic_number__label__in=['8x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number'))
cterm_start = {}
for pc in pconfs:
cterm_start[pc.protein.entry_name] = pc.start
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
if deletion.start >= cterm_start[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
deletions[d_level_name][entry_name][pdb] = [deletion.start,deletion.end, deletion.start-cterm_start[entry_name],state,str(fusion),f_protein]
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cterm",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def thermostabilising(request, slug, **response_kwargs):
start_time = time.time()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment','display_generic_number','generic_number')
wt_lookup = {}
wt_lookup_pos = {}
for r in rs:
if r.generic_number:
gn = r.generic_number.label
wt_lookup[gn] = [r.amino_acid, r.sequence_number]
pos = r.sequence_number
wt_lookup_pos[pos] = [r.amino_acid]
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
path = os.sep.join([settings.DATA_DIR, 'structure_data', 'construct_data', 'termo.xlsx'])
d = parse_excel(path)
if c_level in d:
termo = d[c_level]
else:
termo = []
results = OrderedDict()
results['1'] = {}
results['2'] = {} #fixed mut
results['3'] = {} #fixed wt
for mut in termo:
gn = mut['GN']
mut_aa = mut['MUT']
wt_aa = mut['WT']
entry_name = mut['UniProt']
pos = int(mut['POS'])
pdb = mut['PDB']
if mut['Effect'] != 'Thermostabilising':
continue #only thermo!
if gn is "":
continue
        if gn in wt_lookup and ((entry_name == slug) or (entry_name.split('_')[0] == slug.split('_')[0] and wt_aa == wt_lookup[gn][0])):
if gn not in results['1']:
results['1'][gn] = {}
if mut_aa not in results['1'][gn]:
results['1'][gn][mut_aa] = {'pdbs':[], 'hits':0, 'wt':wt_lookup[gn]}
if mut['PDB'] not in results['1'][gn][mut_aa]['pdbs']:
results['1'][gn][mut_aa]['pdbs'].append(pdb)
results['1'][gn][mut_aa]['hits'] += 1
if gn:
if gn in wt_lookup:
if gn not in results['2']:
results['2'][gn] = {}
if mut_aa not in results['2'][gn]:
results['2'][gn][mut_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn]}
if entry_name not in results['2'][gn][mut_aa]['proteins']:
results['2'][gn][mut_aa]['proteins'].append(entry_name)
results['2'][gn][mut_aa]['hits'] += 1
if wt_lookup[gn][0] == wt_aa:
if gn not in results['3']:
results['3'][gn] = {}
if wt_aa not in results['3'][gn]:
results['3'][gn][wt_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn], 'muts':[]}
if entry_name not in results['3'][gn][wt_aa]['proteins']:
results['3'][gn][wt_aa]['proteins'].append(entry_name)
results['3'][gn][wt_aa]['hits'] += 1
if mut_aa not in results['3'][gn][wt_aa]['muts']:
results['3'][gn][wt_aa]['muts'].append(mut_aa)
temp = {}
for gn, vals1 in results['2'].items():
for mut_aa, vals2 in vals1.items():
if vals2['hits']>1:
if gn not in temp:
temp[gn] = {}
if mut_aa not in temp[gn]:
temp[gn][mut_aa] = vals2
#results['2'][gn].pop(mut_aa, None)
results['2'] = temp
temp_single = {}
temp = {}
for gn, vals1 in results['3'].items():
for mut_aa, vals2 in vals1.items():
if vals2['hits']>1:
if gn not in temp:
temp[gn] = {}
if mut_aa not in temp[gn]:
temp[gn][mut_aa] = vals2
#results['2'][gn].pop(mut_aa, None)
elif vals2['hits']==1:
if gn not in temp_single:
temp_single[gn] = {}
if mut_aa not in temp_single[gn]:
temp_single[gn][mut_aa] = vals2
results['3'] = temp
results['4'] = temp_single
jsondata = results
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("termo",diff)
return HttpResponse(jsondata, **response_kwargs)
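# Editorial summary of the grouping returned above (keys of the JSON):
#   '1' - thermostabilising mutations reported for this receptor (or the same gene in
#         another species with a matching wild-type residue)
#   '2' - generic positions where the same mutant amino acid was used in more than one
#         record across the class
#   '3' - generic positions where the same wild-type amino acid (matching the target)
#         was mutated in more than one record
#   '4' - the single-hit leftovers from the '3' grouping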
@cache_page(60 * 60 * 24 * 7)
def structure_rules(request, slug, **response_kwargs):
start_time = time.time()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment','display_generic_number','generic_number')
wt_lookup = {}
wt_lookup_pos = {}
for r in rs:
if r.generic_number:
gn = r.generic_number.label
wt_lookup[gn] = [r.amino_acid, r.sequence_number]
pos = r.sequence_number
wt_lookup_pos[pos] = [r.amino_acid]
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
# path = os.sep.join([settings.DATA_DIR, 'structure_data', 'construct_data', 'structure_rules.xlsx'])
# d = parse_excel(path)
# d_clean = {}
# regex = r"(\d+)x(\d+)"
# for rule_class, values in d.items():
# d_clean[rule_class] = []
# for rule in values:
# if rule['Type']!="Structure-based":
# # Only use structure based ones in this function
# continue
# if re.search(regex, rule['Definition']):
# match = re.search(regex, rule['Definition'])
# gn = match.group(1) + "x" + match.group(2)
# print(rule['Definition'],gn)
# else:
# continue
# regex = r"(\d+)x(\d+)"
# if re.search(regex, rule['Definition']):
# match = re.search(regex, rule['Definition'])
# rule['Generic Position'] = match.group(1) + "x" + match.group(2)
# else:
# continue
# d_clean[rule_class].append(rule)
# # print(d)
# print(json.dumps(d_clean,sort_keys=True, indent=4))
d = STRUCTURAL_RULES
# print(d)
if c_level in d:
rules = d[c_level]
else:
rules = []
results = OrderedDict()
results['active'] = {}
results['inactive'] = {} #fixed mut
for rule in rules:
# if rule['Type']!="Structure-based":
# # Only use structure based ones in this function
# continue
# regex = r"(\d+)x(\d+)"
# if re.search(regex, rule['Definition']):
# match = re.search(regex, rule['Definition'])
# gn = match.group(1) + "x" + match.group(2)
# print(rule['Definition'],gn)
# else:
# continue
gn = rule['Generic Position']
mut_aa = rule['Mut AA']
wt_aas = rule['Wt AA'].split("/")
definition = rule['Design Principle']+" "+rule['Addition / Removal']
state = rule['State'].lower()
valid = False
if gn in wt_lookup:
for wt_aa in wt_aas:
if wt_aa=='X' and wt_lookup[gn][0]!=mut_aa: #if universal but not mut aa
valid = True
elif wt_lookup[gn][0]==wt_aa:
valid = True
if valid:
mut = {'wt':wt_lookup[gn][0], 'gn': gn, 'pos':wt_lookup[gn][1], 'mut':mut_aa, 'definition':definition}
if state=='all':
if gn not in results['active']:
results['active'][gn] = []
if gn not in results['inactive']:
results['inactive'][gn] = []
results['active'][gn].append(mut)
results['inactive'][gn].append(mut)
else:
if gn not in results[state]:
results[state][gn] = []
results[state][gn].append(mut)
# entry_name = mut['UniProt']
# pos = int(mut['POS'])
# pdb = mut['PDB']
# if mut['Effect'] != 'Thermostabilising':
# continue #only thermo!
# if entry_name == slug:
# if gn not in results['1']:
# results['1'][gn] = {}
# if mut_aa not in results['1'][gn]:
# results['1'][gn][mut_aa] = {'pdbs':[], 'hits':0, 'wt':wt_lookup[gn]}
# if mut['PDB'] not in results['1'][gn][mut_aa]['pdbs']:
# results['1'][gn][mut_aa]['pdbs'].append(pdb)
# results['1'][gn][mut_aa]['hits'] += 1
# if gn:
# if gn in wt_lookup:
# if gn not in results['2']:
# results['2'][gn] = {}
# if mut_aa not in results['2'][gn]:
# results['2'][gn][mut_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn]}
# if entry_name not in results['2'][gn][mut_aa]['proteins']:
# results['2'][gn][mut_aa]['proteins'].append(entry_name)
# results['2'][gn][mut_aa]['hits'] += 1
# if wt_lookup[gn][0] == wt_aa:
# if gn not in results['3']:
# results['3'][gn] = {}
# if wt_aa not in results['3'][gn]:
# results['3'][gn][wt_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn], 'muts':[]}
# if entry_name not in results['3'][gn][wt_aa]['proteins']:
# results['3'][gn][wt_aa]['proteins'].append(entry_name)
# results['3'][gn][wt_aa]['hits'] += 1
# if mut_aa not in results['3'][gn][wt_aa]['muts']:
# results['3'][gn][wt_aa]['muts'].append(mut_aa)
# temp = {}
# for gn, vals1 in results['2'].items():
# for mut_aa, vals2 in vals1.items():
# if vals2['hits']>1:
# if gn not in temp:
# temp[gn] = {}
# if mut_aa not in temp[gn]:
# temp[gn][mut_aa] = vals2
# #results['2'][gn].pop(mut_aa, None)
# results['2'] = temp
# temp = {}
# for gn, vals1 in results['3'].items():
# for mut_aa, vals2 in vals1.items():
# if vals2['hits']>1:
# if gn not in temp:
# temp[gn] = {}
# if mut_aa not in temp[gn]:
# temp[gn][mut_aa] = vals2
# #results['2'][gn].pop(mut_aa, None)
# results['3'] = temp
jsondata = results
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("rules",diff)
return HttpResponse(jsondata, **response_kwargs)
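# Editorial sketch of the structure returned above (values invented): rules from
# STRUCTURAL_RULES that match the target sequence are grouped by the state they
# apply to,
#
#   {"active":   {"3x50": [{"wt": "R", "gn": "3x50", "pos": 131, "mut": "M",
#                           "definition": "..."}]},
#    "inactive": {...}}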
@cache_page(60 * 60 * 24 * 7)
def mutations(request, slug, **response_kwargs):
from django.db import connection
start_time = time.time()
protein = Protein.objects.get(entry_name=slug)
protein_class_slug = protein.family.slug.split("_")[0]
protein_rf_name = protein.family.parent.name
protein_rf_slug = protein.family.parent.slug
protein_rf_count = ProteinFamily.objects.filter(parent__slug=protein_rf_slug).count()
# Grab thermostabilising mutations
key = "CD_all_thermo_mutations_class_%s" % protein_class_slug
mutations = cache.get(key)
if not mutations:
mutations = []
mutations_thermo = ConstructMutation.objects.filter(effects__slug='thermostabilising', construct__protein__family__parent__parent__parent__slug=protein_class_slug).all()\
.prefetch_related(
# "construct__structure__state",
"residue__generic_number",
# "residue__protein_segment",
"construct__protein__family__parent__parent__parent",
"construct__crystal"
)
for mutant in mutations_thermo:
if not mutant.residue.generic_number:
continue
prot = mutant.construct.protein
p_receptor = prot.family.parent.name
real_receptor = prot.entry_name
pdb = mutant.construct.crystal.pdb_code
gn = mutant.residue.generic_number.label
mutations.append(([mutant.sequence_number,mutant.wild_type_amino_acid,mutant.mutated_amino_acid],real_receptor,pdb, p_receptor,gn))
cache.set(key,mutations,60*60*24)
# Build current target residue GN mapping
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__isnull=False).prefetch_related('generic_number', 'protein_segment')
# Build a dictionary to know how far a residue is from segment end/start
# Used for propensity removals
start_end_segments = {}
for r in rs:
if r.protein_segment.slug not in start_end_segments:
start_end_segments[r.protein_segment.slug] = {'start':r.sequence_number}
start_end_segments[r.protein_segment.slug]['end'] = r.sequence_number
wt_lookup = {}
GP_residues_in_target = []
for r in rs:
gn = r.generic_number.label
from_start = r.sequence_number-start_end_segments[r.protein_segment.slug]['start']
from_end = start_end_segments[r.protein_segment.slug]['end'] - r.sequence_number
wt_lookup[gn] = [r.amino_acid, r.sequence_number,r.protein_segment.slug, r.display_generic_number.label]
if r.amino_acid in ["G","P"] and from_start>=4 and from_end>=4:
# build a list of potential GP removals (ignore those close to helix borders)
GP_residues_in_target.append(gn)
# Go through all mutations and find groupings (common)
mutation_list = OrderedDict()
for mutation in mutations:
pos = mutation[0][0]
mut_wt = mutation[0][1]
mut_mut = mutation[0][2]
entry_name = mutation[1]
pdb = mutation[2]
family = mutation[3]
gn = mutation[4]
# First do the ones with same WT
full_mutation = "%s_%s_%s" % (gn,mut_wt,"X")
if gn in wt_lookup and wt_lookup[gn][0]==mut_wt:
# Only use those that have the same WT residue at GN
if full_mutation not in mutation_list:
mutation_list[full_mutation] = {'proteins':[], 'hits':0, 'mutation':[[],[]], 'wt':'', 'pdbs':[], 'protein_families': []}
entry_name = mutation[1].split("_")[0]
if entry_name not in mutation_list[full_mutation]['proteins']:
mutation_list[full_mutation]['proteins'].append(entry_name)
mutation_list[full_mutation]['hits'] += 1
mutation_list[full_mutation]['mutation'][0].append(mut_wt)
mutation_list[full_mutation]['mutation'][1].append(mut_mut)
if gn in wt_lookup:
mutation_list[full_mutation]['wt'] = wt_lookup[gn]
if family not in mutation_list[full_mutation]['protein_families']:
mutation_list[full_mutation]['protein_families'].append(family)
if pdb not in mutation_list[full_mutation]['pdbs']:
mutation_list[full_mutation]['pdbs'].append(pdb)
# Second, check those with same mutated AA
full_mutation = "%s_%s_%s" % (gn,"X",mut_mut)
if gn in wt_lookup and wt_lookup[gn][0]!=mut_mut:
if full_mutation not in mutation_list:
mutation_list[full_mutation] = {'proteins':[], 'hits':0, 'mutation':[[],[]], 'wt':'', 'pdbs':[], 'protein_families': []}
entry_name = mutation[1].split("_")[0]
if entry_name not in mutation_list[full_mutation]['proteins']:
mutation_list[full_mutation]['proteins'].append(entry_name)
mutation_list[full_mutation]['hits'] += 1
mutation_list[full_mutation]['mutation'][0].append(mut_wt)
mutation_list[full_mutation]['mutation'][1].append(mut_mut)
if gn in wt_lookup:
mutation_list[full_mutation]['wt'] = wt_lookup[gn]
if family not in mutation_list[full_mutation]['protein_families']:
mutation_list[full_mutation]['protein_families'].append(family)
if pdb not in mutation_list[full_mutation]['pdbs']:
mutation_list[full_mutation]['pdbs'].append(pdb)
# Go through the previous list and filter with rules and add rule matches
simple_list = OrderedDict()
mutation_list = OrderedDict(sorted(mutation_list.items(), key=lambda x: x[1]['hits'],reverse=True))
for gn, vals in mutation_list.items():
definition_matches = []
if gn.split("_")[1] == "X":
            # The rules below only apply to mutations that share the same mutated AA
if slug.split("_")[0] in vals['proteins']:
# Check if same receptor
definition_matches.append([1,'same_receptor'])
elif protein_rf_name in vals['protein_families']:
# Check if same receptor receptor family
definition_matches.append([2,'same_receptor_family'])
elif len(vals['protein_families'])<2:
# If not same receptor or receptor family and not in two receptor families,
# it is just a single match on position used in B-F class
if protein_class_slug!='001':
                    # class A requires two distinct receptor families, so skip this rule for class A
definition_matches.append([4,'same_pos'])
if len(vals['protein_families'])>=2:
# If mutation is seen in >=2 receptor families
# Put this one outside the above logic, to allow multi definitions
definition_matches.append([4,'hotspot_mut'])
# # Check for membrane binding
# if 'K' in vals['mutation'][1] or 'R' in vals['mutation'][1]:
# if vals['wt'][0] not in ['R','K']:
# # Only if not R,K already
# definition_matches.append([2,'membrane_binding'])
# elif vals['wt'][0] in ['K'] and 'R' in vals['mutation'][1]:
# # If K
# definition_matches.append([3,'membrane_binding_weak'])
else:
            # The rules below are for a shared WT residue (but a different mutant AA)
if len(vals['protein_families'])>=2:
definition_matches.append([2,'hotspot_wt'])
elif protein_rf_name not in vals['protein_families']:
# if receptor family not the one, then check if it's a same wt match for B-F
if protein_class_slug!='001':
                    # class A requires two distinct receptor families, so skip this rule for class A
definition_matches.append([3,'same_wt'])
if definition_matches:
min_priority = min(x[0] for x in definition_matches)
pos = vals['wt'][1]
wt_aa = vals['wt'][0]
segment = vals['wt'][2]
origin = {'pdbs': vals['pdbs'], 'protein_families': vals['protein_families'], 'proteins': vals['proteins'], 'hits':vals['hits']}
gpcrdb = gn.split("_")[0]
for mut_aa in set(vals['mutation'][1]):
if mut_aa!=wt_aa:
mut = {'wt_aa': wt_aa, 'segment': segment, 'pos': pos, 'gpcrdb':gpcrdb, 'mut_aa':mut_aa, 'definitions' : definition_matches, 'priority': min_priority, 'origin': [origin]}
key = '%s%s%s' % (wt_aa,pos,mut_aa)
# print(key,mut)
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += definition_matches
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
simple_list[key]['origin'].append(origin)
# TODO : overlay with other types of mutations, e.g. surfacing expressing
# Conservation rules and Helix propensity rules
CONSERVED_RESIDUES = 'ADEFIJLMNQSTVY'
POS_RESIDUES = 'HKR'
if protein_rf_count>1:
# Only perform on RF families with more than one member
rf_conservation = calculate_conservation(slug=protein_rf_slug)
rf_cutoff = 7
rf_cutoff_pos = 4
rf_conservation_priority = 3
definition_matches = [rf_conservation_priority,'conservation_rf']
for cons_gn, aa in rf_conservation.items():
if cons_gn in wt_lookup and wt_lookup[cons_gn][0]!=aa[0] and aa[0]!="+":
# If cons_gn exist in target but AA is not the same
if (int(aa[1])>=rf_cutoff and aa[0] in CONSERVED_RESIDUES): # or (int(aa[1])>=rf_cutoff_pos and aa[0] in POS_RESIDUES) # EXCLUDE POSITIVE RULE AT RF LEVEL
                    # differentiate between the two rules for pos or the other residues as they require different cons levels
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':aa[0], 'definitions' : [definition_matches], 'priority': rf_conservation_priority}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],aa[0])
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# Apply helix propensity rule (P)
if cons_gn in GP_residues_in_target:
remove = False
if wt_lookup[cons_gn][0]=='P':
# If it is P then only change if ONLY P
if aa[2]['P'][0] == 1:
# if only one count of P (will be this P)
remove = True
# elif wt_lookup[cons_gn][0]=='G':
# print('it is G',aa[2]['G'])
# cut_offs = {'001':0.03, '002': 0.21, '003': 0.19, '004': 0.21 ,'006': 0.21}
# if protein_class_slug in cut_offs:
# cut_off = cut_offs[protein_class_slug]
# print('cutoff',cut_off,cut_off>aa[2]['G'][1])
# if cut_off>aa[2]['G'][1]:
# # if cut_off is larger than conserved fraction of G, then it can be removed
# remove = True
if remove:
rule = [3,"remove_unconserved_%s" % wt_lookup[cons_gn][0]]
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':'A', 'definitions' : [rule], 'priority': 3}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],'A')
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [rule]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
class_conservation = calculate_conservation(slug=protein_class_slug)
class_cutoff = 7
class_cutoff_pos = 4
class_conservation_priority = 3
definition_matches = [class_conservation_priority,'conservation_class']
for cons_gn, aa in class_conservation.items():
if cons_gn in wt_lookup and wt_lookup[cons_gn][0]!=aa[0] and aa[0]!="+":
# If cons_gn exist in target but AA is not the same
if (int(aa[1])>=class_cutoff and aa[0] in CONSERVED_RESIDUES) or (int(aa[1])>=class_cutoff_pos and aa[0] in POS_RESIDUES):
                # differentiate between the two rules for pos or the other residues as they require different cons levels
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':aa[0], 'definitions' : [definition_matches], 'priority': class_conservation_priority}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],aa[0])
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# Apply helix propensity rule (P+G)
if cons_gn in GP_residues_in_target:
remove = False
if wt_lookup[cons_gn][0]=='P':
# If it is P then only change if ONLY P
if aa[2]['P'][0] == 1:
# if only one count of P (will be this P)
remove = True
elif wt_lookup[cons_gn][0]=='G':
cut_offs = {'001':0.03, '002': 0.21, '003': 0.19, '004': 0.21 ,'006': 0.21}
if protein_class_slug in cut_offs:
cut_off = cut_offs[protein_class_slug]
if cut_off>aa[2]['G'][1]:
# if cut_off is larger than conserved fraction of G, then it can be removed
remove = True
if remove:
rule = [3,"remove_unconserved_%s" % wt_lookup[cons_gn][0]]
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':'A', 'definitions' : [rule], 'priority': 3}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],'A')
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [rule]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# # Apply helix propensity rules from class when receptor family only has one member or non-classA
# if (protein_rf_count==1 or protein_class_slug!='001') and cons_gn in GP_residues_in_target:
# if not (wt_lookup[cons_gn][0]==aa[0] and int(aa[1])>5):
# rule = [2,"remove_unconserved_%s" % wt_lookup[cons_gn][0]]
# mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':'A', 'definitions' : [rule], 'priority': 2}
# key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],'A')
# if key not in simple_list:
# simple_list[key] = mut
# else:
# if rule not in simple_list[key]['definitions']:
# # Do not add this rule if it is already there (From RF check)
# simple_list[key]['definitions'] += [rule]
# min_priority = min(x[0] for x in simple_list[key]['definitions'])
# simple_list[key]['priority'] = min_priority
if protein_class_slug in ['001','002','003']:
# Only perform the xtal cons rules for A, B1 and B2
xtals_conservation = cache.get("CD_xtal_cons_"+protein_class_slug)
if not xtals_conservation:
c_proteins = Construct.objects.filter(protein__family__slug__startswith = protein_class_slug).all().values_list('protein__pk', flat = True).distinct()
xtal_proteins = Protein.objects.filter(pk__in=c_proteins)
if len(xtal_proteins)>0:
xtals_conservation = calculate_conservation(proteins=xtal_proteins)
cache.set("CD_xtal_cons_"+protein_class_slug,xtals_conservation,60*60*24)
xtals_cutoff = 7
xtals_cutoff_pos = 4
xtals_conservation_priority = 3
definition_matches = [xtals_conservation_priority,'conservation_xtals']
        for cons_gn, aa in (xtals_conservation or {}).items():
if cons_gn in wt_lookup and wt_lookup[cons_gn][0]!=aa[0] and aa[0]!="+":
# If cons_gn exist in target but AA is not the same
if (int(aa[1])>=xtals_cutoff and aa[0] in CONSERVED_RESIDUES) or (int(aa[1])>=xtals_cutoff_pos and aa[0] in POS_RESIDUES):
                    # differentiate between the two rules for pos or the other residues as they require different cons levels
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':aa[0], 'definitions' : [definition_matches], 'priority': xtals_conservation_priority}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],aa[0])
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# path = os.sep.join([settings.DATA_DIR, 'structure_data', 'Mutation_Rules.xlsx'])
# d = parse_excel(path)
# print(json.dumps(d,sort_keys=True, indent=4))
#print(wt_lookup)
for c, v in STRUCTURAL_SWITCHES.items():
match = False
if protein_class_slug in ['001'] and c=='A':
match = True
elif protein_class_slug in ['002','003'] and c=='B':
match = True
elif protein_class_slug in ['002'] and c=='B1':
match = True
elif protein_class_slug in ['003'] and c=='B2':
match = True
elif protein_class_slug in ['004'] and c=='C':
match = True
if match:
for r in v:
try:
aa_1 = [r['AA1 Pos'],r['Match AA1'],r['Inactive1'],r['Active1']]
aa_2 = [r['AA2 Pos'],r['Match AA2'],r['Inactive2'],r['Active2']]
prio = r['Prio']
motif = r['Motif']
match_1 = False
if r['Match AA1']=='X' or wt_lookup[aa_1[0]][0] in r['Match AA1']:
match_1 = True
match_2 = False
if r['Match AA2']=='X' or wt_lookup[aa_2[0]][0] in r['Match AA2']:
match_2 = True
                    # Only if both positions are matched, perform the mutation
if match_1 and match_2:
# Active state version
# Is AA1 the same as WT?
active = []
if aa_1[3]!='Wt' and aa_1[3]!=wt_lookup[aa_1[0]][0]:
active.append([wt_lookup[aa_1[0]][0],aa_1[3],wt_lookup[aa_1[0]][1],aa_1[0]])
if aa_2[3]!='Wt' and aa_2[3]!=wt_lookup[aa_2[0]][0]:
active.append([wt_lookup[aa_2[0]][0],aa_2[3],wt_lookup[aa_2[0]][1],aa_2[0]])
inactive = []
if aa_1[2]!='Wt' and aa_1[2]!=wt_lookup[aa_1[0]][0]:
inactive.append([wt_lookup[aa_1[0]][0],aa_1[2],wt_lookup[aa_1[0]][1],aa_1[0]])
if aa_2[2]!='Wt' and aa_2[2]!=wt_lookup[aa_2[0]][0]:
inactive.append([wt_lookup[aa_2[0]][0],aa_2[2],wt_lookup[aa_2[0]][1],aa_2[0]])
# print(aa_1,wt_lookup[aa_1[0]],match_1)
# print(aa_2,wt_lookup[aa_2[0]],match_2)
# print("inactive",inactive,len(inactive))
definition_matches = [int(prio),motif]
muts = []
disable_double = True
if len(active)==1:
# print("active",active,len(active))
active = active[0]
mut = {'wt_aa': active[0], 'segment': wt_lookup[active[3]][2], 'pos': active[2], 'gpcrdb':active[3], 'mut_aa':active[1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'active_%s%s%s' % (active[0],active[2],active[1])
#print(key,mut)
muts.append([key,mut])
elif len(active)==2:
mut = {'wt_aa1': active[0][0], 'segment1': wt_lookup[active[0][3]][2], 'pos': active[0][2], 'gpcrdb1':active[0][3], 'mut_aa1':active[0][1],'wt_aa2': active[1][0], 'segment2': wt_lookup[active[1][3]][2], 'pos2': active[1][2], 'gpcrdb2':active[1][3], 'mut_aa2':active[1][1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'active_%s%s%s_%s%s%s' % (active[0][0],active[0][2],active[0][1],active[1][0],active[1][2],active[1][1])
#print(key,mut)
if not disable_double: muts.append([key,mut])
if len(inactive)==1:
# print("active",inactive,len(inactive))
inactive = inactive[0]
mut = {'wt_aa': inactive[0], 'segment': wt_lookup[inactive[3]][2], 'pos': inactive[2], 'gpcrdb':inactive[3], 'mut_aa':inactive[1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'inactive_%s%s%s' % (inactive[0],inactive[2],inactive[1])
#print(key,mut)
muts.append([key,mut])
elif len(inactive)==2:
mut = {'wt_aa1': inactive[0][0], 'segment1': wt_lookup[inactive[0][3]][2], 'pos': inactive[0][2], 'gpcrdb1':inactive[0][3], 'mut_aa1':inactive[0][1],'wt_aa2': inactive[1][0], 'segment2': wt_lookup[inactive[1][3]][2], 'pos2': inactive[1][2], 'gpcrdb2':inactive[1][3], 'mut_aa2':inactive[1][1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'inactive_%s%s%s_%s%s%s' % (inactive[0][0],inactive[0][2],inactive[0][1],inactive[1][0],inactive[1][2],inactive[1][1])
# print(key,mut)
if not disable_double: muts.append([key,mut])
for mut in muts:
key = mut[0]
mut = mut[1]
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
except Exception as e:
print("problem with",r, e)
# GLYCO
seq = Protein.objects.filter(entry_name=slug).values_list('sequence', flat = True).get()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment')
residues = {}
for r in rs:
residues[r.sequence_number] = r.protein_segment.slug
#No proline!
p = re.compile("N[^P][TS]")
matches = re.finditer(r'(?=([N][^P][TS]))',seq)
matches_seq = re.findall(r'(?=([N][^P][TS]))',seq)
#{"all": [[39, "Q", "", "", "NTS", "N-term"], [203, "Q", "", "", "NNT", "ECL2"]], "mammalian": [[205, "V", 206, "V", "TTCVLNDPN", "ECL2"]]}
definition_matches = [int(3),"n-linked glycosylation removal"]
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
#print(m.start(), m.group())
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
key = '%s%s%s' % ("N",m.start()+1,"Q")
mut = {'wt_aa': "N", 'segment': residues[m.start()+1], 'pos': m.start()+1, 'gpcrdb':'', 'mut_aa':"Q", 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
matches = re.finditer(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
matches_seq = re.findall(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
definition_matches = [int(3),"o-linked glycosylation removal"]
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
if matches_seq[i][0]=="T":
pos0 = "V"
if matches_seq[i][1]=="T":
pos1 = "V"
if matches_seq[i][0]=="S":
pos0 = "A"
if matches_seq[i][1]=="S":
pos1 = "A"
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
key = '%s%s%s' % (matches_seq[i][0],m.start()+1,pos0)
mut = {'wt_aa': matches_seq[i][0], 'segment': residues[m.start()+1], 'pos': m.start()+1, 'gpcrdb':'', 'mut_aa':pos0, 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
key = '%s%s%s' % (matches_seq[i][1],m.start()+2,pos1)
mut = {'wt_aa': matches_seq[i][1], 'segment': residues[m.start()+1], 'pos': m.start()+2, 'gpcrdb':'', 'mut_aa':pos1, 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
#PALMI
definition_matches = [int(3),"palmitoylation removal"]
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug,protein_segment__slug__in=['H8','C-term']).order_by('sequence_number').prefetch_related('protein_segment')
residues = {}
seq = ''
end_h8 = 0
start_h8 = 0
for r in rs:
if not start_h8 and r.protein_segment.slug == 'H8':
start_h8 = r.sequence_number
if not end_h8 and r.protein_segment.slug == 'C-term':
end_h8 = r.sequence_number-1 #end_h8 was prev residue
elif end_h8 and r.sequence_number-10>end_h8:
continue
seq += r.amino_acid
residues[r.sequence_number] = r.protein_segment.slug
    # scan for cysteines in H8 / proximal C-term (candidate palmitoylation sites)
p = re.compile("C")
#print('all')
mutations_all = []
for m in p.finditer(seq):
key = '%s%s%s' % ("C",m.start()+start_h8,"Q")
mut = {'wt_aa': "C", 'segment': residues[m.start()+start_h8], 'pos': m.start()+start_h8, 'gpcrdb':'', 'mut_aa':"A", 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
            min_priority = min(x[0] for x in simple_list[key]['definitions'])
            simple_list[key]['priority'] = min_priority
simple_list = OrderedDict(sorted(simple_list.items(), key=lambda x: (x[1]['priority'],x[1]['pos']) ))
for key, val in simple_list.items():
if val['gpcrdb']:
val['display_gn'] = wt_lookup[val['gpcrdb']][3]
else:
val['display_gn'] = ""
val['definitions'] = list(set([x[1] for x in val['definitions']]))
# print(val)
jsondata = simple_list
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
diff = round(time.time() - start_time,1)
print("muts",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_strucs(request, slug, **response_kwargs):
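# Return, as JSON, the positions (generic numbers) where this protein differs from residues
# conserved in more than 50% of the proteins with crystallized constructs in the same class.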
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
xtal_proteins = Protein.objects.filter(pk__in=c_proteins)
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
potentials = cache.get("CD_xtal_"+level.split("_")[0])
if potentials is None:
a = Alignment()
a.load_proteins(xtal_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
cache.set("CD_xtal_"+level.split("_")[0],potentials,60*60*24)
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid!=potentials[gn][0]:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
jsondata = json.dumps(results)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_strucs",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_rf(request, slug, **response_kwargs):
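# Return, as JSON, the positions where this protein differs from residues conserved in more
# than 50% of the human (SWISSPROT) receptors in the same receptor family.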
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
#c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
rf_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:3]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
print(len(rf_proteins))
try:
# Load alignment
a = pickle.loads(AlignmentConsensus.objects.get(slug="_".join(level.split("_")[0:3])).alignment)
except Exception:
print('failed!')
a = Alignment()
a.load_proteins(rf_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid!=potentials[gn][0]:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
jsondata = json.dumps(results)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_rf",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_rf_and_class(request, slug, **response_kwargs):
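# Like cons_rf, but only report positions that are also conserved at the class level
# (the class-level consensus is cached for a day).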
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
#c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
rf_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:3]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
try:
# Load alignment
a = pickle.loads(AlignmentConsensus.objects.get(slug="_".join(level.split("_")[0:3])).alignment)
except Exception:
print('failed!')
a = Alignment()
a.load_proteins(rf_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
potentials2 = cache.get("CD_rfc_"+"_".join(level.split("_")[0:1]))
if potentials2 is None:
class_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:1]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
try:
# Load alignment
a = pickle.loads(AlignmentConsensus.objects.get(slug="_".join(level.split("_")[0:1])).alignment)
except Exception:
print('failed!')
a = Alignment()
a.load_proteins(class_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials2 = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials2[gn] = [aa[0],aa[1]]
cache.set("CD_rfc_"+"_".join(level.split("_")[0:1]),potentials2,60*60*24)
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid!=potentials[gn][0]:
if gn in potentials2:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
jsondata = json.dumps(results)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_rf_and_class",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_rm_GP(request, slug, **response_kwargs):
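# Return, as JSON, Gly/Pro positions: 'non-conserved' where the protein's G/P differs from the
# family consensus, and 'conserved' where a family-conserved glycine could be mutated to alanine.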
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
#c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
rf_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:3]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
a = Alignment()
a.load_proteins(rf_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
results2 = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid in ['G','P']:
if r.amino_acid!=potentials[gn][0]:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
if r.amino_acid=='G' and potentials[gn][0]=='G':
results2[gn] = [r.amino_acid, r.sequence_number,'A',potentials[gn][1]]
jsondata = json.dumps({'non-conserved':results, 'conserved':results2})
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_rm_GP",diff)
return HttpResponse(jsondata, **response_kwargs)
def calculate_conservation(proteins = None, slug = None):
# Return a dictionary mapping each generic number to the conserved residue and its frequency.
# Can be used either on a list of proteins or on a slug. If a slug is given, the cached alignment object is used.
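# Example usage (illustrative only; the slug value here is hypothetical):
#   consensus = calculate_conservation(slug='001_001_001')
#   consensus['1x50']  # -> [conserved_aa, conservation_frequency, {aa: (count, fraction), ...}]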
amino_acids_stats = {}
amino_acids_groups_stats = {}
if slug:
try:
# Load alignment
alignment_consensus = AlignmentConsensus.objects.get(slug=slug)
if alignment_consensus.gn_consensus:
alignment_consensus = pickle.loads(alignment_consensus.gn_consensus)
# Check that the cached consensus includes the per-aa count dict (third element, newest
# format); if it does not, the lookup raises and we fall through to the rebuild below.
test = alignment_consensus['1x50'][2]
return alignment_consensus
a = pickle.loads(alignment_consensus.alignment)
except Exception:
print('no saved alignment')
proteins = Protein.objects.filter(family__slug__startswith=slug, source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
a = Alignment()
a.load_proteins(proteins)
a.load_segments(align_segments)
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
alignment_consensus = None
elif proteins:
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
a = Alignment()
a.load_proteins(proteins)
a.load_segments(align_segments)
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
num_proteins = len(a.proteins)
# print(a.aa_count)
consensus = {}
for seg, aa_list in a.consensus.items():
for gn, aal in aa_list.items():
aa_count_dict = {}
for aa, num in a.aa_count[seg][gn].items():
if num:
aa_count_dict[aa] = (num,round(num/num_proteins,3))
if 'x' in gn: # only takes those GN positions that are actual 1x50 etc
consensus[gn] = [aal[0],aal[1],aa_count_dict]
if slug and alignment_consensus:
alignment_consensus.gn_consensus = pickle.dumps(consensus)
alignment_consensus.save()
return consensus
| apache-2.0 | -2,259,260,584,328,600,800 | 43.386241 | 381 | 0.556964 | false | 3.413757 | false | false | false |
devilry/devilry-django | devilry/devilry_group/tests/test_feedbackfeed/examiner/test_feedbackfeed_examiner_discuss.py | 1 | 50866 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.utils import timezone
from django.conf import settings
from model_bakery import baker
from devilry.devilry_comment import models as comment_models
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group import devilry_group_baker_factories as group_baker
from devilry.devilry_group import models as group_models
from devilry.apps.core import models as core_models
from devilry.devilry_group.tests.test_feedbackfeed.mixins import mixin_feedbackfeed_examiner
from devilry.devilry_group.views.examiner import feedbackfeed_examiner
class MixinTestFeedbackfeedExaminerDiscuss(mixin_feedbackfeed_examiner.MixinTestFeedbackfeedExaminer):
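"""
Shared tests for the examiner discuss views: feedback-tab visibility, posting comments
to the last feedback set, deadline-move events and grading info/buttons.
"""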
def test_get_examiner_first_attempt_feedback_tab_does_not_exist_if_last_feedbackset_is_published(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertFalse(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_get_examiner_first_attempt_feedback_tab_exist_if_last_feedbackset_is_unpublished(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_get_examiner_new_attempt_feedback_tab_does_not_exist_if_last_feedbackset_is_published(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_new_attempt_published(
group=testgroup,
deadline_datetime=timezone.now() + timedelta(days=3))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertFalse(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_get_examiner_new_attempt_feedback_tab_exist_if_last_feedbackset_is_unpublished(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_new_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_post_comment_always_to_last_feedbackset(self):
assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
grading_system_plugin_id=core_models.Assignment
.GRADING_SYSTEM_PLUGIN_ID_PASSEDFAILED)
group = baker.make('core.AssignmentGroup', parentnode=assignment)
examiner = baker.make('core.Examiner',
assignmentgroup=group,
relatedexaminer=baker.make('core.RelatedExaminer'))
feedbackset_first = group_baker.feedbackset_first_attempt_published(group=group)
feedbackset_last = group_baker.feedbackset_new_attempt_unpublished(group=group)
self.mock_http302_postrequest(
cradmin_role=examiner.assignmentgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': group.id},
requestkwargs={
'data': {
'text': 'This is a feedback',
'examiner_add_public_comment': 'unused value',
}
})
comments = group_models.GroupComment.objects.all()
self.assertEqual(len(comments), 1)
self.assertNotEqual(feedbackset_first, comments[0].feedback_set)
self.assertEqual(feedbackset_last, comments[0].feedback_set)
self.assertEqual(2, group_models.FeedbackSet.objects.count())
def test_event_deadline_moved_feedbackset_unpublished(self):
testgroup = baker.make('core.AssignmentGroup')
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
now1 = timezone.now()
new_deadline1 = now1 + timedelta(days=2)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now1,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline1)
now2 = timezone.now() + timedelta(days=2)
new_deadline2 = now2 + timedelta(days=4)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now2,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertEqual(mockresponse.selector.count('.devilry-group-feedbackfeed-event-message__deadline-moved'), 2)
self.assertEqual(mockresponse.selector.count('.deadline-move-info'), 2)
def test_event_deadline_moved_feedbackset_published(self):
testgroup = baker.make('core.AssignmentGroup')
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
testfeedbackset = group_baker.feedbackset_first_attempt_published(group=testgroup)
now1 = timezone.now()
new_deadline1 = now1 + timedelta(days=2)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now1,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline1)
now2 = timezone.now() + timedelta(days=2)
new_deadline2 = now2 + timedelta(days=4)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now2,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertEqual(mockresponse.selector.count('.devilry-group-feedbackfeed-event-message__deadline-moved'), 2)
self.assertEqual(mockresponse.selector.count('.deadline-move-info'), 2)
def test_get_feedbackset_header_grading_info_passed(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_published(group=testgroup, grading_points=1)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(mockresponse.selector.one('.header-grading-info').alltext_normalized, 'passed (1/1)')
def test_get_feedbackset_header_grading_info_failed(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_published(group=testgroup, grading_points=0)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(mockresponse.selector.one('.header-grading-info').alltext_normalized, 'failed (0/1)')
def test_get_feedbackset_header_buttons_not_graded(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-move-deadline-button').alltext_normalized,
'Move deadline')
self.assertFalse(mockresponse.selector.exists('.devilry-group-event__grade-last-edit-button'))
self.assertNotContains(mockresponse.response, 'Edit grade')
self.assertFalse(mockresponse.selector.exists('.devilry-group-event__grade-last-new-attempt-button'))
self.assertNotContains(mockresponse.response, 'Give new attempt')
def test_get_feedbackset_published_move_deadline_button_not_rendered(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertFalse(
mockresponse.selector.exists('.devilry-group-event__grade-move-deadline-button'))
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-last-edit-button').alltext_normalized,
'Edit grade')
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-last-new-attempt-button').alltext_normalized,
'Give new attempt')
def test_get_feedbackset_not_published_only_move_deadline_button_shows(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-move-deadline-button').alltext_normalized,
'Move deadline')
def test_get_feedbackset_grading_updated_multiple_events_rendered(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='[email protected]', fullname='Test User')
test_feedbackset = group_baker.feedbackset_first_attempt_published(group=testgroup, grading_points=1)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=1,
updated_by=testuser)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=0,
updated_by=testuser)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=1,
updated_by=testuser)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=0,
updated_by=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup
)
event_text_list = [element.alltext_normalized for element in
mockresponse.selector.list('.devilry-group-event__grading_updated')]
self.assertEqual(len(event_text_list), 4)
self.assertIn('The grade was changed from passed (1/1) to failed (0/1) by Test User([email protected])', event_text_list[0])
self.assertIn('The grade was changed from failed (0/1) to passed (1/1) by Test User([email protected])', event_text_list[1])
self.assertIn('The grade was changed from passed (1/1) to failed (0/1) by Test User([email protected])', event_text_list[2])
self.assertIn('The grade was changed from failed (0/1) to passed (1/1) by Test User([email protected])', event_text_list[3])
class TestFeedbackfeedExaminerPublicDiscuss(TestCase, MixinTestFeedbackfeedExaminerDiscuss):
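"""
Tests for the examiner public discuss view, where comments are visible to everyone in the group.
"""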
viewclass = feedbackfeed_examiner.ExaminerPublicDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_get_examiner_add_comment_button(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('#submit-id-examiner_add_public_comment'))
self.assertEqual(
'Add comment',
mockresponse.selector.one('#submit-id-examiner_add_public_comment').alltext_normalized
)
def test_get_examiner_form_heading(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-form-heading'))
self.assertEqual(
'Discuss with the student(s). Anything you write or upload here is visible to the student(s), '
'co-examiners (if any), and admins, but it is not considered part of your feedback/grading.',
mockresponse.selector.one('.devilry-group-feedbackfeed-form-heading').alltext_normalized
)
def test_post_comment_mail_sent_to_everyone_in_group_sanity(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner_email = baker.make('devilry_account.UserEmail', user=examiner.relatedexaminer.user,
email='[email protected]')
# Create two examiners with mails
examiner1 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner1_email = baker.make('devilry_account.UserEmail', user=examiner1.relatedexaminer.user,
email='[email protected]')
examiner2 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner2_email = baker.make('devilry_account.UserEmail', user=examiner2.relatedexaminer.user,
email='[email protected]')
# Create two students with mails
student1 = baker.make('core.Candidate', assignment_group=testgroup)
student1_email = baker.make('devilry_account.UserEmail', user=student1.relatedstudent.user,
email='[email protected]')
student2 = baker.make('core.Candidate', assignment_group=testgroup)
student2_email = baker.make('devilry_account.UserEmail', user=student2.relatedstudent.user,
email='[email protected]')
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(len(mail.outbox), 4)
recipient_list = []
for outbox in mail.outbox:
recipient_list.append(outbox.recipients()[0])
self.assertIn(examiner1_email.email, recipient_list)
self.assertIn(examiner2_email.email, recipient_list)
self.assertIn(student1_email.email, recipient_list)
self.assertIn(student2_email.email, recipient_list)
self.assertNotIn(examiner_email.email, recipient_list)
def test_post_first_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_first_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
class TestFeedbackfeedExaminerWithAdminDiscuss(TestCase, MixinTestFeedbackfeedExaminerDiscuss):
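"""
Tests for the examiner discuss view for internal notes visible only to examiners and admins.
"""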
viewclass = feedbackfeed_examiner.ExaminerWithAdminsDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_get_examiner_add_comment_button(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('#submit-id-examiner_add_comment_for_examiners_and_admins'))
self.assertEqual(
'Add note',
mockresponse.selector.one('#submit-id-examiner_add_comment_for_examiners_and_admins').alltext_normalized
)
def test_get_examiner_form_heading(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-form-heading'))
self.assertEqual(
'Internal notes for this student or project group. Visible only to you, your co-examiners (if any) '
'and admins. Students can not see these notes.',
mockresponse.selector.one('.devilry-group-feedbackfeed-form-heading').alltext_normalized
)
def test_post_comment_mail_only_sent_to_examiners(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner_email = baker.make('devilry_account.UserEmail', user=examiner.relatedexaminer.user,
email='[email protected]')
# Create two examiners with mails
examiner1 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner1_email = baker.make('devilry_account.UserEmail', user=examiner1.relatedexaminer.user,
email='[email protected]')
examiner2 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner2_email = baker.make('devilry_account.UserEmail', user=examiner2.relatedexaminer.user,
email='[email protected]')
# Create two students with mails
student1 = baker.make('core.Candidate', assignment_group=testgroup)
student1_email = baker.make('devilry_account.UserEmail', user=student1.relatedstudent.user,
email='[email protected]')
student2 = baker.make('core.Candidate', assignment_group=testgroup)
student2_email = baker.make('devilry_account.UserEmail', user=student2.relatedstudent.user,
email='[email protected]')
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(len(mail.outbox), 2)
recipient_list = []
for outbox in mail.outbox:
recipient_list.append(outbox.recipients()[0])
self.assertIn(examiner1_email.email, recipient_list)
self.assertIn(examiner2_email.email, recipient_list)
self.assertNotIn(student1_email.email, recipient_list)
self.assertNotIn(student2_email.email, recipient_list)
self.assertNotIn(examiner_email.email, recipient_list)
def test_post_first_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_first_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
class TestFeedbackfeedPublicDiscussFileUploadExaminer(TestCase,
mixin_feedbackfeed_examiner.MixinTestFeedbackfeedExaminer):
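"""
Tests for file upload through the examiner public discuss view.
"""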
viewclass = feedbackfeed_examiner.ExaminerPublicDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_comment_without_text_or_file_visibility_everyone(self):
# Tests that error message pops up if trying to post a comment without either text or file.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.Examiner', assignmentgroup=testfeedbackset.group)
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'examiner_add_public_comment': 'unused value'
}
})
self.assertEqual(0, group_models.GroupComment.objects.count())
self.assertEqual(
'A comment must have either text or a file attached, or both. An empty comment is not allowed.',
mockresponse.selector.one('#error_1_id_text').alltext_normalized)
def test_upload_single_file_visibility_everyone(self):
# Test that a CommentFile is created on upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfile(
user=testexaminer.relatedexaminer.user)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(1, comment_models.CommentFile.objects.count())
def test_upload_single_file_content_visibility_everyone(self):
# Test the content of a CommentFile after upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile.txt', content=b'Test content', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, comment_models.CommentFile.objects.count())
comment_file = comment_models.CommentFile.objects.all()[0]
group_comment = group_models.GroupComment.objects.get(id=comment_file.comment.id)
self.assertEqual(group_comment.visibility, group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
self.assertEqual('testfile.txt', comment_file.filename)
self.assertEqual(b'Test content', comment_file.file.file.read())
self.assertEqual(len('Test content'), comment_file.filesize)
self.assertEqual('text/txt', comment_file.mimetype)
def test_upload_multiple_files_visibility_everyone(self):
# Test the content of CommentFiles after upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
def test_upload_multiple_files_contents_visibility_everyone(self):
# Test the contents of each uploaded CommentFile.
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
comment_file1 = comment_models.CommentFile.objects.get(filename='testfile1.txt')
comment_file2 = comment_models.CommentFile.objects.get(filename='testfile2.txt')
comment_file3 = comment_models.CommentFile.objects.get(filename='testfile3.txt')
# Check content of testfile 1.
self.assertEqual('testfile1.txt', comment_file1.filename)
self.assertEqual(b'Test content1', comment_file1.file.file.read())
self.assertEqual(len('Test content1'), comment_file1.filesize)
self.assertEqual('text/txt', comment_file1.mimetype)
# Check content of testfile 2.
self.assertEqual('testfile2.txt', comment_file2.filename)
self.assertEqual(b'Test content2', comment_file2.file.file.read())
self.assertEqual(len('Test content2'), comment_file2.filesize)
self.assertEqual('text/txt', comment_file2.mimetype)
# Check content of testfile 3.
self.assertEqual('testfile3.txt', comment_file3.filename)
self.assertEqual(b'Test content3', comment_file3.file.file.read())
self.assertEqual(len(b'Test content3'), comment_file3.filesize)
self.assertEqual('text/txt', comment_file3.mimetype)
def test_upload_files_and_comment_text(self):
# Test that uploaded files and the comment text are both saved.
testfeedbackset = group_baker.feedbackset_first_attempt_published(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': 'Test comment',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(2, comment_models.CommentFile.objects.count())
self.assertEqual(1, group_models.GroupComment.objects.count())
group_comments = group_models.GroupComment.objects.all()
self.assertEqual('Test comment', group_comments[0].text)
class TestFeedbackfeedExaminerWithAdminDiscussFileUpload(TestCase,
mixin_feedbackfeed_examiner.MixinTestFeedbackfeedExaminer):
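"""
Tests for file upload through the examiner discuss view for examiners and admins only.
"""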
viewclass = feedbackfeed_examiner.ExaminerWithAdminsDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_comment_without_text_or_file_visibility_examiners_and_admins(self):
# Tests that error message pops up if trying to post a comment without either text or file.
# Posting comment with visibility for examiners and admins only
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
}
})
self.assertEqual(0, group_models.GroupComment.objects.count())
self.assertEqual(
'A comment must have either text or a file attached, or both. An empty comment is not allowed.',
mockresponse.selector.one('#error_1_id_text').alltext_normalized)
def test_upload_single_file_visibility_examiners_and_admins(self):
# Test that a CommentFile is created on upload.
# Posting comment with visibility visible to examiners and admins
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfile(
user=testexaminer.relatedexaminer.user)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(1, comment_models.CommentFile.objects.count())
def test_upload_single_file_content_visibility_examiners_and_admins(self):
# Test the content of a CommentFile after upload.
# Posting comment with visibility visible to examiners and admins
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile.txt', content=b'Test content', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(1, comment_models.CommentFile.objects.count())
comment_file = comment_models.CommentFile.objects.all()[0]
self.assertEqual('testfile.txt', comment_file.filename)
self.assertEqual(b'Test content', comment_file.file.file.read())
self.assertEqual(len('Test content'), comment_file.filesize)
self.assertEqual('text/txt', comment_file.mimetype)
def test_upload_multiple_files_visibility_examiners_and_admins(self):
# Test the content of CommentFiles after upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'examiner_add_comment_for_examiners': 'unused value',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
def test_upload_multiple_files_contents_visibility_examiners_and_admins(self):
# Test the contents of each uploaded CommentFile.
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'examiner_add_comment_for_examiners': 'unused value',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
comment_file1 = comment_models.CommentFile.objects.get(filename='testfile1.txt')
comment_file2 = comment_models.CommentFile.objects.get(filename='testfile2.txt')
comment_file3 = comment_models.CommentFile.objects.get(filename='testfile3.txt')
# Check content of testfile 1.
self.assertEqual('testfile1.txt', comment_file1.filename)
self.assertEqual(b'Test content1', comment_file1.file.file.read())
self.assertEqual(len('Test content1'), comment_file1.filesize)
self.assertEqual('text/txt', comment_file1.mimetype)
# Check content of testfile 2.
self.assertEqual('testfile2.txt', comment_file2.filename)
self.assertEqual(b'Test content2', comment_file2.file.file.read())
self.assertEqual(len('Test content2'), comment_file2.filesize)
self.assertEqual('text/txt', comment_file2.mimetype)
# Check content of testfile 3.
self.assertEqual('testfile3.txt', comment_file3.filename)
self.assertEqual(b'Test content3', comment_file3.file.file.read())
self.assertEqual(len(b'Test content3'), comment_file3.filesize)
self.assertEqual('text/txt', comment_file3.mimetype)
| bsd-3-clause | 61,710,043,098,759,816 | 54.652079 | 131 | 0.657728 | false | 3.952292 | true | false | false |
milliman/spark | python/pyspark/sql/tests.py | 1 | 107755 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type
from pyspark.tests import ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
    User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate a UDT implemented only in Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
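    # With duplicate column names, Row values are still accessible by position,
    # but selecting any single column raises AnalysisException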
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode
d = [Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
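    # Broadcast variables should be usable from inside a registered UDF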
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
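    # A UDF returning an array should be usable as the input to explode()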
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
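    # wholeFile=True lets the JSON reader parse records that span multiple lines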
def test_wholefile_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
wholeFile=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_wholefile_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", wholeFile=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
    def test_udf_defers_judf_initialization(self):
        # This is kept separate from UDFInitializationTests
        # to avoid context initialization
        # when the udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
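    # Smoke test: count/collect/schema, caching, and temp-view SQL on a JSON-backed DataFrame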
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
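    # UDTs should survive pickling and a round trip through the JVM DataType JSON parser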
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
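    # Arithmetic, comparison and string operators on Columns should return Column objects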
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
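    # approxQuantile accepts one column or a list/tuple of columns and validates its arguments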
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument can be implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
with self.assertRaises(ValueError):
struct1 = StructType().add("name")
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
with self.assertRaises(KeyError):
not_a_field = struct1["f9"]
with self.assertRaises(IndexError):
not_a_field = struct1[9]
with self.assertRaises(TypeError):
not_a_field = struct1[9.9]
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
                .load(path=tmpPath, noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
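    # Keyword arguments passed to load() should override options set earlier on the reader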
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
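    # Keyword arguments passed to start() should override settings made earlier on the writer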
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # Saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
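    # fillna should respect the fill value's type and an optional column subset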
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with string
row = self.spark.createDataFrame([(None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
        # should fail if subset is not a list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
        # should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
    # Regression test for invalid join methods when on is None (SPARK-14761)
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
        # Pyrolite version <= 4.9 could not serialize BinaryType with Python3 (SPARK-17808)
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
    # We can't include this test in SQLTests because we would stop the class's
    # SparkContext and cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
            SparkContext._active_spark_context.stop()
    def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(dateCol)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| apache-2.0 | 1,648,245,848,236,969,200 | 42.07517 | 100 | 0.58984 | false | 3.662327 | true | false | false |
sideeffects/pycparser | pycparser/c_parser.py | 1 | 60688 |
#------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2013, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
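        # For illustration (a rough sketch of this bookkeeping, with a
        # hypothetical input): after parsing "typedef int T; int x;" at file
        # scope, the stack would look roughly like [{'T': True, 'x': False}]
        # -- 'T' is a type name, 'x' an ordinary identifier.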
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
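
            Example (a rough usage sketch; the input and filename are
            illustrative only):

                parser = CParser()
                ast = parser.parse('int x;', filename='<none>')
                # 'ast' is a c_ast.FileAST node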
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module
# c_ast) and the modifiers are FuncDecl, PtrDecl and
# ArrayDecl.
#
# The standard states that whenever a new modifier is parsed,
# it should be added to the end of the list of modifiers. For
# example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
#
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
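    # A rough illustration of the chaining above (hypothetical input): for
    # "int *c[5];" the declarator that reaches the declaration level is
    # approximately
    #   ArrayDecl(dim=5, type=PtrDecl(type=TypeDecl(declname='c')))
    # i.e. "c is an array of 5 pointers to int"; the basic 'int' type is only
    # attached to the innermost TypeDecl later, by _fix_decl_name_type.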
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
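    # Rough example (hypothetical input): for "unsigned long x;" the typename
    # list arrives as [IdentifierType(['unsigned']), IdentifierType(['long'])]
    # and the innermost TypeDecl ends up typed as
    # IdentifierType(['unsigned', 'long']).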
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
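    # Sketch of the resulting dictionary (hypothetical input): after the
    # specifiers of "static const int x;" have been consumed, the spec would
    # look roughly like
    #   {'qual': ['const'], 'storage': ['static'],
    #    'type': [IdentifierType(['int'])], 'function': []}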
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
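    # Illustrative sketch (hypothetical input): for "int x, *y;" this method
    # is called once with the shared 'int' specifiers and two declarator
    # dicts, and returns two Decl nodes -- one for x (an int) and one for y
    # (a pointer to int).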
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
    # external_declaration homogeneous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
    # a declaration list, for old "K&R style" function definitions.
#
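    # A rough example of such an old-style definition (hypothetical input):
    #
    #   int add(a, b)
    #       int a, b;
    #   {
    #       return a + b;
    #   }
    #
    # where "int a, b;" is the declaration_list sitting between the declarator
    # and the compound_statement.
    #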
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
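    #
    # A small illustration (hypothetical input):
    #
    #   typedef int T;
    #   T x;
    #
    # Reducing decl_body for the first line registers 'T' as a type name
    # before the lexer has to classify the 'T' that starts the second line.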
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
#
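    # A rough example of the situation (hypothetical input):
    #
    #   typedef int T;
    #   void f(void) { int T = 3; }
    #
    # Inside f, 'T' is still lexed as a TYPEID, so "int T" is swallowed by
    # declaration_specifiers and the parser reaches EQUALS with no declarator
    # in hand.
    #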
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
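    # For instance (hypothetical input), the following is legal because the
    # tag and typedef namespaces are distinct:
    #
    #   typedef int Node;
    #   struct Node { int val; };
    #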
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
#
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
# Accept dimension qualifiers
# Per C99 6.7.5.3 p7
arr = c_ast.ArrayDecl(
type=None,
dim=p[4],
            dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
| direct_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
# Work around that here by listing the two elements separately.
listed_quals = [item if isinstance(item, list) else [item]
for item in [p[3],p[4]]]
dim_quals = [qual for sublist in listed_quals for qual in sublist
if qual is not None]
arr = c_ast.ArrayDecl(
type=None,
dim=p[5],
dim_quals=dim_quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
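    # e.g. (hypothetical input) the "[*]" form used in function prototypes
    # for a variable-length array of unspecified size:
    #
    #   int sum(int n, int a[*]);
    #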
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._coord(p.lineno(4))),
            dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_6(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.PtrDecl(
quals=p[2] or [],
type=p[3] if len(p) > 3 else None,
coord=coord)
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
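        # Illustrative example (an assumed input, not quoted from this file):
        # given "typedef char TT;", the prototype "void foo(int TT);" groups
        # TT into the declaration specifiers; the check below notices that the
        # trailing type name is actually a typedef in scope and rebuilds it as
        # the parameter's name instead of treating it as a type.
        #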
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list brace_close
| brace_open initializer_list COMMA brace_close
"""
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
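    # For instance (illustrative only): in "struct S s = { [2].x = 1 };" the
    # designation "[2].x" is built as the plain Python list
    # [Constant('int', '2', ...), ID('x', ...)], in source order.
    #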
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
            dim=c_ast.ID(p[2], self._coord(p.lineno(2))),  # p[2] is the TIMES ('*') token
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._coord(p.lineno(1))),
p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
# the built in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
    #sys.stderr.write(str(time.time() - t1))
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
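    # A minimal usage sketch, kept commented out like the demo above (the
    # input string and filename below are illustrative assumptions):
    #
    #parser = CParser()
    #ast = parser.parse('int main(void) { return 0; }', filename='<demo>')
    #ast.show(showcoord=True)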
| bsd-3-clause | -4,062,740,907,788,481,500 | 35.515042 | 122 | 0.525145 | false | 3.947957 | false | false | false |