| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses (1 value) | stringclasses (15 values) | int32 2-1.05M |
def lengthOfLongestSubstring(s):
    if s == "":
        return 0
    else:
        maxLength = 1
        for i in range(len(s)):
            for j in range(i+1, len(s)):
                identical = 0
                for k in range(i, j):
                    for m in range(k+1, j+1):
                        if s[k] == s[m]:
                            identical = 1
                if identical == 0:
                    if j-i+1 > maxLength:
                        maxLength = j-i+1
        return maxLength
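# A minimal sliding-window sketch (not part of the original file): it returns the
# same answer in O(n) instead of the O(n^4) nested loops above, by remembering the
# last index at which each character was seen.
def lengthOfLongestSubstring_window(s):
    last, start, best = {}, 0, 0
    for i, ch in enumerate(s):
        if ch in last and last[ch] >= start:
            start = last[ch] + 1
        last[ch] = i
        best = max(best, i - start + 1)
    return best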
s = "pwwkew"
print(lengthOfLongestSubstring(s))
s = "abcabcbb"
print(lengthOfLongestSubstring(s))
s = "bbbbbbb"
print(lengthOfLongestSubstring(s))
s = ""
print(lengthOfLongestSubstring(s))
s= "au"
print(lengthOfLongestSubstring(s)) | iamwrm/coding | leetcode/lengthOfLongestSubstring.py | Python | mit | 719 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default', [dirname(__file__)])
except ImportError:
import _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default
if fp is not None:
try:
_mod = imp.load_module('_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default', fp, pathname, description)
finally:
fp.close()
return _mod
_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default = swig_import_helper()
del swig_import_helper
else:
import _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
class SimGeomSurfaceModel(base.SimGeometricRepresentationItem):
__swig_setmethods__ = {}
for _s in [base.SimGeometricRepresentationItem]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimGeomSurfaceModel, name, value)
__swig_getmethods__ = {}
for _s in [base.SimGeometricRepresentationItem]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimGeomSurfaceModel, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.new_SimGeomSurfaceModel(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel__clone(self, f, c)
__swig_destroy__ = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.delete_SimGeomSurfaceModel
__del__ = lambda self: None
SimGeomSurfaceModel_swigregister = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_swigregister
SimGeomSurfaceModel_swigregister(SimGeomSurfaceModel)
class SimGeomSurfaceModel_FaceBasedSurfaceModel(SimGeomSurfaceModel):
__swig_setmethods__ = {}
for _s in [SimGeomSurfaceModel]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimGeomSurfaceModel_FaceBasedSurfaceModel, name, value)
__swig_getmethods__ = {}
for _s in [SimGeomSurfaceModel]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimGeomSurfaceModel_FaceBasedSurfaceModel, name)
__repr__ = _swig_repr
def FbsmFaces(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_FbsmFaces(self, *args)
def __init__(self, *args):
this = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.new_SimGeomSurfaceModel_FaceBasedSurfaceModel(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel__clone(self, f, c)
__swig_destroy__ = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.delete_SimGeomSurfaceModel_FaceBasedSurfaceModel
__del__ = lambda self: None
SimGeomSurfaceModel_FaceBasedSurfaceModel_swigregister = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_swigregister
SimGeomSurfaceModel_FaceBasedSurfaceModel_swigregister(SimGeomSurfaceModel_FaceBasedSurfaceModel)
class SimGeomSurfaceModel_FaceBasedSurfaceModel_Default(SimGeomSurfaceModel_FaceBasedSurfaceModel):
__swig_setmethods__ = {}
for _s in [SimGeomSurfaceModel_FaceBasedSurfaceModel]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimGeomSurfaceModel_FaceBasedSurfaceModel_Default, name, value)
__swig_getmethods__ = {}
for _s in [SimGeomSurfaceModel_FaceBasedSurfaceModel]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimGeomSurfaceModel_FaceBasedSurfaceModel_Default, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.new_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default__clone(self, f, c)
__swig_destroy__ = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.delete_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default
__del__ = lambda self: None
SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_swigregister = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_swigregister
SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_swigregister(SimGeomSurfaceModel_FaceBasedSurfaceModel_Default)
class SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.new_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_assign(self, n, x)
def begin(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_begin(self, *args)
def end(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_end(self, *args)
def rbegin(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_rend(self, *args)
def at(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_at(self, *args)
def front(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_front(self, *args)
def back(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_back(self, *args)
def push_back(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_push_back(self, *args)
def pop_back(self):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_insert(self, *args)
def erase(self, *args):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_swap(self, x)
__swig_destroy__ = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.delete_SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence
__del__ = lambda self: None
SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_swigregister = _SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_swigregister
SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence_swigregister(SimGeomSurfaceModel_FaceBasedSurfaceModel_Default_sequence)
# This file is compatible with both classic and new-style classes.
| EnEff-BIM/EnEffBIM-Framework | SimModel_Python_API/simmodel_swig/Release/SimGeomSurfaceModel_FaceBasedSurfaceModel_Default.py | Python | mit | 11,364 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-05-15 19:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('lotes', '0024_auto_20180515_1609'),
    ]
    operations = [
        migrations.AlterField(
            model_name='lote',
            name='local_at',
            field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name='localizado em'),
        ),
    ]
| anselmobd/fo2 | src/lotes/migrations/0025_auto_20180515_1613.py | Python | mit | 506 |
#!/usr/bin/env python
import os, sys, re, csv
def make_and_move_zimages_lowres_thumbnails_dir_or_singlefile(pathname):
import os, sys, re, csv
from PIL import Image
import pyexiv2
import glob, os, re
size = 600, 720
regex_jpeg = re.compile(r'.+?\.[jpgJPG]{3}$')
regex_productionraw_Exports = re.compile(r'^/.+?/ON_FIGURE/.+?SELECTS/.*?[0-9]{9}_[1-9]_[0-9]{1,4}\.[jpgJPG]{3}$')
zimages_root = '/mnt/Production_Raw/.zImages_1'
test_zimages = '/'.join(pathname.split('/')[:4])
if test_zimages == zimages_root:
pass
elif re.findall(regex_productionraw_Exports, pathname):
zimages_root = '/mnt/Production_Raw/.zImages_1'
## If input variable is a single File Create 1 Thumb
if os.path.isfile(pathname):
#try:
infile = os.path.abspath(pathname)
filename, ext = os.path.splitext(infile)
zimages_name = os.path.split(infile)[-1]
zimages_dir = zimages_name[:4]
zimages_dir = os.path.join(zimages_root, zimages_dir)
zimages_filepath = os.path.join(zimages_dir, zimages_name)
#print infile, zimages_filepath
## Try to make 4 digit directory or pass if already present
try:
os.mkdir(zimages_dir, 16877)
except OSError:
pass
## Test if this file has already been copied to Zimages Dir -- If not Make 600x720 jpg in zimagesdir
if os.path.isfile(zimages_filepath):
print "File Exists: {0}".format(zimages_filepath)
pass
else:
try:
## Extract Originals Metadata prior to Resizing
source_metadata = pyexiv2.ImageMetadata(infile)
source_metadata.read()
# Resize and Save Thumb copy to Zimages
im = Image.open(infile)
im.thumbnail(size, Image.ANTIALIAS)
im.save(zimages_filepath , "JPEG")
print infile, zimages_filepath
# Copy EXIF data from Source to Resized Image
dest_metadata = pyexiv2.ImageMetadata(zimages_filepath)
dest_metadata.read()
source_metadata.copy(dest_metadata, exif=True, iptc=True, xmp=True, comment=True)
# set EXIF image size info to resized size
# dest_metadata.read()
# dest_metadata["Exif.Photo.PixelXDimension"] = im.size[0]
# dest_metadata["Exif.Photo.PixelYDimension"] = im.size[1]
dest_metadata.write()
print "Success copied {} --> {}".format(pathname,zimages_filepath)
except IOError:
print "Bad Image File {}".format(zimages_filepath)
pass
return zimages_filepath
#except:
# print "Error Creating Single File Thumbnail for {0}".format(infile)
## If input variable is a Directory Decend into Dir and Crate Thumnails for all jpgs
elif os.path.isdir(pathname):
dirname = os.path.abspath(pathname)
print dirname
for infile in glob.glob(os.path.join(dirname, "*.jpg")):
try:
infile = os.path.abspath(infile)
filename, ext = os.path.splitext(infile)
zimages_name = os.path.split(infile)[-1]
zimages_dir = zimages_name[:4]
zimages_dir = os.path.join(zimages_root, zimages_dir)
zimages_filepath = os.path.join(zimages_dir, zimages_name)
print infile, zimages_filepath
## Try to make 4 digit directory or pass if already present
try:
os.mkdir(zimages_dir, 16877)
except OSError:
pass
## Test if this file has already been copied to Zimages Dir -- If not Make 600x720 jpg in zimagesdir
if os.path.isfile(zimages_filepath):
pass
print "File Exists: {0}".format(zimages_filepath)
else:
## Extract Originals Metadata prior to Resizing
source_metadata = pyexiv2.ImageMetadata(infile)
source_metadata.read()
# Resize and Save Thumb copy to Zimages
im = Image.open(infile)
im.thumbnail(size, Image.ANTIALIAS)
im.save(zimages_filepath , "JPEG")
print infile, zimages_filepath
# Copy EXIF data from Source to Resized Image
dest_metadata = pyexiv2.ImageMetadata(zimages_filepath)
dest_metadata.read()
source_metadata.copy(dest_metadata, exif=True, iptc=True, xmp=True, comment=True)
# set EXIF image size info to resized size
# dest_metadata.read()
# dest_metadata["Exif.Photo.PixelXDimension"] = im.size[0]
# dest_metadata["Exif.Photo.PixelYDimension"] = im.size[1]
dest_metadata.write()
return zimages_filepath
except:
print "Error Creating Thumbnail for {0}".format(infile)
else:
print "File: {0} is not a jpg".format(pathname)
############# RUN
pathname = sys.argv[1]
| relic7/prodimages | python/image_or_dir_make_thumbs_zimagesRAW.py | Python | mit | 5,518 |
import os
import numpy as np
nx_min = 1000
d_nx = 495
n_nx = 200
nt_min = 1000
d_nt = 495
n_nt = 200
output_dir = "./convergence_outputs/"
results_dir = "./convergence_results/"
resid_fname = "vd_resid_analysis.txt"
resid_matrix = np.zeros((n_nx,n_nt))
def ensure_dir(path):
# Make sure a directory exists.
os.makedirs(path, exist_ok=True)
ensure_dir(results_dir)
for i_x in range(n_nx):
nx = nx_min + d_nx * i_x
for i_t in range(n_nt):
nt = nt_min + d_nt * i_t
dir_name = output_dir+str(nx)+"_"+str(nt)+"/"
f_name = dir_name + resid_fname
f = open(f_name, "r")
lines = f.readlines()
f.close()
for line in lines:
if (line.find("Cumulative") >= 0):
split = line.rstrip("\n").split(":")
resid_matrix[i_x][i_t] = float(split[1])
resid_matrix_t = np.transpose(resid_matrix)
f = open(results_dir+"rp10-b4-cxm_tx.dat", "w")
for i_x in range(n_nx):
out_line = []
for i_t in range(n_nt):
out_line.append(resid_matrix[i_x][i_t])
out = "\t".join(str(i) for i in out_line)+"\n"
f.write(out)
f.close()
g = open(results_dir+"rp10-b4-cm_xt.dat", "w")
for i_t in range(n_nt):
out_line = []
for i_x in range(n_nx):
out_line.append(resid_matrix_t[i_t][i_x])
out = "\t".join(str(i) for i in out_line)+"\n"
g.write(out)
g.close()
| kramer314/1d-vd-test | convergence-tests/combine-output.py | Python | mit | 1,395 |
from setuptools import setup
setup(name='rb87',
version='0.1',
description='Contains useful constants related to Rubidium 87',
url='http://github.com/shreyaspotnis/rb87',
author='Shreyas Potnis',
author_email='[email protected]',
license='MIT',
packages=['rb87'],
zip_safe=False)
| shreyaspotnis/rb87 | setup.py | Python | mit | 335 |
# -*- coding: utf-8 -*-
"""Entry point for the CLI"""
from __future__ import division, absolute_import
import sys
import argparse
import datetime as dt
import numpy as np
import apexpy
try:
# Python 3
STDIN = sys.stdin.buffer
STDOUT = sys.stdout.buffer
except AttributeError:
# Python 2
STDIN = sys.stdin
STDOUT = sys.stdout
def main():
"""Entry point for the script"""
desc = 'Converts between geodetic, modified apex, quasi-dipole and MLT'
parser = argparse.ArgumentParser(description=desc, prog='apexpy')
parser.add_argument('source', metavar='SOURCE',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert from {geo, apex, qd, mlt}')
parser.add_argument('dest', metavar='DEST',
choices=['geo', 'apex', 'qd', 'mlt'],
help='Convert to {geo, apex, qd, mlt}')
desc = 'YYYY[MM[DD[HHMMSS]]] date/time for IGRF coefficients, time part '
desc += 'required for MLT calculations'
parser.add_argument('date', metavar='DATE', help=desc)
parser.add_argument('--height', dest='height', default=0, metavar='HEIGHT',
type=float, help='height for conversion')
parser.add_argument('--refh', dest='refh', metavar='REFH', type=float,
default=0,
help='reference height for modified apex coordinates')
parser.add_argument('-i', '--input', dest='file_in', metavar='FILE_IN',
type=argparse.FileType('r'), default=STDIN,
help='input file (stdin if none specified)')
parser.add_argument('-o', '--output', dest='file_out', metavar='FILE_OUT',
type=argparse.FileType('wb'), default=STDOUT,
help='output file (stdout if none specified)')
args = parser.parse_args()
array = np.loadtxt(args.file_in, ndmin=2)
if 'mlt' in [args.source, args.dest] and len(args.date) < 14:
desc = 'full date/time YYYYMMDDHHMMSS required for MLT calculations'
raise ValueError(desc)
if 9 <= len(args.date) and len(args.date) <= 13:
desc = 'full date/time must be given as YYYYMMDDHHMMSS, not ' \
+ 'YYYYMMDDHHMMSS'[:len(args.date)]
raise ValueError(desc)
datetime = dt.datetime.strptime(args.date,
'%Y%m%d%H%M%S'[:len(args.date) - 2])
A = apexpy.Apex(date=datetime, refh=args.refh)
lats, lons = A.convert(array[:, 0], array[:, 1], args.source, args.dest,
args.height, datetime=datetime)
np.savetxt(args.file_out, np.column_stack((lats, lons)), fmt='%.8f')
if __name__ == '__main__':
sys.exit(main())
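# Illustrative invocation (assumes apexpy is installed and `geo.txt` holds
# "lat lon" pairs, one per line): convert geodetic to quasi-dipole coordinates
# at 300 km height for IGRF epoch 2015-01-01.
#
#   python -m apexpy geo qd 20150101 --height 300 -i geo.txt -o qd.txt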
| cmeeren/apexpy | src/apexpy/__main__.py | Python | mit | 2,748 |
import coremltools
folder = 'cnn_age_gender_models_and_data.0.0.2'
coreml_model = coremltools.converters.caffe.convert(
(folder + '/gender_net.caffemodel', folder + '/deploy_gender.prototxt'),
image_input_names = 'data',
class_labels = 'genders.txt'
)
coreml_model.author = 'Gil Levi and Tal Hassner'
coreml_model.license = 'Unknown'
coreml_model.short_description = 'Gender Classification using Convolutional Neural Networks'
coreml_model.input_description['data'] = 'An image with a face.'
coreml_model.output_description['prob'] = 'The probabilities for each gender, for the given input.'
coreml_model.output_description['classLabel'] = 'The most likely gender, for the given input.'
coreml_model.save('GenderNet.mlmodel')
| cocoa-ai/FacesVisionDemo | Convert/gender.py | Python | mit | 740 |
# coding=utf-8
"""
Simple pyserial test (Python 2)
"""
import serial
import struct
import logging
def run_server():
    ser = serial.Serial('COM3', 38400, timeout=0,
                        parity=serial.PARITY_EVEN,
                        rtscts=1)
    s = ser.read(100)
    print struct.unpack('!f', s[:4])
    ser.write(struct.pack('!f', 111.111))
if __name__ == "__main__":
    logging.basicConfig(filename='udp-server.log', filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    run_server()
| Yuvv/LearnTestDemoTempMini | py-socket/NodeMonitor/pys.py | Python | mit | 641 |
import namespaces_lib as namespaces
import datasets_lib as datasets
import projects_lib as projects
import accounts_lib as accounts
import events_lib as events
import queries_lib as queries
import unittest
import common
from pprint import pprint
import time
teardown = False
class TestCase(unittest.TestCase):
accountId1 = None
accountId2 = None
accountId3 = None
accountId4 = None
    projectId11 = None
    projectId12 = None
    projectId21 = None
    projectId22 = None
def setUp(self):
common.remove_all_accounts()
self.accountId1, self.accountId2, self.session1, self.session2, self.projectId11, self.projectId12, self.projectId21, self.projectId22 = common.two_accounts_with_projects(None, None)
self.namespace = namespaces.create_namespace(name = "/a/b/c", projectId = self.projectId11, session = self.session1)
"""def tearDown(self):
if teardown == True:
if self.accountId1 != None:
accounts.delete_account(self.accountId1, self.session1)
if self.accountId2 != None:
accounts.delete_account(self.accountId2, self.session2)
if self.accountId3 != None:
accounts.delete_account(self.accountId3, self.session3)
if self.accountId4 != None:
accounts.delete_account(self.accountId4, self.session4)"""
def test_create_dataset(self):
# 1. ok
datasetId = datasets.create_basic_dataset(self.projectId11, self.session1, name = 'name1', namespace = "/a/b/c")
ret = datasets.get_dataset(datasetId, self.session1)
assert ret["datasetId"] == datasetId
assert ret["name"] == 'name1'
assert ret["primaryKey"] == "col1,col2"
#assert ret["namespace"] == "/a/b/c"
assert len(ret["columns"]) == 3
print(ret)
# 2. missing name
dataset = {
'projectId':self.projectId11
}
ret = datasets.create_dataset(dataset, self.session1, status = 400, error = "ErrBadRequest")
# 3. invalid name
dataset = {
'projectId':self.projectId11,
'name' : '012asdasd'
}
ret = datasets.create_dataset(dataset, self.session1, status = 400, error = "ErrBadRequest")
# 3. missing projectId
dataset = {
'name':'dilldall',
'projectId':None
}
ret = datasets.create_dataset(dataset, self.session1, status = 400, error = "ErrBadRequest")
# 4. already exists
ret = datasets.create_basic_dataset(self.projectId11, self.session1, name = 'name1')
# 5. no session
ret = datasets.create_basic_dataset(self.projectId11, session = None, status=401, error="ErrNotAuthorized")
# 6. wrong session
ret = datasets.create_basic_dataset(self.projectId11, session = self.session2, status=401, error="ErrNotAuthorized")
ret = datasets.create_basic_dataset(self.projectId21, session = self.session1, status=401, error="ErrNotAuthorized")
def test_create_dataset_upsert(self):
# 1. ok
dataset = {
'name':'adadadas',
'projectId':self.projectId11,
'columns': [
{'name':'col1', 'dataType': 'date'},
{'name':'col2', 'dataType': 'string'},
{'name':'col3', 'dataType': 'number'},
],
'primaryKey' : 'col1,col2',
'dateColumn' : 'col1',
'description' : 'adasd',
'updatemode' : 'upsert'
}
datasetId = datasets.create_dataset(dataset, self.session1)
def test_delete_dataset(self):
datasetId1, nouse = common.two_datasets(self.projectId11, self.session1, 'test_delete_dataset')
datasetId2, nouse = common.two_datasets(self.projectId21, self.session2, 'test_delete_dataset')
#1. no session, not ok
ret = datasets.delete_dataset(datasetId2, session = None, status=401, error="ErrNotAuthorized")
#2. wrong session, not ok
ret = datasets.delete_dataset(datasetId2, session = self.session1, status=401, error="ErrNotAuthorized")
#3. wrong session, not ok
ret = datasets.delete_dataset(datasetId1, session = self.session2, status=401, error="ErrNotAuthorized")
#4. ok
ret = datasets.delete_dataset(datasetId2, self.session2)
#5. delete again, not found
ret = datasets.delete_dataset(datasetId2, self.session2, status=404, error="ErrDatasetNotFound")
#6. ok
ret = datasets.delete_dataset(datasetId1, self.session1)
#7. delete again, not found
ret = datasets.delete_dataset(datasetId1, self.session1, status=404, error="ErrDatasetNotFound")
def test_get_dataset(self):
datasetId1, nouse = common.two_datasets(self.projectId11, self.session1, 'test_get_dataset')
datasetId2, nouse = common.two_datasets(self.projectId21, self.session2, 'test_get_dataset')
#1. no session, not ok
ret = datasets.get_dataset(datasetId2, session = None, status=401, error="ErrNotAuthorized")
#2. wrong session, not ok
ret = datasets.get_dataset(datasetId2, session = self.session1, status=401, error="ErrNotAuthorized")
#3. wrong session, not ok
ret = datasets.get_dataset(datasetId1, session = self.session2, status=401, error="ErrNotAuthorized")
#4. ok
ret = datasets.get_dataset(datasetId2, self.session2)
assert ret["name"] == "name1_" + self.projectId21 + "_test_get_dataset"
assert len(ret["columns"]) == 3
#6. ok
ret = datasets.get_dataset(datasetId1, self.session1)
assert ret["name"] == "name1_" + self.projectId11+ "_test_get_dataset"
assert len(ret["columns"]) == 3
def test_get_project_with_two_datasets(self):
projectId1, projectId2, datasetId12, datasetId12,datasetId22, datasetId22 = common.two_projects_with_datasets(self.accountId1, self.session1, 'test_get_project_with_two_datasets')
ret = projects.get_project(projectId1, self.session1)
pprint(ret)
assert ret["name"] == self.accountId1 + "_name1"
for ds in ret["datasets"]:
if ds["name"] == "__ingestion_log":
assert len(ds["columns"]) == 2
else:
assert len(ds["columns"]) == 3
def test_contributor(self):
account = accounts.get_account(self.accountId2, self.session2)
pprint(account)
ret = projects.add_contributor(self.projectId11, account["username"], session = self.session1)
project = projects.get_project(self.projectId11, self.session2)
#1. Create dataset, ok
datasetId = datasets.create_basic_dataset(self.projectId11, self.session2, name = 'session2datasetInProjectId11')
ret = datasets.get_dataset(datasetId, self.session2)
assert ret["datasetId"] == datasetId
assert ret["name"] == 'session2datasetInProjectId11'
assert ret["primaryKey"] == "col1,col2"
assert len(ret["columns"]) == 3
#2. Delete dataset, ok
ret = datasets.delete_dataset(datasetId, self.session2)
ret = datasets.get_dataset(datasetId, self.session2, 404, "ErrDatasetNotFound")
#3. Add columns
# TODO
def test_truncate_dataset(self):
projectId11 = self.projectId11
projectId21 = self.projectId21
session1 = self.session1
session2 = self.session2
datasetId1, nouse = common.two_datasets(projectId11, session1, 'test_truncate_dataset')
datasetId2, nouse = common.two_datasets(projectId21, session2, 'test_truncate_dataset')
datasetName1 = 'name1_' + projectId11 + '_test_truncate_dataset'
datasetName2 = 'name1_' + projectId21 + '_test_truncate_dataset'
common.dataset_with_events(datasetName1, projectId11)
common.dataset_with_events(datasetName2, projectId21)
time.sleep(10)
self.expect_data(datasetName1, projectId11, 10)
self.expect_data(datasetName2, projectId21, 10)
#1. no session, not ok
ret = datasets.truncate_dataset(datasetId2, session = None, status=401, error="ErrNotAuthorized")
#2. wrong session, not ok
ret = datasets.truncate_dataset(datasetId2, session = session1, status=401, error="ErrNotAuthorized")
#3. wrong session, not ok
ret = datasets.truncate_dataset(datasetId1, session = session2, status=401, error="ErrNotAuthorized")
#4. ok
ret = datasets.truncate_dataset(datasetId1, session1)
self.expect_data(datasetName1, projectId11, 0)
#5. ok
ret = datasets.truncate_dataset(datasetId2, session2)
self.expect_data(datasetName2, projectId21, 0)
#6. ok
ret = datasets.truncate_dataset(datasetId1, session1)
def test_create_same_dataset_in_diff_namespaces(self):
# 1. ok
datasetId1 = datasets.create_basic_dataset(self.projectId11, self.session1, name = 'name1', namespace = "/a/b/c")
datasetId2 = datasets.create_basic_dataset(self.projectId11, self.session1, name = 'name1', namespace = "/a/b")
ret = datasets.get_dataset(datasetId1, session = self.session1)
ret = datasets.get_dataset(datasetId2, session = self.session1)
def expect_data(self, datasetName, projectId, cnt):
query = {
"sql" : "select count(*) as cnt, col3 from " + datasetName + " where col3 between 10 and 20 GROUP BY col3",
"dataTypes": ["number"]
}
ret = queries.query(projectId, query)
print(ret)
if cnt > 0:
assert ret["results"][0][0] == str(cnt)
else:
assert len(ret["results"]) == 0
| ericmort/axelerator | backend/tests/test_datasets.py | Python | mit | 9,884 |
#!/usr/bin/env python
# Written by Greg Ver Steeg
# See readme.pdf for documentation
# Or go to http://www.isi.edu/~gregv/npeet.html
import scipy.spatial as ss
from scipy.special import digamma
from math import log
import numpy.random as nr
import numpy as np
import random
# CONTINUOUS ESTIMATORS
def entropy(x, k=3, base=2):
""" The classic K-L k-nearest neighbor continuous entropy estimator
x should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
d = len(x[0])
N = len(x)
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
tree = ss.cKDTree(x)
nn = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]
const = digamma(N) - digamma(k) + d * log(2)
return (const + d * np.mean(map(log, nn))) / log(base)
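# Illustrative check (hypothetical data, not in the original): for samples drawn
# from a standard normal, the estimate should approach the analytic differential
# entropy 0.5*log2(2*pi*e) ~= 2.05 bits.
#   x = [[v] for v in nr.randn(1000)]
#   print(entropy(x, k=3, base=2))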
def centropy(x, y, k=3, base=2):
""" The classic K-L k-nearest neighbor continuous entropy estimator for the
entropy of X conditioned on Y.
"""
hxy = entropy([xi + yi for (xi, yi) in zip(x, y)], k, base)
hy = entropy(y, k, base)
return hxy - hy
def column(xs, i):
return [[x[i]] for x in xs]
def tc(xs, k=3, base=2):
xis = [entropy(column(xs, i), k, base) for i in range(0, len(xs[0]))]
return np.sum(xis) - entropy(xs, k, base)
def ctc(xs, y, k=3, base=2):
xis = [centropy(column(xs, i), y, k, base) for i in range(0, len(xs[0]))]
return np.sum(xis) - centropy(xs, y, k, base)
def corex(xs, ys, k=3, base=2):
cxis = [mi(column(xs, i), ys, k, base) for i in range(0, len(xs[0]))]
return np.sum(cxis) - mi(xs, ys, k, base)
def mi(x, y, k=3, base=2):
""" Mutual information of x and y
x, y should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x) == len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
y = [list(p + intens * nr.rand(len(y[0]))) for p in y]
points = zip2(x, y)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x))
return (-a - b + c + d) / log(base)
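# Illustrative check (hypothetical data): MI between two tightly coupled Gaussians
# should come out well above zero, while independent draws give a value near zero.
#   x = vectorize(nr.randn(1000))
#   y = [[xi[0] + 0.1 * nr.randn()] for xi in x]
#   print(mi(x, y, k=3, base=2))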
def cmi(x, y, z, k=3, base=2):
""" Mutual information of x and y, conditioned on z
x, y, z should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x) == len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
y = [list(p + intens * nr.rand(len(y[0]))) for p in y]
z = [list(p + intens * nr.rand(len(z[0]))) for p in z]
points = zip2(x, y, z)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(zip2(x, z), dvec), avgdigamma(zip2(y, z), dvec), avgdigamma(z, dvec), digamma(k)
return (-a - b + c + d) / log(base)
def kldiv(x, xp, k=3, base=2):
""" KL Divergence between p and q for x~p(x), xp~q(x)
x, xp should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
assert k <= len(xp) - 1, "Set k smaller than num. samples - 1"
assert len(x[0]) == len(xp[0]), "Two distributions must have same dim."
d = len(x[0])
n = len(x)
m = len(xp)
const = log(m) - log(n - 1)
tree = ss.cKDTree(x)
treep = ss.cKDTree(xp)
nn = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]
nnp = [treep.query(point, k, p=float('inf'))[0][k - 1] for point in x]
return (const + d * np.mean(map(log, nnp)) - d * np.mean(map(log, nn))) / log(base)
# DISCRETE ESTIMATORS
def entropyd(sx, base=2):
""" Discrete entropy estimator
Given a list of samples which can be any hashable object
"""
return entropyfromprobs(hist(sx), base=base)
def midd(x, y, base=2):
""" Discrete mutual information estimator
Given a list of samples which can be any hashable object
"""
return -entropyd(zip(x, y), base) + entropyd(x, base) + entropyd(y, base)
def cmidd(x, y, z):
""" Discrete mutual information estimator
Given a list of samples which can be any hashable object
"""
return entropyd(zip(y, z)) + entropyd(zip(x, z)) - entropyd(zip(x, y, z)) - entropyd(z)
def centropyd(x, y, base=2):
""" The classic K-L k-nearest neighbor continuous entropy estimator for the
entropy of X conditioned on Y.
"""
return entropyd(zip(x, y), base) - entropyd(y, base)
def tcd(xs, base=2):
xis = [entropyd(column(xs, i), base) for i in range(0, len(xs[0]))]
hx = entropyd(xs, base)
return np.sum(xis) - hx
def ctcd(xs, y, base=2):
xis = [centropyd(column(xs, i), y, base) for i in range(0, len(xs[0]))]
return np.sum(xis) - centropyd(xs, y, base)
def corexd(xs, ys, base=2):
cxis = [midd(column(xs, i), ys, base) for i in range(0, len(xs[0]))]
return np.sum(cxis) - midd(xs, ys, base)
def hist(sx):
sx = discretize(sx)
# Histogram from list of samples
d = dict()
for s in sx:
if type(s) == list:
s = tuple(s)
d[s] = d.get(s, 0) + 1
return map(lambda z: float(z) / len(sx), d.values())
def entropyfromprobs(probs, base=2):
# Turn a normalized list of probabilities of discrete outcomes into entropy (base 2)
return -sum(map(elog, probs)) / log(base)
def elog(x):
# for entropy, 0 log 0 = 0. but we get an error for putting log 0
if x <= 0. or x >= 1.:
return 0
else:
return x * log(x)
# MIXED ESTIMATORS
def micd(x, y, k=3, base=2, warning=True):
""" If x is continuous and y is discrete, compute mutual information
"""
overallentropy = entropy(x, k, base)
n = len(y)
word_dict = dict()
for i in range(len(y)):
if type(y[i]) == list:
y[i] = tuple(y[i])
for sample in y:
word_dict[sample] = word_dict.get(sample, 0) + 1. / n
yvals = list(set(word_dict.keys()))
mi = overallentropy
for yval in yvals:
xgiveny = [x[i] for i in range(n) if y[i] == yval]
if k <= len(xgiveny) - 1:
mi -= word_dict[yval] * entropy(xgiveny, k, base)
else:
if warning:
print("Warning, after conditioning, on y=", yval, " insufficient data. Assuming maximal entropy in this case.")
mi -= word_dict[yval] * overallentropy
return np.abs(mi) # units already applied
def midc(x, y, k=3, base=2, warning=True):
return micd(y, x, k, base, warning)
def centropydc(x, y, k=3, base=2, warning=True):
return entropyd(x, base) - midc(x, y, k, base, warning)
def centropycd(x, y, k=3, base=2, warning=True):
return entropy(x, k, base) - micd(x, y, k, base, warning)
def ctcdc(xs, y, k=3, base=2, warning=True):
xis = [centropydc(column(xs, i), y, k, base, warning) for i in range(0, len(xs[0]))]
return np.sum(xis) - centropydc(xs, y, k, base, warning)
def ctccd(xs, y, k=3, base=2, warning=True):
xis = [centropycd(column(xs, i), y, k, base, warning) for i in range(0, len(xs[0]))]
return np.sum(xis) - centropycd(xs, y, k, base, warning)
def corexcd(xs, ys, k=3, base=2, warning=True):
cxis = [micd(column(xs, i), ys, k, base, warning) for i in range(0, len(xs[0]))]
return np.sum(cxis) - micd(xs, ys, k, base, warning)
def corexdc(xs, ys, k=3, base=2, warning=True):
#cxis = [midc(column(xs, i), ys, k, base, warning) for i in range(0, len(xs[0]))]
#joint = midc(xs, ys, k, base, warning)
#return np.sum(cxis) - joint
return tcd(xs, base) - ctcdc(xs, ys, k, base, warning)
# UTILITY FUNCTIONS
def vectorize(scalarlist):
""" Turn a list of scalars into a list of one-d vectors
"""
return [[x] for x in scalarlist]
def shuffle_test(measure, x, y, z=False, ns=200, ci=0.95, **kwargs):
""" Shuffle test
Repeatedly shuffle the x-values and then estimate measure(x, y, [z]).
Returns the mean and conf. interval ('ci=0.95' default) over 'ns' runs.
'measure' could me mi, cmi, e.g. Keyword arguments can be passed.
Mutual information and CMI should have a mean near zero.
"""
xp = x[:] # A copy that we can shuffle
outputs = []
for i in range(ns):
random.shuffle(xp)
if z:
outputs.append(measure(xp, y, z, **kwargs))
else:
outputs.append(measure(xp, y, **kwargs))
outputs.sort()
return np.mean(outputs), (outputs[int((1. - ci) / 2 * ns)], outputs[int((1. + ci) / 2 * ns)])
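# Example use (hypothetical data): the shuffled mean should sit near zero, giving
# a null distribution against which a real mi(x, y) estimate can be compared.
#   x, y = vectorize(nr.randn(500)), vectorize(nr.randn(500))
#   print(shuffle_test(mi, x, y, ns=200, ci=0.95, k=3))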
# INTERNAL FUNCTIONS
def avgdigamma(points, dvec):
# This part finds number of neighbors in some radius in the marginal space
# returns expectation value of <psi(nx)>
N = len(points)
tree = ss.cKDTree(points)
avg = 0.
for i in range(N):
dist = dvec[i]
# subtlety, we don't include the boundary point,
# but we are implicitly adding 1 to kraskov def bc center point is included
num_points = len(tree.query_ball_point(points[i], dist - 1e-15, p=float('inf')))
avg += digamma(num_points) / N
return avg
def zip2(*args):
# zip2(x, y) takes the lists of vectors and makes it a list of vectors in a joint space
# E.g. zip2([[1], [2], [3]], [[4], [5], [6]]) = [[1, 4], [2, 5], [3, 6]]
return [sum(sublist, []) for sublist in zip(*args)]
def discretize(xs):
def discretize_one(x):
if len(x) > 1:
return tuple(x)
else:
return x[0]
# discretize(xs) takes a list of vectors and makes it a list of tuples or scalars
return [discretize_one(x) for x in xs]
if __name__ == "__main__":
print("NPEET: Non-parametric entropy estimation toolbox. See readme.pdf for details on usage.")
| sethuiyer/mlhub | Borda Count/entropy_estimators.py | Python | mit | 10,429 |
# Copyright 2014 John Reese
# Licensed under the MIT license
from app import app
from mc import mc, mcdict, Cacheable
import encoder
from routing import context, api, get, post
from template import template
| jreese/seinfeld | core/__init__.py | Python | mit | 209 |
# Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
#
# For example, given the array [-2,1,-3,4,-1,2,1,-5,4],
# the contiguous subarray [4,-1,2,1] has the largest sum = 6.
class Solution(object):
    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        dp = [0] * n
        res = nums[0]
        dp[0] = nums[0]
        for i in range(1, n):
            dp[i] = nums[i] + (dp[i-1] if dp[i-1] > 0 else 0)
            res = max(res, dp[i])
        return res
        # max_so_far = nums[0]
        # max_ending_here = nums[0]
        # for num in nums[1:]:
        #     max_ending_here = max(max_ending_here + num, num)
        #     max_so_far = max(max_so_far, max_ending_here)
        # return max_so_far
    def maxSubArrayDC(self, nums):
        n = len(nums)
        if n > 1:
            l = self.maxSubArrayDC(nums[:n / 2])
            r = self.maxSubArrayDC(nums[n / 2:])
            s = self.span(nums)
            return max(l, r, s)
        else:
            return nums[0]
    def span(self, nums):
        n = len(nums)
        r1 = nums[n / 2]
        r2 = r1
        for num in nums[(n / 2) + 1:]:
            r2 += num
            r1 = max(r1, r2)
        l1 = nums[(n / 2) - 1]
        l2 = l1
        for num in nums[:(n / 2) - 1][::-1]:
            l2 += num
            l1 = max(l1, l2)
        return r1 + l1
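# Quick check (illustrative): the example from the problem statement above.
#   Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4])  # -> 6
# Note that maxSubArrayDC/span rely on Python 2 integer division via `/`.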
# Note:
# Code is self explanatory
| jigarkb/Programming | LeetCode/053-E-MaximumSubarray.py | Python | mit | 1,509 |
# Copyright 2018 Google LLC
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
import cipher
class W32(cipher.ARXCipher):
_word_bytes = 4
_byteorder = 'little'
w32 = W32()
class W64(cipher.ARXCipher):
_word_bytes = 8
_byteorder = 'little'
w64 = W64()
class NH(cipher.Cipher):
def __init__(self):
super().__init__()
self.choose_variant(lambda x: True)
def variant_name(self):
return self.name()
def variants(self):
yield {
'cipher': 'NH',
'passes': 4,
'word_bytes': w32._word_bytes,
'stride': 2,
'unitcount': 64}
def lengths(self):
v = self.variant
unit = v['word_bytes'] * v['stride'] * 2
return {
'unit': unit,
'messagemax': unit * v['unitcount'],
'key': unit * (v['unitcount'] + v['passes'] - 1),
'hash': v['word_bytes'] * v['passes']
}
def test_input_lengths(self):
v = self.lengths()
for l in [v['unit'], v['messagemax'] - v['unit'], v['messagemax']]:
yield {'key': v['key'], 'message': l}
def make_testvector(self, input, description):
return {
'cipher': self.variant,
'description': description,
'input': input,
'hash': self.nh(**input),
}
def check_testvector(self, tv):
self.variant = tv['cipher']
assert tv['hash'] == self.nh(**tv['input'])
def _nhpass(self, key, message):
stride = self.variant['stride']
return w64._mod(sum(
w32._mod(message[j] + key[j])
* w32._mod(message[j + stride] + key[j + stride])
for i in range(0, len(message), stride * 2)
for j in range(i, i + stride)
))
def _nh_vec(self, key, message):
step = self.variant['stride'] * 2
return [self._nhpass(key[off:off + len(message)], message)
for off in range(0, step * self.variant['passes'], step)]
def nh(self, key, message):
lengths = self.lengths()
assert len(message) > 0
assert len(message) <= lengths['messagemax']
assert len(message) % lengths['unit'] == 0
assert len(key) == lengths['key']
key = w32._to_ints(key)
message = w32._to_ints(message)
return w64._from_ints(self._nh_vec(key, message))
| google/adiantum | python/nh.py | Python | mit | 2,501 |
# coding: utf-8
from fabkit import task, parallel
from fablib.openstack import Horizon
horizon = Horizon()
@task
@parallel
def setup():
horizon.setup()
return {'status': 1}
@task
def restart():
horizon.restart_services()
| syunkitada/fabkit-repo | fabscript/openstack/horizon.py | Python | mit | 240 |
__author__ = 'Jiu Moon'
if __name__ == '__main__':
    # Unresolved git merge conflict in the original; keeping the HEAD side,
    # since `helper` is never imported here. The develop branch instead
    # called helper.greeting('develop').
    print("hi")
| jkm4ca/cs3240-labdemo | dev.py | Python | mit | 130 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Import Command
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from pyknackhq.py23compatible import (
_str_type, _int_types, _number_types, is_py3)
"""
import sys
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
_number_types = (int, float)
is_py3 = True
else:
_str_type = basestring
_int_types = (int, long)
_number_types = (int, long, float)
is_py3 = False | MacHu-GWU/pyknackhq-project | pyknackhq/py23compatible.py | Python | mit | 496 |
"""
Direct.py
Permalink views
"""
from ViewHandler import Handler
from models.Word import Word
import logging
import Tools
class DirectParent(Handler):
"""Parent class for Permalink pages"""
def get(self, word_id):
try:
p = Word.get_by_id(int(word_id))
if p:
logging.info(p)
self.render("direct.html", **{"input_str":p.word_str, "use_css":self.use_css, "word_id":word_id, "is_up_to_date":Tools.isUpToDate(self),})
else:
self.redirect(self.error_page)
except Exception, e:
#logger.debug(e)
self.redirect(self.error_page)
class DirectLinkHandler(DirectParent):
"""Standard Permalink"""
use_css = True
error_page = "/wordnotfound"
class CSSFreeDirectLinkHandler(DirectParent):
"""CSS Free Permalink"""
use_css = False
error_page = "/nocss/wordnotfound" | adtraub/isthatawordadam | views/Direct.py | Python | mit | 921 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from unittest import mock
from preggy import expect
import thumbor.metrics
from tests.base import TestCase
from thumbor.importer import Importer
class DefaultMetricsTestCase(TestCase):
def get_importer(self):
importer = Importer(self.config)
importer.import_modules()
return importer
def test_can_create_context_with_default_metrics(self):
expect(self.context).not_to_be_null()
expect(self.context.metrics).to_be_instance_of(
thumbor.metrics.logger_metrics.Metrics
)
@mock.patch("thumbor.metrics.BaseMetrics.initialize")
def test_can_initizalize_when_request_comes(self, mocked_initialize):
expect(mocked_initialize.call_count).to_equal(0)
self.fetch("/unsafe/smart/image.jpg")
expect(mocked_initialize.call_count).to_equal(1)
| thumbor/thumbor | tests/metrics/test_default_metrics.py | Python | mit | 1,085 |
import urllib2
import re
import bs4
from django.core.management.base import BaseCommand
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.db import DatabaseError
from main.models import Country, CIAWFBEntry
VERBOSE = True
field_data_codes = {'administrative_divisions': '2051',
'age_structure': '2010',
'agriculture_products': '2052',
'airports': '2053',
'airports_with_paved_runways': '2030',
'airports_with_unpaved_runways': '2031',
'area': '2147',
'area_comparative': '2023',
'background': '2028',
'birth_rate': '2054',
'broadcast_media': '2213',
'budget': '2056',
'budget_surplus_or_deficit': '2222',
'capital': '2057',
'carbon_dioxide_emissions_from_consumption_of_energy': '2254',
'central_bank_discount_rate': '2207',
'children_under_the_age_of_5_years_underweight': '2224',
'climate': '2059',
'coastline': '2060',
'commercial_bank_prime_lending_rate': '2208',
'communications_note': '2138',
'constitution': '2063',
'country_name': '2142',
'crude_oil_exports': '2242',
'crude_oil_imports': '2243',
'crude_oil_production': '2241',
'crude_oil_proved_reserves': '2244',
'current_account_balance': '2187',
'death_rate': '2066',
'debt_external': '2079',
'demographic_profile': '2257',
'dependency_status': '2006',
'dependent_areas': '2068',
'diplomatic_representation_from_the_us': '2007',
'diplomatic_representation_in_the_us': '2149',
'disputes_international': '2070',
'distribution_of_family_income_gini_index': '2172',
'drinking_water_source': '2216',
'economy_overview': '2116',
'education_expenditures': '2206',
'electricity_consumption': '2233',
'electricity_exports': '2234',
'electricity_from_fossil_fuels': '2237',
'electricity_from_hydroelectric_plants': '2238',
'electricity_from_nuclear_fuels': '2239',
'electricity_from_other_renewable_sources': '2240',
'electricity_imports': '2235',
'electricity_installed_generating_capacity': '2236',
'electricity_production': '2232',
'elevation_extremes': '2020',
'environment_current_issues': '2032',
'environment_international_agreements': '2033',
'ethnic_groups': '2075',
'exchange_rates': '2076',
'executive_branch': '2077',
'exports': '2078',
'exports_commodities': '2049',
'exports_partners': '2050',
'fiscal_year': '2080',
'flag_description': '2081',
'freshwater_withdrawal_domesticindustrialagricultural': '2202',
'gdp_official_exchange_rate': '2195',
'gdp_purchasing_power_parity': '2001',
'gdp_composition_by_sector': '2012',
'gdp_per_capita_ppp': '2004',
'gdp_real_growth_rate': '2003',
'geographic_coordinates': '2011',
'geography_note': '2113',
'government_note': '2140',
'government_type': '2128',
'health_expenditures': '2225',
'heliports': '2019',
'hivaids_adult_prevalence_rate': '2155',
'hivaids_deaths': '2157',
'hivaids_people_living_with_hivaids': '2156',
'hospital_bed_density': '2227',
'household_income_or_consumption_by_percentage_share': '2047',
'illicit_drugs': '2086',
'imports': '2087',
'imports_commodities': '2058',
'imports_partners': '2061',
'independence': '2088',
'industrial_production_growth_rate': '2089',
'industries': '2090',
'infant_mortality_rate': '2091',
'inflation_rate_consumer_prices': '2092',
'international_law_organization_participation': '2220',
'international_organization_participation': '2107',
'internet_country_code': '2154',
'internet_hosts': '2184',
'internet_users': '2153',
'investment_gross_fixed': '2185',
'irrigated_land': '2146',
'judicial_branch': '2094',
'labor_force': '2095',
'labor_force_by_occupation': '2048',
'land_boundaries': '2096',
'land_use': '2097',
'languages': '2098',
'legal_system': '2100',
'legislative_branch': '2101',
'life_expectancy_at_birth': '2102',
'literacy': '2103',
'location': '2144',
'major_cities_population': '2219',
'major_infectious_diseases': '2193',
'manpower_available_for_military_service': '2105',
'manpower_fit_for_military_service': '2025',
'manpower_reaching_militarily_significant_age_annually': '2026',
'map_references': '2145',
'maritime_claims': '2106',
'market_value_of_publicly_traded_shares': '2200',
'maternal_mortality_rate': '2223',
'median_age': '2177',
'merchant_marine': '2108',
'military_note': '2137',
'military_branches': '2055',
'military_expenditures': '2034',
'military_service_age_and_obligation': '2024',
'national_anthem': '2218',
'national_holiday': '2109',
'national_symbols': '2230',
'nationality': '2110',
'natural_gas_consumption': '2250',
'natural_gas_exports': '2251',
'natural_gas_imports': '2252',
'natural_gas_production': '2249',
'natural_gas_proved_reserves': '2253',
'natural_hazards': '2021',
'natural_resources': '2111',
'net_migration_rate': '2112',
'obesity_adult_prevalence_rate': '2228',
'people_note': '2022',
'physicians_density': '2226',
'pipelines': '2117',
'political_parties_and_leaders': '2118',
'political_pressure_groups_and_leaders': '2115',
'population': '2119',
'population_below_poverty_line': '2046',
'population_growth_rate': '2002',
'ports_and_terminals': '2120',
'public_debt': '2186',
'railways': '2121',
'refined_petroleum_products_consumption': '2246',
'refined_petroleum_products_exports': '2247',
'refined_petroleum_products_imports': '2248',
'refined_petroleum_products_production': '2245',
'refugees_and_internally_displaced_persons': '2194',
'religions': '2122',
'reserves_of_foreign_exchange_and_gold': '2188',
'roadways': '2085',
'sanitation_facility_access': '2217',
'school_life_expectancy_primary_to_tertiary_education': '2205',
'sex_ratio': '2018',
'stock_of_broad_money': '2215',
'stock_of_direct_foreign_investment_abroad': '2199',
'stock_of_direct_foreign_investment_at_home': '2198',
'stock_of_domestic_credit': '2211',
'stock_of_narrow_money': '2214',
'suffrage': '2123',
'taxes_and_other_revenues': '2221',
'telephone_system': '2124',
'telephones_main_lines_in_use': '2150',
'telephones_mobile_cellular': '2151',
'terrain': '2125',
'total_fertility_rate': '2127',
'total_renewable_water_resources': '2201',
'trafficking_in_persons': '2196',
'transportation_note': '2008',
'unemployment_rate': '2129',
'unemployment_youth_ages_15_24': '2229',
'urbanization': '2212',
'waterways': '2093'}
class Command(BaseCommand):
help = 'Updates Country Data from CIA World Factbook'
def handle(self, *args, **options):
def extract_field_data(field_name, field_url):
""" Note: Requires HTML5 Library: pip intall html5lib
"""
country_attribute_list = {}
rootURL = "https://www.cia.gov/library/publications/the-world-factbook/fields/"
fullURL = rootURL + field_url + '.html'
soup = bs4.BeautifulSoup(urllib2.urlopen(fullURL).read())
tables = soup.find_all('table', width="638")
for table in tables:
try:
country = table.find('a', href=re.compile('geos')).text.strip()
except AttributeError:
continue
try:
field_value = table.find('td', class_="category_data").text.strip()
except AttributeError:
continue
country_attribute_list[country] = field_value
return country_attribute_list
def write_field_data_to_db(field_name, field_data):
for country_name in field_data.keys():
# get country if it exists; create it if it doesn't.
country_slug = slugify(country_name)
try:
country = Country.objects.get(url_name=country_slug)
except Country.DoesNotExist:
country = Country(url_name=country_slug)
country.CIAWFB_name_short = country_name
country.save()
# Get CIA WFB Entry if it exists; create it if it doesn't.
try:
CIAWFB_object = CIAWFBEntry.objects.get(country__id=country.id)
except CIAWFBEntry.DoesNotExist:
CIAWFB_object = CIAWFBEntry(country=country, date_entered=timezone.now())
CIAWFB_object.save()
# Now update the field we've got for that CIAWFB entry
db_name = slugify(field_name).replace('-', '_')
try:
setattr(CIAWFB_object, db_name, field_data[country_name])
CIAWFB_object.save()
except DatabaseError:
print('Unable to write field "%s" (country "%s"). Size to write was %s.' %
(db_name, country_name, len(field_data[country_name])))
longest_field = 0
for cname in field_data.keys():
len_data = len(field_data[cname])
if len_data > longest_field:
longest_field = len_data
print("Field: %s; Max Length: %s" % (field_name, longest_field))
raise DatabaseError
for field_name in sorted(field_data_codes.keys()):
if VERBOSE:
print('Processing field: %s' % field_name)
field_data = extract_field_data(field_name, field_data_codes[field_name])
write_field_data_to_db(field_name, field_data)
| johnmarkschofield/country-data.org | countrydata/main/management/commands/updatecountries.py | Python | mit | 12,640 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@author: sharpdeep
@file:webwxapi.py
@time: 2016-02-13 15:47
"""
import inspect
import time
import re
import json
import random,sys,os
import subprocess
import qrcode
from bs4 import BeautifulSoup
from urllib import request, parse
from http.cookiejar import CookieJar
from WxRobot.message import MESSAGE_TYPES, UnKnownMessage
from WxRobot.reply import WeChatReply,TextReply
QRCODE_PATH = os.path.join(os.getcwd(), 'qrcode.jpg')
class WebWxAPI(object):
message_types_dict = {
        1: 'text',  # text message
        3: 'image',  # image message
        34: 'voice',  # voice message
        42: 'recommend',  # contact card
        48: 'sharelocation',  # location sharing
        51: 'initmsg',  # WeChat initialization message
        62: 'video',  # short video
        10002: 'revoke',  # message recall
    }
message_types = message_types_dict.values()
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
self.appid = 'wx782c26e4c19acffb'
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
self.ContactList = []
self.GroupList = []
self.autoReplyMode = False
self.syncHost = ''
self._handlers = dict((k, []) for k in self.message_types)
self._handlers['location'] = []
self._handlers['all'] = []
self._filters = dict()
opener = request.build_opener(request.HTTPCookieProcessor(CookieJar()))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'),
('Referer','https://wx2.qq.com/')]
request.install_opener(opener)
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': 'zh_CN',
'_': int(time.time()),
}
data = self._post(url, params, False)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
result = re.search(regx, data)
if result:
code = result.group(1)
self.uuid = result.group(2)
return code == '200'
return False
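    # The jslogin endpoint answers with a short JS snippet, e.g. (illustrative):
    #   window.QRLogin.code = 200; window.QRLogin.uuid = "QbE4xr9Pmg=="
    # which the regex above captures as (code, uuid).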
def genQRCode(self):
if sys.platform.find('win') >= 0:
self._genQRCodeImg()
self._safe_open(QRCODE_PATH)
else:
mat = self._str2QRMat('https://login.weixin.qq.com/l/' + self.uuid)
self._printQR(mat)
def waitForLogin(self, tip=1):
data = self._get('https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time())))
result = re.search(r'window.code=(\d+);', data)
code = result.group(1)
        if code == '201':  # QR code scanned
            return True
        elif code == '200':  # login succeeded
            result = re.search(r'window.redirect_uri="(\S+?)";', data)
            r_uri = result.group(1) + '&fun=new'
            self.redirect_uri = r_uri
            self.base_uri = r_uri[:r_uri.rfind('/')]
            return True
        elif code == '408':  # login timed out
            return False, 'login timed out'
        else:  # unexpected login state
            return False, 'login error'
def login(self):
data = self._get(self.redirect_uri)
soup = BeautifulSoup(data, "html.parser")
self.skey = soup.skey.text
self.sid = soup.wxsid.text
self.uin = soup.wxuin.text
self.pass_ticket = soup.pass_ticket.text
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic['BaseResponse']['Ret'] == 0
def webwxstatusnotify(self):
url = self.base_uri + '/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
self.MemberList = dic['MemberList']
ContactList = self.MemberList[:]
for i in range(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
            if Contact['VerifyFlag'] & 8 != 0:  # official / service accounts
                ContactList.remove(Contact)
            elif Contact['UserName'] in specialUsers:  # special built-in accounts
                ContactList.remove(Contact)
            elif Contact['UserName'].find('@@') != -1:  # group chats
                self.GroupList.append(Contact)
                ContactList.remove(Contact)
            elif Contact['UserName'] == self.User['UserName']:  # the logged-in user
ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxgetbatchcontact(self,groupid):
if groupid[:2] != '@@':
return None
url = self.base_uri + '/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": groupid, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
group = dic['ContactList'][0]
        # group members (todo)
return group
def webwxsendtextmsg(self,text,to = 'filehelper'):
url = self.base_uri + '/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time()*1000)) + str(random.random())[:5].replace('.','')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": text,
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
# headers = {'content-type': 'application/json; charset=UTF-8'}
# data = json.dumps(params, ensure_ascii=False).encode('utf8')
# r = requests.post(url, data = data, headers = headers)
# dic = r.json()
# return dic['BaseResponse']['Ret'] == 0
dic = self._post(url,params)
return dic['BaseResponse']['Ret'] == 0
def webwxgeticon(self,id):
url = self.base_uri + '/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url,byte_ret=True)
icon_path = os.path.join(os.getcwd(),'icon_'+id+'.jpg')
with open(icon_path,'wb') as f:
f.write(data)
return icon_path
def webwxgetheading(self,id):
url = self.base_uri + '/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url,byte_ret=True)
head_path = os.path.join(os.getcwd(),'head_'+id+'.jpg')
with open(head_path,'wb') as f:
f.write(data)
return head_path
def webwxgetmsgimg(self,msgid):
url = self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
data = self._get(url,byte_ret=True)
return self._save_file(data,'msgimg_' + msgid + '.jpg')
def webwxgetmsgvideo(self,msgid):
url = self.base_uri + '/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url,byte_ret=True)
return self._save_file(data,'msgvideo_'+msgid+'.mp4')
def webwxgetmsgvoice(self,msgid):
url = self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url,byte_ret=True)
return self._save_file(data,'msgvoice_'+msgid+'.mp3')
def testsynccheck(self):
syncHost = [
'webpush.weixin.qq.com',
'webpush1.weixin.qq.com',
'webpush2.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush1.wechatapp.com',
]
for host in syncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if self.DEBUG:
print('[*] test',host,'->',retcode)
if retcode == '0': return True
return False
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + '/cgi-bin/mmwebwx-bin/synccheck?' + parse.urlencode(params)
data = self._get(url)
pm = re.search(r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
def webwxsync(self):
url = self.base_uri + '/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if self.DEBUG:
print(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def sendTextMsg(self,name,text,isfile = False):
id = self.getUserId(name)
if id:
if self.webwxsendtextmsg(text,id):
return True,None
else:
return False,'api调用失败'
else:
return False,'用户不存在'
def listenMsgLoop(self, onExit, onMsgReceive, onPhoneInteract,onIdle,onSyncError):
        # probe the candidate sync hosts for a working one
if not self.testsynccheck():
onSyncError()
while True:
[retcode, selector] = self.synccheck()
if retcode == '1100':
onExit()
break
elif retcode == '0':
if selector == '2':
onMsgReceive()
syncRet = self.webwxsync()
if syncRet is not None:
self.handleMsg(syncRet)
elif selector == '7':
onPhoneInteract()
elif selector == '0':
onIdle()
time.sleep(1)
def handleMsg(self, syncret):
for msg in syncret['AddMsgList']:
message = self._process_message(msg)
handlers = self.get_handler(message.type)
for handler, args_count in handlers:
                # filter the message
filters = self.get_filter(handler)
is_match = self._filte(message,*filters)
if not is_match:
continue
args = [message, ][:args_count]
reply = handler(*args)
if reply:
self._process_reply(reply,message)
def getUserRemarkName(self, id):
name = '未知群' if id[:2] == '@@' else '陌生人'
if id in specialUsers:
return specialUsersDict[id]
for member in self.MemberList:
if self.User['UserName'] == id: # 自己
return '我'
if member['UserName'] == id:
name = member['RemarkName'] if member['RemarkName'] else member['NickName']
return name
        if id[:2] == '@@':  # a group that is not in the contact list yet
newGroup = self.webwxgetbatchcontact(id)
if not newGroup['RemarkName'] and not newGroup['NickName']:
return '未命名群'
self.GroupList.append(newGroup)
name = newGroup['RemarkName'] if newGroup['RemarkName'] else newGroup['NickName']
return name
return name
def getUserId(self,name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def createChatroom(self,userNames):
memberList = [{'UserName':username} for username in userNames]
url = self.base_uri + '/webwxcreatechatroom?pass_ticket=%s&r=%s' % (self.pass_ticket,int(time.time()))
params = {
'BaseRequest':self.BaseRequest,
'MemberCount':len(memberList),
'MemberList':memberList,
'Topic':'',
}
dic = self._post(url = url, params = params)
state = True if dic['BaseResponse']['Ret'] == 0 else False
errMsg = dic['BaseResponse']['ErrMsg']
chatRoomName = dic['ChatRoomName']
memberList = dic['MemberList']
deletedList = []
blockedList = []
for member in memberList:
            if member['MemberStatus'] == 4:  # deleted by the other user
                deletedList.append(member['UserName'])
            elif member['MemberStatus'] == 3:  # blocked (blacklisted)
blockedList.append(member['UserName'])
return state,errMsg,chatRoomName,deletedList,blockedList
def addChatroomMember(self,chatRoomName,userNames):
url = self.base_uri + '/webwxupdatechatroom?fun=addmember&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'ChatRoomName': chatRoomName,
'AddMemberList': ','.join(userNames),
}
dic = self._post(url,params)
state = True if dic['BaseResponse']['Ret'] == 0 else False
errMsg = dic['BaseResponse']['ErrMsg']
memberList = dic['MemberList']
deletedList = []
blockedList = []
for member in memberList:
            if member['MemberStatus'] == 4:  # deleted by the other user
                deletedList.append(member['UserName'])
            elif member['MemberStatus'] == 3:  # blocked (blacklisted)
blockedList.append(member['UserName'])
return state,errMsg,deletedList,blockedList
def delChatroomMember(self,chatRoomName,userNames):
url = self.base_uri + '/webwxupdatechatroom?fun=delmember&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'ChatRoomName': chatRoomName,
'DelMemberList': ','.join(userNames),
}
dic = self._post(url,params)
return dic['BaseResponse']['Ret'] == 0
def getBatchMemberRemarkName(self,groupid,memberid):
name = '陌生人'
for group in self.GroupList:
if group['UserName'] == groupid:
for member in group['MemberList']:
if member['UserName'] == memberid:
name = member['DisplayName'] if member['DisplayName'] else member['NickName']
return name
new_group = self.webwxgetbatchcontact(groupid)
if new_group:
for member in new_group['MemberList']:
if member['UserName'] == memberid:
name = member['DisplayName'] if member['DisplayName'] else member['NickName']
return name
return name
def _process_message(self, message):
message['type'] = self.message_types_dict.get(message.pop('MsgType'), None)
message['FromUserId'] = message.get('FromUserName',None)
message['FromUserName'] = self.getUserRemarkName(message.pop('FromUserName'))
message['ToUserId'] = message.get('ToUserName',None)
message['ToUserName'] = self.getUserRemarkName(message.pop('ToUserName'))
        message['Content'] = message.pop('Content').replace('<br/>', '\n').replace('&lt;', '<').replace('&gt;', '>')
        if message['FromUserId'][:2] == '@@':  # group message
fromMemberId = message['Content'].split(':')[0]
message['FromMemberId'] = fromMemberId
message['FromMemberName'] = self.getBatchMemberRemarkName(message['FromUserId'],fromMemberId)
message['Content'] = ''.join(message['Content'].split(':')[1:])
        if message['type'] == 'text' and message['Content'].find(  # location message
'http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
message['type'] = 'location'
data = self._get(message['Content'],encoding='gbk')
location = self._searchContent('title',data,fmat='xml')
message['location'] = location
message_type = MESSAGE_TYPES.get(message['type'], UnKnownMessage)
return message_type(self,message)
def _process_reply(self,reply,message):
if isinstance(reply,tuple):
for r in reply:
self._process_reply(r,message)
elif isinstance(reply,str):
self.sendTextMsg(message.fromUserName,reply)
elif isinstance(reply,WeChatReply):
            if isinstance(reply, TextReply):  # text reply
self.sendTextMsg(message.fromUserName,reply.content)
else:
raise TypeError('your reply is a %s,reply should be str or WechatReply instance'%type(reply))
def _str2QRMat(self, str):
qr = qrcode.QRCode()
qr.border = 1
qr.add_data(str)
mat = qr.get_matrix()
return mat
def _genQRCodeImg(self):
url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
params = {
't': 'webwx',
'_': int(time.time()),
}
req = request.Request(url=url, data=parse.urlencode(params).encode('utf-8'))
response = request.urlopen(req)
data = response.read()
with open(QRCODE_PATH,'wb') as f:
f.write(data)
def _printQR(self, mat):
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
for row in mat:
print(''.join([BLACK if item else WHITE for item in row]))
def _get(self, url,encoding='utf-8',byte_ret = False):
req = request.Request(url)
response = request.urlopen(req)
if byte_ret:
return response.read()
data = response.read().decode(encoding)
return data
def _post(self, url, params, jsonfmt=True,encoding='utf-8',byte_ret = False):
if jsonfmt:
req = request.Request(url=url, data=json.dumps(params,ensure_ascii=False).encode('utf-8'))
req.add_header('ContentType', 'application/json; charset=UTF-8')
else:
req = request.Request(url=url, data=parse.urlencode(params).encode('utf-8'))
response = request.urlopen(req)
if byte_ret:
return response.read()
data = response.read().decode(encoding)
return json.loads(data) if jsonfmt else data
def _filte(self,message,*filters):
is_match = True
if len(filters) == 0:
return is_match
for filter in filters:
if not is_match:
break
            if filter[0] and message.fromUserName not in filter[0]:  # filter by fromUserName
                is_match = False
            if filter[1] and message.toUserName not in filter[1]:  # filter by toUserName
                is_match = False
            if filter[2] and not self._filte_content(message, *filter[2]):  # filter by content
is_match = False
is_match = not is_match if filter[3] else is_match
return is_match
def _filte_content(self,message,*args):
if len(args) > 1:
for x in args:
if self._filte_content(message,x):
return True
return False
else:
target_content = args[0]
if isinstance(target_content,str):
return target_content == message.content
            elif hasattr(target_content, "match") and callable(target_content.match):  # regex match
return target_content.match(message.content)
else:
raise TypeError("%s is not a valid target_content" % target_content)
def allMsg(self,func):
self.add_handler(func)
return func
def textMsg(self, func):
self.add_handler(func, type='text')
return func
def imageMsg(self, func):
self.add_handler(func, type='image')
return func
def videoMsg(self, func):
self.add_handler(func, type='video')
return func
def voiceMsg(self, func):
self.add_handler(func, type='voice')
return func
def sharelocation(self, func):
self.add_handler(func, type='sharelocation')
return func
def location(self, func):
self.add_handler(func, type='location')
return func
def recommend(self,func):
self.add_handler(func,type='recommend')
return func
def revoke(self,func):
self.add_handler(func,type='revoke')
return func
def initMsg(self, func):
self.add_handler(func, type='initmsg')
return func
def textFilter(self,*args,beside = False):
def wrapper(func):
self.add_filter(func,content=args,beside=beside)
return func
return wrapper
def sourceFilter(self,*fromUserNames,beside = False):
def wrapper(func):
self.add_filter(func,fromUserNames=fromUserNames,beside=beside)
return func
return wrapper
def targetFilter(self,*toUserNames,beside = False):
def wrapper(func):
self.add_filter(func,toUserNames=toUserNames,beside=beside)
return func
return wrapper
def filter(self,*args,beside = False):
args_is_list = False
if len(args) > 1:
args_is_list = True
elif len(args) == 0:
            raise ValueError('filter should have at least 1 argument')
else:
target_content = args[0]
if isinstance(target_content,str):
def _compareContent(message):
compareResult = (target_content == message.content)
return compareResult if not beside else not compareResult
            elif hasattr(target_content, "match") and callable(target_content.match):  # regex match
def _compareContent(message):
compareResult = target_content.match(message.content)
return compareResult if not beside else not compareResult
else:
raise TypeError("%s is not a valid target_content" % target_content)
def wrapper(f):
if args_is_list:
for x in args:
self.filter(x)(f)
return f
@self.textMsg
def _f(message):
if _compareContent(message):
return f(message)
return f
return wrapper
def add_handler(self, func, type='all'):
if not callable(func):
raise ValueError("{} is not callable".format(func))
self._handlers[type].append((func, len(inspect.getargspec(func).args)))
def add_filter(self,func,fromUserNames = None,toUserNames = None,content = None,beside = False):
fromUserNames = None if isinstance(fromUserNames,tuple) and len(fromUserNames) == 0 else fromUserNames
toUserNames = None if isinstance(toUserNames,tuple) and len(toUserNames) == 0 else toUserNames
content = None if isinstance(content,tuple) and len(content) == 0 else content
if not self._filters.get(func):
self._filters[func] = []
self._filters[func].append((fromUserNames,toUserNames,content,beside))
def get_handler(self, type):
return self._handlers.get(type, []) + self._handlers['all']
def get_filter(self,func):
return self._filters.get(func,[])
def _searchContent(self, key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm: return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if pm: return pm.group(1)
return '未知'
def _save_file(self,data,file_name):
file_type = file_name[:file_name.find('_')]
if file_type == 'msgimg':
path = self._mkdir(os.path.join(os.getcwd(),'images'))
elif file_type == 'msgvoice':
path = self._mkdir(os.path.join(os.getcwd(),'voices'))
elif file_type == 'msgvideo':
path = self._mkdir(os.path.join(os.getcwd(),'videos'))
elif file_type == 'icon':
path = self._mkdir(os.path.join(os.getcwd(),'icons'))
else:
path = self._mkdir(os.path.join(os.getcwd(),'tmp'))
path = os.path.join(path,file_name)
with open(path,'wb') as f:
f.write(data)
return path
def _mkdir(self,path):
if not os.path.exists(path):
self._mkdir(os.path.split(path)[0])
os.mkdir(path)
elif not os.path.isdir(path):
return False
return path
def _safe_open(self,file_path):
try:
if sys.platform.find('darwin') >= 0:
subprocess.call(['open',file_path])
elif sys.platform.find('linux') >= 0:
subprocess.call(['xdg-open',file_path])
else:
os.startfile(file_path)
return True
except:
return False
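# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how the decorator-based handler API above is meant to be wired
# up: a function registered with textMsg() receives text messages, and a plain
# string returned from it is sent back by _process_reply(). Actually receiving
# messages still requires the QR-code login flow (getUUID -> genQRCode ->
# waitForLogin -> login -> webwxinit) followed by listenMsgLoop(), none of
# which is triggered here.
if __name__ == '__main__':
    api = WebWxAPI()
    @api.textMsg
    def echo(message):
        # a plain string reply is sent back to the sender as a text message
        return 'echo: ' + message.content
    print(api)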
specialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage',
'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp',
'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp', 'voip', 'blogappweixin', 'weixin',
'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c',
'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil',
'userexperience_alarm', 'notification_messages']
specialUsersDict = {
'blogapp': '微博阅读',
'blogappweixin': '微博阅读',
'brandsessionholder': 'brandsessionholder',
'facebookapp': 'Facebook',
'feedsapp': '朋友圈',
'filehelper': '文件传输助手',
'floatbottle': '漂流瓶',
'fmessage': '朋友圈推荐消息',
'gh_22b87fa7cb3c': 'gh_22b87fa7cb3c',
'lbsapp': 'lbsapp',
'masssendapp': '群发助手',
'medianote': '语音记事本',
'meishiapp': 'meishiapp',
'newsapp': '腾讯新闻',
'notification_messages': 'notification_messages',
'officialaccounts': 'officialaccounts',
'qmessage': 'QQ离线助手',
'qqfriend': 'qqfriend',
'qqmail': 'QQ邮箱',
'qqsync': '通讯录同步助手',
'readerapp': 'readerapp',
'shakeapp': '摇一摇',
'tmessage': 'tmessage',
'userexperience_alarm': '用户体验报警',
'voip': 'voip',
'weibo': 'weibo',
'weixin': '微信',
'weixinreminder': 'weixinreminder',
'wxid_novlwrv3lqwv11': 'wxid_novlwrv3lqwv11',
'wxitil': '微信小管家'
}
| sharpdeep/WxRobot | WxRobot/webwxapi.py | Python | mit | 28,922 |
__author__ = 'Anton'
import webapp2
#from google.appengine.ext import webapp
class Error404(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write('Error 404: Page not found')
| sloot14/flexifod | view/error.py | Python | mit | 270 |
""" The Read-Evaluate-Print loop.
"""
import platform
import sys
import select
import time
import traceback
from ..util.StringBuffer import StringBuffer
from ..visitor.Parser import parseFromString
from ..eval.Interpreter import Interpreter
from ..eval.PLambdaException import PLambdaException
from ..version import plambda_version
def main():
return rep(sys.argv[1] if len(sys.argv) == 2 else None)
def snarf(delay):
""" read as much as possible without blocking. (won't work on windows) """
if platform.system() == 'Windows':
return sys.stdin.readline().strip()
sb = StringBuffer()
count = 0
while True:
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if line:
count += 1
sb.append(line)
if sb.isempty():
time.sleep(delay)
else:
if count > 1:
return str(sb)
return str(sb).strip()
def rep(filename):
"""The Read Eval Print loop for the plambda language.
"""
interpreter = Interpreter()
debug = False
try:
try:
interpreter.load(filename)
except Exception as e:
print(f'Loading {filename} failed because ', e)
return 1
sys.stdout.write(WELCOME)
while True:
try:
sys.stdout.write('> ')
sys.stdout.flush()
line = snarf(0.2)
if line == 'q':
return 0
if line == 'v':
debug = not debug
elif line == '?':
sys.stdout.write(INSTRUCTIONS)
elif line == 'd':
interpreter.showDefinitions()
elif line == 'u':
interpreter.showUIDs()
else:
if line:
if debug:
print('rep: line = ', line)
code = parseFromString(line)
for c in code:
if c is not None:
if debug:
print('rep: sexp = ', c)
value = interpreter.evaluate(c)
if debug:
print('rep: value = ', value)
print(value)
except PLambdaException as e:
print('PLambda.rep PLambdaException: ', e)
except Exception as e:
print('PLambda.rep Exception: ', e)
traceback.print_exc(file=sys.stderr)
except KeyboardInterrupt:
return 0
WELCOME = f'\nWelcome to the PLambda interface to Python (version {plambda_version}), type ? for help.\n'
INSTRUCTIONS = """
Type one of the following:
\tany valid plambda expression to be evaluated, or
\tq to quit
\t? to see these instructions
\td to see the current definitions
\tu to see the current uids
\tv to toggle the degree of verbosity in error reports
"""
| SRI-CSL/PLambda | plambda/eval/PLambda.py | Python | mit | 3,131 |
"""
MIT License
Copyright (c) 2013 Scott Kuroda <[email protected]>
SHA: 623a4c1ec46dbbf3268bd88131bf0dfc845af787
"""
import sublime
import os
import zipfile
import tempfile
import re
import codecs
__all__ = [
"get_resource",
"get_binary_resource",
"find_resource",
"list_package_files",
"get_package_and_resource_name",
"get_packages_list",
"extract_package",
"get_sublime_packages"
]
VERSION = int(sublime.version())
def get_resource(package_name, resource, encoding="utf-8"):
return _get_resource(package_name, resource, encoding=encoding)
def get_binary_resource(package_name, resource):
return _get_resource(package_name, resource, return_binary=True)
def _get_resource(package_name, resource, return_binary=False, encoding="utf-8"):
packages_path = sublime.packages_path()
content = None
if VERSION > 3013:
try:
if return_binary:
content = sublime.load_binary_resource("Packages/" + package_name + "/" + resource)
else:
content = sublime.load_resource("Packages/" + package_name + "/" + resource)
except IOError:
pass
else:
path = None
if os.path.exists(os.path.join(packages_path, package_name, resource)):
path = os.path.join(packages_path, package_name, resource)
content = _get_directory_item_content(path, return_binary, encoding)
if VERSION >= 3006:
sublime_package = package_name + ".sublime-package"
packages_path = sublime.installed_packages_path()
if content is None:
if os.path.exists(os.path.join(packages_path, sublime_package)):
content = _get_zip_item_content(os.path.join(packages_path, sublime_package), resource, return_binary, encoding)
packages_path = os.path.dirname(sublime.executable_path()) + os.sep + "Packages"
if content is None:
if os.path.exists(os.path.join(packages_path, sublime_package)):
content = _get_zip_item_content(os.path.join(packages_path, sublime_package), resource, return_binary, encoding)
return content
def find_resource(resource_pattern, package=None):
file_set = set()
    if package is None:
for package in get_packages_list():
file_set.update(find_resource(resource_pattern, package))
ret_list = list(file_set)
else:
file_set.update(_find_directory_resource(os.path.join(sublime.packages_path(), package), resource_pattern))
if VERSION >= 3006:
zip_location = os.path.join(sublime.installed_packages_path(), package + ".sublime-package")
file_set.update(_find_zip_resource(zip_location, resource_pattern))
zip_location = os.path.join(os.path.dirname(sublime.executable_path()), "Packages", package + ".sublime-package")
file_set.update(_find_zip_resource(zip_location, resource_pattern))
ret_list = map(lambda e: package + "/" + e, file_set)
return sorted(ret_list)
def list_package_files(package, ignore_patterns=[]):
"""
List files in the specified package.
"""
package_path = os.path.join(sublime.packages_path(), package, "")
path = None
file_set = set()
file_list = []
if os.path.exists(package_path):
for root, directories, filenames in os.walk(package_path):
temp = root.replace(package_path, "")
for filename in filenames:
file_list.append(os.path.join(temp, filename))
file_set.update(file_list)
if VERSION >= 3006:
sublime_package = package + ".sublime-package"
packages_path = sublime.installed_packages_path()
if os.path.exists(os.path.join(packages_path, sublime_package)):
file_set.update(_list_files_in_zip(packages_path, sublime_package))
packages_path = os.path.dirname(sublime.executable_path()) + os.sep + "Packages"
if os.path.exists(os.path.join(packages_path, sublime_package)):
file_set.update(_list_files_in_zip(packages_path, sublime_package))
file_list = []
for filename in file_set:
if not _ignore_file(filename, ignore_patterns):
file_list.append(_normalize_to_sublime_path(filename))
return sorted(file_list)
def _ignore_file(filename, ignore_patterns=[]):
ignore = False
directory, base = os.path.split(filename)
for pattern in ignore_patterns:
if re.match(pattern, base):
return True
if len(directory) > 0:
ignore = _ignore_file(directory, ignore_patterns)
return ignore
def _normalize_to_sublime_path(path):
path = os.path.normpath(path)
path = re.sub(r"^([a-zA-Z]):", "/\\1", path)
path = re.sub(r"\\", "/", path)
return path
def get_package_and_resource_name(path):
"""
This method will return the package name and resource name from a path.
Arguments:
path Path to parse for package and resource name.
"""
package = None
resource = None
path = _normalize_to_sublime_path(path)
if os.path.isabs(path):
packages_path = _normalize_to_sublime_path(sublime.packages_path())
if path.startswith(packages_path):
package, resource = _search_for_package_and_resource(path, packages_path)
if int(sublime.version()) >= 3006:
packages_path = _normalize_to_sublime_path(sublime.installed_packages_path())
if path.startswith(packages_path):
package, resource = _search_for_package_and_resource(path, packages_path)
packages_path = _normalize_to_sublime_path(os.path.dirname(sublime.executable_path()) + os.sep + "Packages")
if path.startswith(packages_path):
package, resource = _search_for_package_and_resource(path, packages_path)
else:
path = re.sub(r"^Packages/", "", path)
split = re.split(r"/", path, 1)
package = split[0]
package = package.replace(".sublime-package", "")
resource = split[1]
return (package, resource)
def get_packages_list(ignore_packages=True, ignore_patterns=[]):
"""
Return a list of packages.
"""
package_set = set()
package_set.update(_get_packages_from_directory(sublime.packages_path()))
if int(sublime.version()) >= 3006:
package_set.update(_get_packages_from_directory(sublime.installed_packages_path(), ".sublime-package"))
executable_package_path = os.path.dirname(sublime.executable_path()) + os.sep + "Packages"
package_set.update(_get_packages_from_directory(executable_package_path, ".sublime-package"))
if ignore_packages:
ignored_list = sublime.load_settings(
"Preferences.sublime-settings").get("ignored_packages", [])
else:
ignored_list = []
for package in package_set:
for pattern in ignore_patterns:
if re.match(pattern, package):
ignored_list.append(package)
break
for ignored in ignored_list:
package_set.discard(ignored)
return sorted(list(package_set))
def get_sublime_packages(ignore_packages=True, ignore_patterns=[]):
package_list = get_packages_list(ignore_packages, ignore_patterns)
extracted_list = _get_packages_from_directory(sublime.packages_path())
return [x for x in package_list if x not in extracted_list]
def _get_packages_from_directory(directory, file_ext=""):
package_list = []
for package in os.listdir(directory):
if not package.endswith(file_ext):
continue
else:
package = package.replace(file_ext, "")
package_list.append(package)
return package_list
def _search_for_package_and_resource(path, packages_path):
"""
Derive the package and resource from a path.
"""
relative_package_path = path.replace(packages_path + "/", "")
package, resource = re.split(r"/", relative_package_path, 1)
package = package.replace(".sublime-package", "")
return (package, resource)
def _list_files_in_zip(package_path, package):
if not os.path.exists(os.path.join(package_path, package)):
return []
ret_value = []
with zipfile.ZipFile(os.path.join(package_path, package)) as zip_file:
ret_value = zip_file.namelist()
return ret_value
def _get_zip_item_content(path_to_zip, resource, return_binary, encoding):
if not os.path.exists(path_to_zip):
return None
ret_value = None
with zipfile.ZipFile(path_to_zip) as zip_file:
namelist = zip_file.namelist()
if resource in namelist:
ret_value = zip_file.read(resource)
if not return_binary:
ret_value = ret_value.decode(encoding)
return ret_value
def _get_directory_item_content(filename, return_binary, encoding):
content = None
if os.path.exists(filename):
if return_binary:
mode = "rb"
encoding = None
else:
mode = "r"
with codecs.open(filename, mode, encoding=encoding) as file_obj:
content = file_obj.read()
return content
def _find_zip_resource(path_to_zip, pattern):
ret_list = []
if os.path.exists(path_to_zip):
with zipfile.ZipFile(path_to_zip) as zip_file:
namelist = zip_file.namelist()
for name in namelist:
if re.search(pattern, name):
ret_list.append(name)
return ret_list
def _find_directory_resource(path, pattern):
ret_list = []
if os.path.exists(path):
path = os.path.join(path, "")
for root, directories, filenames in os.walk(path):
temp = root.replace(path, "")
for filename in filenames:
if re.search(pattern, os.path.join(temp, filename)):
ret_list.append(os.path.join(temp, filename))
return ret_list
def extract_zip_resource(path_to_zip, resource, extract_dir=None):
if extract_dir is None:
extract_dir = tempfile.mkdtemp()
file_location = None
if os.path.exists(path_to_zip):
with zipfile.ZipFile(path_to_zip) as zip_file:
file_location = zip_file.extract(resource, extract_dir)
return file_location
def extract_package(package):
if VERSION >= 3006:
package_location = os.path.join(sublime.installed_packages_path(), package + ".sublime-package")
if not os.path.exists(package_location):
package_location = os.path.join(os.path.dirname(sublime.executable_path()), "Packages", package + ".sublime-package")
if not os.path.exists(package_location):
package_location = None
if package_location:
with zipfile.ZipFile(package_location) as zip_file:
extract_location = os.path.join(sublime.packages_path(), package)
zip_file.extractall(extract_location)
####################### Force resource viewer to reload ########################
import sys
if VERSION > 3000:
from imp import reload
if "PackageResourceViewer.package_resource_viewer" in sys.modules:
reload(sys.modules["PackageResourceViewer.package_resource_viewer"])
else:
if "package_resource_viewer" in sys.modules:
reload(sys.modules["package_resource_viewer"])
| albertyw/sublime-settings | PackageResourceViewer/package_resources.py | Python | mit | 11,378 |
from django.views.generic import TemplateView, ListView, DetailView, CreateView
from .forms import ThreadCreateUpdateForm, ThreadReplyForm
from .mixins import DetailWithListMixin, RequestForFormMixIn
from .models import ForumPost, ForumThread, ForumCategory
class ForumHome(ListView):
model = ForumCategory
def get_queryset(self):
queryset = super(ForumHome, self).get_queryset()
return queryset.filter(parent=None)
class ForumCategoryHome(DetailWithListMixin, DetailView):
model = ForumCategory
def dispatch(self, request, *args, **kwargs):
self.list_model = self.get_list_model()
self.list_attribute = self.get_list_attribute()
return super(ForumCategoryHome, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ForumCategoryHome, self).get_context_data(**kwargs)
if self.list_attribute == 'forumthread_set':
context['list_type'] = 'threads'
else:
context['list_type'] = 'categories'
context['extra_title'] = ' - %s' % self.object.name
return context
def get_list_attribute(self):
if ForumCategory.objects.filter(parent=self.get_object()).count() == 0:
return 'forumthread_set'
else:
return 'children'
def get_list_model(self):
if ForumCategory.objects.filter(parent=self.get_object()).count() == 0:
return ForumThread
else:
return ForumCategory
def get_list_queryset(self):
if ForumCategory.objects.filter(parent=self.get_object()).count() == 0:
return ForumThread.objects.filter(category=self.get_object())
else:
return ForumCategory.objects.filter(parent=self.get_object())
class ForumThreadHome(DetailWithListMixin, DetailView):
model = ForumThread
list_model = ForumPost
list_attribute = 'forumpost_set'
def get_list_queryset(self):
return (self.list_model.objects.filter(thread=self.get_object(),
is_thread_starter=False).order_by('created'))
def get_context_data(self, **kwargs):
context = super(ForumThreadHome, self).get_context_data(**kwargs)
context['extra_title'] = ' - %s' % self.object.title
return context
class ForumThreadCreateView(RequestForFormMixIn, CreateView):
model = ForumThread
form_class = ThreadCreateUpdateForm
def get_form_kwargs(self):
kwargs = super(ForumThreadCreateView, self).get_form_kwargs()
kwargs.update({'category': ForumCategory.objects.get(slug=self.kwargs['slug'])})
return kwargs
def get_context_data(self, **kwargs):
context = super(ForumThreadCreateView, self).get_context_data(**kwargs)
context['extra_title'] = ' - New Thread'
return context
class ForumThreadReply(RequestForFormMixIn, CreateView):
model = ForumThread
form_class = ThreadReplyForm
template_name = 'thatforum/forumpost_form.html'
def get_context_data(self, **kwargs):
context = super(ForumThreadReply, self).get_context_data(**kwargs)
context['posts'] = (
self.get_object().forumpost_set.all().order_by('-created')[:5]
)
context['object'] = self.get_object()
context['show_cancel'] = self.get_object().get_absolute_url()
context['extra_title'] = ' - Reply'
return context
def get_form_kwargs(self):
kwargs = super(ForumThreadReply, self).get_form_kwargs()
kwargs.update({'thread': self.get_object()})
return kwargs
def get_success_url(self):
return self.get_object().get_absolute_url()
# class HomeView(TemplateView):
# template_name = 'home.html'
| hellsgate1001/thatforum_django | thatforum/views.py | Python | mit | 3,749 |
"""Placeholder for scripts to inherit cli interface
"""
import argparse
import sys
import logging
import json
from sforce.client import sf_session
from sforce.models import SObj
class App(object):
def __init__(self, commons):
self.commons = commons
self.args = self.parse_options(sys.argv)
self.session = sf_session(self.commons)
if self.args.debug:
logging.getLogger().setLevel(logging.DEBUG)
def cmd_account(self, options):
""" parses account cli
"""
logging.debug("Running Account Queries")
sobj = SObj('Account', commons=self.commons)
if options.id:
print(json.dumps(sobj.by_id(options.id)))
def cmd_case(self, options):
""" parses case cli
"""
sobj = SObj('Case', commons=self.commons)
if options.number:
            print(json.dumps(sobj.by_id(options.number)))
def parse_options(self, *args, **kwds):
parser = argparse.ArgumentParser(description='Salesforce CLI',
prog='sf-cli')
subparsers = parser.add_subparsers(title='subcommands',
                                           description='valid subcommands',
help='additional help')
# Account
parser_a = subparsers.add_parser('account', help='Query Account help')
parser_a.add_argument('--id', dest='id', help='Account ID')
parser_a.add_argument('--name', dest='name', help='Account Name')
parser_a.set_defaults(func=self.cmd_account)
# Case
parser_b = subparsers.add_parser('case', help='Query Case help')
parser_b.add_argument('--number', dest='number', help='Case number to lookup')
parser_b.set_defaults(func=self.cmd_case)
# Debug
parser.add_argument('-d', '--debug', action='store_true',
dest='debug', default=False, help='Run in debug mode')
return parser.parse_args()
def run(self):
self.args.func(self.args)
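# --- Hypothetical entry-point sketch (not part of the original module) ---
# Shows how App is meant to be driven: build it with the shared connection
# settings and call run(), which dispatches to the argparse-selected
# subcommand handler. The shape of `commons` is an assumption; it is whatever
# sf_session() and SObj expect elsewhere in the package.
if __name__ == '__main__':
    commons = {}  # placeholder for real Salesforce connection settings
    App(commons).run()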
| battlemidget/python-salesforce | sforce/cmd.py | Python | mit | 2,079 |
#!/usr/bin/env python
########################
# PriorityQueue Tool #
# Author: Yudong Qiu #
########################
from heapq import heappush, heappop
import itertools
class PriorityQueue(object):
def __len__(self):
return len(self._pq)
def __iter__(self):
return iter(task for priority, count, task in self._pq)
def __init__(self):
self._pq = []
self.counter = itertools.count()
def push(self, task, priority=0):
"""
Add one task to priority queue
When priority is the same, count ensures the earlier added tasks first
"""
count = next(self.counter)
entry = [priority, count, task]
heappush(self._pq, entry)
def pop(self):
"""
        Pop the task with the smallest priority value; ties are popped in FIFO order
"""
if len(self._pq) == 0:
raise RuntimeError("pop from an empty priority queue")
priority, count, task = heappop(self._pq)
return task
def push_many(self, tasks, priority=0):
for task in tasks:
self.push(task, priority)
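# --- Hypothetical usage sketch (not part of the original module) ---
# heapq keeps a min-heap, so the task pushed with the smallest priority value
# is popped first; among equal priorities the monotonically increasing counter
# preserves insertion (FIFO) order. Task names and priorities are illustrative.
if __name__ == "__main__":
    pq = PriorityQueue()
    pq.push_many(["step-a", "step-b"], priority=2)
    pq.push("urgent-step", priority=0)
    print(len(pq))                              # 3
    print([pq.pop() for _ in range(len(pq))])   # ['urgent-step', 'step-a', 'step-b']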
| leeping/crank | crank/PriorityQueue.py | Python | mit | 1,114 |
import arcpy, sys, os
sys.path.append('.')
import common
from networking import BulkConnectionCreator
arcpy.env.overwriteOutput = 1
# Creates a neighbourhood table of the provided zones according to the principle
# of network neighbourhood: two zones are neighbours if they are connected by a
# route along the network that does not pass through a third settlement and that
# is the most reasonable route according to the provided impedance.
# Settlement areas must not cross zone boundaries!
TMP_ZONE_NEGBUF = 'tmp_zonecore'
TMP_SETTL_NEGBUF = 'tmp_settlcore'
TMP_ROUTES = 'tmp_routes'
TMP_ROUTE_ER = 'tmp_rerase'
TMP_ROUTE_SINGLE = 'tmp_rsing'
TMP_RSING_LAY = 'tmp_rsing_lay'
common.progress('parsing attributes')
# NOTE: borderDist is added to the unpacking so the negative buffer below has a
# value to use; its exact position in the tool's parameter list is an assumption.
zoneAreas, zonePts, zoneIDFld, settlAreas, network, impedance, cutoff, cutoffFld, numToFind, borderDist, location, outputName = common.parameters(12)
common.progress('initializing route creator')
# create network connections - see comments for create_network_connections
conn = BulkConnectionCreator(zonePts, network, impedance, cutoff, numToFind, location)
common.progress('loading data')
conn.load()
common.progress('solving routes')
conn.solve()
common.progress('joining attributes')
conn.joinFields([zoneIDFld])
common.progress('creating routes')
conn.output(TMP_ROUTES) # routes between the zone central points
conn.close()
arcpy.env.workspace = location
# prepare the settlement areas - remove all lying close to the border
common.progress('clipping settlement areas')
arcpy.Buffer_analysis(zoneAreas, TMP_ZONE_NEGBUF, '-' + borderDist)
arcpy.Clip_analysis(settlAreas, TMP_ZONE_NEGBUF, TMP_SETTL_NEGBUF)
# cut the routes by settlement areas -> connections between them (most expensive)
common.progress('creating settlement connections')
arcpy.Erase_analysis(TMP_ROUTES, TMP_SETTL_NEGBUF, TMP_ROUTE_ER)
# explode multipart routes (pass through a settlement)
common.progress('exploding multipart routes')
arcpy.MultipartToSinglepart_management(TMP_ROUTE_ER, TMP_ROUTE_SINGLE)
# disregard all route parts contained entirely within a single zone
common.progress('selecting routes between zones')
arcpy.MakeFeatureLayer_management(TMP_ROUTE_SINGLE, TMP_RSING_LAY)
arcpy.SelectLayerByLocation_management(TMP_RSING_LAY, 'COMPLETELY_WITHIN', zoneAreas)
arcpy.SelectLayerByAttribute_management(TMP_RSING_LAY, 'SWITCH_SELECTION')
# non-duplicate entries for the zone IDs are the searched connections
# create an output table (fields will be added later)
common.progress('creating output table')
outputPath = arcpy.CreateTable_management(location, outputName).getOutput(0)
oIDFld = 'O_%s' % zoneIDFld
dIDFld = 'D_%s' % zoneIDFld
# order the rows so that identical tuples of O_ID, D_ID are next to each other
sorter = '%s; %s' % (dIDFld, oIDFld) # should be the same as input ordering
common.progress('starting route search')
connRows = arcpy.SearchCursor(TMP_RSING_LAY, '', '', '', sorter)
prevOID = None
prevDID = None
prevImp = None
start = True
sequence = False # if a sequence of identical tuples of O_ID, D_ID has been detected
for connRow in connRows:
oID = connRow.getValue(oIDFld)
dID = connRow.getValue(dIDFld)
impedVal = connRow.getValue(impedance)
if start: # start at the second row
start = False
# add fields to output table and open cursor
common.progress('preparing output table')
arcpy.AddField_management(outputPath, common.NEIGH_FROM_FLD, common.fieldType(type(oID)))
arcpy.AddField_management(outputPath, common.NEIGH_TO_FLD, common.fieldType(type(dID)))
arcpy.AddField_management(outputPath, impedance, common.fieldType(type(impedVal)))
common.progress('opening output table')
outputRows = arcpy.InsertCursor(outputPath)
common.progress('writing output')
else:
if oID == prevOID and dID == prevDID: # same as previous, detect sequence
sequence = True
else:
if sequence: # end of sequence, disregard it
sequence = False
else: # unique record - add neighbour record
outRow = outputRows.newRow()
outRow.setValue(common.NEIGH_FROM_FLD, prevOID)
outRow.setValue(common.NEIGH_TO_FLD, prevDID)
outRow.setValue(impedance, prevImp)
outputRows.insertRow(outRow)
prevOID = oID
prevDID = dID
prevImp = impedVal
del connRows, outputRows
common.progress('deleting temporary files')
# try: # delete temporary files
# arcpy.Delete_management(TMP_ZONE_NEGBUF)
# arcpy.Delete_management(TMP_SETTL_NEGBUF)
# arcpy.Delete_management(TMP_ROUTES)
# arcpy.Delete_management(TMP_ROUTE_ER)
# arcpy.Delete_management(TMP_ROUTE_SINGLE)
# except:
# pass
common.done()
| simberaj/interactions | network_neighbour_table.py | Python | mit | 4,583 |
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from indico.util.i18n import make_bound_gettext
_ = make_bound_gettext('citadel')
| indico/indico-plugins | citadel/indico_citadel/__init__.py | Python | mit | 327 |
from subprocess import Popen, PIPE
import natlink
from dragonfly import (Grammar, AppContext, MappingRule, Dictation,
Key, Text, FocusWindow, Function, Mimic,
StartApp, IntegerRef)
grammar = Grammar("global")
def snore(**kw):
natlink.setMicState('sleeping')
example_rule = MappingRule(
name="global",
mapping={
'scratch': Mimic('scratch', 'that'),
'Pick <n>': Key('down/5:%(n)d, enter'),
'Pick Minus <n>': Key('up/5:%(n)d, enter'),
'swap': Key('w-tab/5'),
'<n> swap': Key('w-tab/5:%(n)d'),
'swap <text>': FocusWindow(title='%(text)s'),
'win left': Key('w-left'),
'win right': Key('w-right'),
'desk <n>': Key('w-%(n)d'),
'snore': Function(snore),
'Show task [manager]': Key('cs-escape'),
#
# Words and phrases
'import clarus': Text('import brighttrac2 as clarus'),
},
extras=[
Dictation("text"),
IntegerRef("n", 1, 100),
],
)
# Add the action rule to the grammar instance.
grammar.add_rule(example_rule)
#---------------------------------------------------------------------------
# Load the grammar instance and define how to unload it.
grammar.load()
# Unload function which will be called by natlink at unload time.
def unload():
global grammar
if grammar: grammar.unload()
grammar = None
| drocco007/vox_commands | _global.py | Python | mit | 1,431 |
# From https://stackoverflow.com/questions/5189699/how-to-make-a-class-property#answer-5191224
class ClassPropertyDescriptor(object):
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset:
raise AttributeError("Can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
"""
Combines @classmethod and @property decorators to allow for properties at the
class level.
"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
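# --- Hypothetical usage sketch (not part of the original module) ---
# Shows the decorator in action on an illustrative Widget class: reading
# Widget.label computes the value on the class, and assignment through an
# *instance* is routed to the @label.setter. Assigning on the class itself
# (Widget.label = ...) would replace the descriptor rather than call the setter.
if __name__ == "__main__":
    class Widget(object):
        _label = "widget"
        @classproperty
        def label(cls):
            return cls._label.title()
        @label.setter
        def label(cls, value):
            cls._label = value
    print(Widget.label)        # -> "Widget"
    Widget().label = "gadget"  # instance assignment goes through the setter
    print(Widget.label)        # -> "Gadget"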
| gamernetwork/gn-django | gn_django/decorators.py | Python | mit | 1,014 |
# -*- coding: utf-8 -*-
import numpy as np
class ClickModel(object):
'''
Class for cascading click-models used to simulate clicks.
'''
def __init__(self, name, data_type, PCLICK, PSTOP):
'''
Name is used for logging, data_type denotes the degrees of relevance the data uses.
PCLICK and PSTOP the probabilities used by the model.
'''
self.name = name
self.type = data_type
self.PCLICK = PCLICK
self.PSTOP = PSTOP
def get_name(self):
'''
Name that can be used for logging.
'''
return self.name + '_' + self.type
def generate_clicks(self, ranking, all_labels):
'''
Generates clicks for a given ranking and relevance labels.
ranking: np array of indices which correspond with all_labels
all_labels: np array of integers
'''
labels = all_labels[ranking]
coinflips = np.random.rand(*ranking.shape)
clicks = coinflips < self.PCLICK[labels]
coinflips = np.random.rand(*ranking.shape)
stops = coinflips < self.PSTOP[labels]
stopped_clicks = np.zeros(ranking.shape, dtype=np.int32)
if np.any(stops):
clicks_before_stop = np.logical_and(clicks, np.arange(ranking.shape[0])
<= np.where(stops)[0][0])
stopped_clicks[clicks_before_stop] = 1
return stopped_clicks
else:
return np.zeros(ranking.shape, dtype=np.int32) + clicks
# create synonyms for keywords to ease command line use
syn_tuples = [
('navigational', ['nav', 'navi', 'navig', 'navigat']),
('informational', ['inf', 'info', 'infor', 'informat']),
('perfect', ['per', 'perf']),
('almost_random', [
'alm',
'almost',
'alra',
'arand',
'almostrandom',
'almrand',
]),
('random', ['ran', 'rand']),
('binary', ['bin']),
('short', []),
('long', []),
]
synonyms = {}
for full, abrv_list in syn_tuples:
assert full not in synonyms or synonyms[full] == full
synonyms[full] = full
for abrv in abrv_list:
assert abrv not in synonyms or synonyms[abrv] == full
synonyms[abrv] = full
bin_models = {}
bin_models['navigational'] = np.array([.05, .95]), np.array([.2, .9])
bin_models['informational'] = np.array([.4, .9]), np.array([.1, .5])
bin_models['perfect'] = np.array([.0, 1.]), np.array([.0, .0])
bin_models['almost_random'] = np.array([.4, .6]), np.array([.5, .5])
bin_models['random'] = np.array([.5, .5]), np.array([.0, .0])
short_models = {}
short_models['navigational'] = np.array([.05, .5, .95]), np.array([.2, .5, .9])
short_models['informational'] = np.array([.4, .7, .9]), np.array([.1, .3, .5])
short_models['perfect'] = np.array([.0, .5, 1.]), np.array([.0, .0, .0])
short_models['almost_random'] = np.array([.4, .5, .6]), np.array([.5, .5, .5])
short_models['random'] = np.array([.5, .5, .5]), np.array([.0, .0, .0])
long_models = {}
long_models['navigational'] = np.array([.05, .3, .5, .7, .95]), np.array([.2, .3, .5, .7, .9])
long_models['informational'] = np.array([.4, .6, .7, .8, .9]), np.array([.1, .2, .3, .4, .5])
long_models['perfect'] = np.array([.0, .2, .4, .8, 1.]), np.array([.0, .0, .0, .0, .0])
long_models['almost_random'] = np.array([.4, .45, .5, .55, .6]), np.array([.5, .5, .5, .5, .5])
long_models['random'] = np.array([.5, .5, .5, .5, .5]), np.array([.0, .0, .0, .0, .0])
all_models = {'short': short_models, 'binary': bin_models, 'long': long_models}
def get_click_models(keywords):
'''
Convenience function which returns click models corresponding with keywords.
only returns click functions for one data type: (bin,short,long)
'''
type_name = None
type_keyword = None
for keyword in keywords:
assert keyword in synonyms
if synonyms[keyword] in all_models:
type_name = synonyms[keyword]
type_keyword = keyword
break
assert type_name is not None and type_keyword is not None
models_type = all_models[type_name]
full_names = [synonyms[key] for key in keywords if key != type_keyword]
return [ClickModel(full, type_name, *models_type[full]) for full in full_names]
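# --- Hypothetical usage sketch (not part of the original module) ---
# Resolves the 'bin' (binary) variants of the 'perfect' and 'informational'
# models through the keyword synonyms above and simulates one click session on
# a fixed ranking with binary relevance labels. The labels and seed are
# illustrative; informational clicks are stochastic.
if __name__ == '__main__':
    np.random.seed(42)
    labels = np.array([1, 0, 1, 0, 0, 1, 0, 0, 0, 1])
    ranking = np.arange(labels.shape[0])
    for model in get_click_models(['bin', 'perfect', 'informational']):
        print(model.get_name(), model.generate_clicks(ranking, labels))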
| HarrieO/PairwisePreferenceMultileave | utils/clicks.py | Python | mit | 4,148 |
# -*- coding: utf-8 -*-
import math
import copy
# square root of 2 for diagonal distance
SQRT2 = math.sqrt(2)
def backtrace(node):
"""
Backtrace according to the parent records and return the path.
(including both start and end nodes)
"""
path = [(node.x, node.y)]
while node.parent:
node = node.parent
path.append((node.x, node.y))
path.reverse()
return path
def bi_backtrace(node_a, node_b):
"""
Backtrace from start and end node, returns the path for bi-directional A*
(including both start and end nodes)
"""
path_a = backtrace(node_a)
path_b = backtrace(node_b)
path_b.reverse()
return path_a + path_b
def raytrace(coords_a, coords_b):
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = x1 - x0
dy = y1 - y0
t = 0
grid_pos = [x0, y0]
t_for_one = \
abs(1.0 / dx) if dx > 0 else 10000, \
abs(1.0 / dy) if dy > 0 else 10000
frac_start_pos = (x0 + .5) - x0, (y0 + .5) - y0
t_for_next_border = [
(1 - frac_start_pos[0] if dx < 0 else frac_start_pos[0]) * t_for_one[0],
        (1 - frac_start_pos[1] if dy < 0 else frac_start_pos[1]) * t_for_one[1]
]
step = \
1 if dx >= 0 else -1, \
1 if dy >= 0 else -1
while t <= 1:
line.append(copy.copy(grid_pos))
index = 0 if t_for_next_border[0] <= t_for_next_border[1] else 1
t = t_for_next_border[index]
t_for_next_border[index] += t_for_one[index]
grid_pos[index] += step[index]
return line
def bresenham(coords_a, coords_b):
'''
Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
'''
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy
while True:
line += [[x0, y0]]
if x0 == x1 and y0 == y1:
break
e2 = err * 2
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
return line
def expand_path(path):
'''
Given a compressed path, return a new path that has all the segments
in it interpolated.
'''
expanded = []
if len(path) < 2:
return expanded
for i in range(len(path)-1):
expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded
def smoothen_path(grid, path, use_raytrace=False):
x0, y0 = path[0]
sx, sy = path[0]
new_path = [[sx, sy]]
interpolate = raytrace if use_raytrace else bresenham
last_valid = path[1]
for coord in path[2:-1]:
line = interpolate([sx, sy], coord)
blocked = False
for test_coord in line[1:]:
if not grid.walkable(test_coord[0], test_coord[1]):
blocked = True
break
if not blocked:
new_path.append(last_valid)
sx, sy = last_valid
last_valid = coord
new_path.append(path[-1])
return new_path
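# --- Hypothetical usage sketch (not part of the original module) ---
# Rasterizes the segment between two illustrative grid coordinates with both
# line algorithms defined above. expand_path and smoothen_path build on these
# but need a full path/Grid instance, so they are not exercised here.
if __name__ == '__main__':
    start, end = (0, 0), (5, 3)
    print('bresenham:', bresenham(start, end))
    print('raytrace: ', raytrace(start, end))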
| brean/python-pathfinding | pathfinding/core/util.py | Python | mit | 3,262 |
"""prologues.py: identify function entry points in flat file binaries
"""
from binaryninja import *
class PrologSearch(BackgroundTaskThread):
"""Class that assists in locating function prologues in flat files binaries such as firmware
"""
def __init__(self, view):
BackgroundTaskThread.__init__(self, "", True)
self.view = view
self.signatures = {
'Intel x86 function prologue' : ["\x55\x89\xE5\x83\xEC", "\x55\x89\xE5\x57\x56"],
'Intel x86 NOP Instructions' : ["\x90\x90\x90\x90\x90\x90\x90\x90",],
'ARM big-endian function prologue' : ["\xe9\x2d",],
'ARM little-endian function prologue' : ["\x2d\xe9"],
}
self.max_sig_size = -8
self.hits = {}
def _search_for_func_prologues(self):
"""Iterate data a page at a time using BinaryReader and search for
function prologue signatures
"""
for desc, sigs in self.signatures.iteritems():
for sig in sigs:
nextaddr = 0
while True:
nextaddr = self.view.find_next_data(nextaddr, sig)
if nextaddr == None:
break
self.hits[nextaddr] = desc
nextaddr = nextaddr + len(sig)
def _display_report(self):
"""Generate and display the markdown report
"""
md = ""
for key, val in self.hits.iteritems():
md += "**{:08x}** {}\n\n".format(key, val)
self.view.show_markdown_report("Function Prologue Search", md)
def run(self):
"""Locate prologues containined in binary
"""
self._search_for_func_prologues()
if self.hits != {}:
self._display_report()
else:
show_message_box(
"binjago: Function Prologue Search",
"Could not find any function prologues"
)
| zznop/binjago | binjago/prologues.py | Python | mit | 2,001 |
# Convolution of spectra to a Instrument profile of resolution R.
#
# The spectra does not have to be equidistant in wavelength.
from __future__ import division, print_function
import logging
from datetime import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
def wav_selector(wav, flux, wav_min, wav_max):
"""Wavelenght selector.
It will return arrays.
"""
wav = np.asarray(wav)
flux = np.asarray(flux)
# Super Fast masking with numpy
mask = (wav > wav_min) & (wav < wav_max)
wav_sel = wav[mask]
flux_sel = flux[mask]
return [wav_sel, flux_sel]
def unitary_Gauss(x, center, fwhm):
"""Gaussian_function of area=1.
p[0] = A;
p[1] = mean;
p[2] = fwhm;
"""
sigma = np.abs(fwhm) / (2 * np.sqrt(2 * np.log(2)))
Amp = 1.0 / (sigma * np.sqrt(2 * np.pi))
tau = -((x - center)**2) / (2 * (sigma**2))
result = Amp * np.exp(tau)
return result
def fast_convolve(wav_val, R, wav_extended, flux_extended, fwhm_lim):
"""IP convolution multiplication step for a single wavelength value."""
fwhm = wav_val / R
# Mask of wavelength range within 5 fwhm of wav
index_mask = ((wav_extended > (wav_val - fwhm_lim * fwhm)) &
(wav_extended < (wav_val + fwhm_lim * fwhm)))
flux_2convolve = flux_extended[index_mask]
# Gausian Instrument Profile for given resolution and wavelength
inst_profile = unitary_Gauss(wav_extended[index_mask], wav_val, fwhm)
sum_val = np.sum(inst_profile * flux_2convolve)
# Correct for the effect of convolution with non-equidistant postions
unitary_val = np.sum(inst_profile * np.ones_like(flux_2convolve))
return sum_val / unitary_val
def ip_convolution(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=True,
verbose=True):
"""Spectral convolution which allows non-equidistance step values."""
# Make sure they are numpy arrays
wav = np.asarray(wav, dtype='float64')
flux = np.asarray(flux, dtype='float64')
timeInit = dt.now()
wav_chip, flux_chip = wav_selector(wav, flux, chip_limits[0],
chip_limits[1])
# We need to calculate the fwhm at this value in order to set the starting
# point for the convolution
fwhm_min = wav_chip[0] / R # fwhm at the extremes of vector
fwhm_max = wav_chip[-1] / R
# Wide wavelength bin for the resolution_convolution
wav_min = wav_chip[0] - fwhm_lim * fwhm_min
wav_max = wav_chip[-1] + fwhm_lim * fwhm_max
wav_ext, flux_ext = wav_selector(wav, flux, wav_min, wav_max)
print("Starting the Resolution convolution...")
# Predefine array space
flux_conv_res = np.empty_like(wav_chip, dtype="float64")
size = len(wav_chip)
base_val = size // 20 # Adjust here to change % between reports
if base_val == 0:
base_val = 1 # Cannot be zero
for n, wav in enumerate(wav_chip):
# Put convolution value directly into the array
flux_conv_res[n] = fast_convolve(wav, R, wav_ext, flux_ext, fwhm_lim)
if (n % base_val == 0) and verbose:
print("Resolution Convolution at {0}%%...".format(100* n / size))
timeEnd = dt.now()
print("Single-Process convolution has been completed in"
" {}.\n".format(timeEnd - timeInit))
if plot:
plt.figure(1)
plt.xlabel(r"wavelength [ nm ])")
plt.ylabel(r"flux [counts] ")
plt.plot(wav_chip, flux_chip / np.max(flux_chip), color='k',
linestyle="-", label="Original spectra")
plt.plot(wav_chip, flux_conv_res / np.max(flux_conv_res), color='r',
linestyle="-", label="Spectrum observed at R={0}.".format(R))
plt.legend(loc='best')
plt.title(r"Convolution by an Instrument Profile ")
plt.show()
return [wav_chip, flux_conv_res]
def IPconvolution(wav, flux, chip_limits, R, FWHM_lim=5.0, plot=True,
verbose=True):
"""Wrapper of ip_convolution for backwards compatibility.
Lower case of variable name of FWHM.
"""
logging.warning("IPconvolution is depreciated, should use ip_convolution instead."
"IPconvolution is still available for compatibility.")
return ip_convolution(wav, flux, chip_limits, R, fwhm_lim=FWHM_lim, plot=plot,
verbose=verbose)
if __name__ == "__main__":
# Example useage of this convolution
wav = np.linspace(2040, 2050, 30000)
flux = (np.ones_like(wav) - unitary_Gauss(wav, 2045, .6) -
unitary_Gauss(wav, 2047, .9))
# Range in which to have the convoled values. Be careful of the edges!
chip_limits = [2042, 2049]
R = 1000
convolved_wav, convolved_flux = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=True,
verbose=True)
| jason-neal/equanimous-octo-tribble | octotribble/Convolution/IP_Convolution.py | Python | mit | 4,929 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import sorts
def main(argv):
line = files.read_line(argv[0])
perm = [int(val) for val in line[1:-1].split(' ')]
print sorts.count_signed_breaks(perm)
if __name__ == "__main__":
main(sys.argv[1:])
| cowboysmall/rosalind | src/textbook/rosalind_ba6b.py | Python | mit | 323 |
import tornado.ioloop
import tornado.web
from handlers import BlockHandler, NonBlockHandler, DefaultHandler, RedisHandler
import logging
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
def make_app():
return tornado.web.Application([
(r"/block-request", BlockHandler),
(r"/non-block-request", NonBlockHandler),
(r"/normal-request", DefaultHandler),
(r"/redis-request", RedisHandler),
], debug=True)
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start() | joelmir/tornado-simple-api | app.py | Python | mit | 724 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-25 08:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("chroma_core", "0009_django_content_type_json_notify"),
]
operations = [
migrations.CreateModel(
name="GrantRevokedTicketJob",
fields=[
(
"job_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Job",
),
),
("old_state", models.CharField(max_length=32)),
],
options={
"abstract": False,
},
bases=("chroma_core.job",),
),
migrations.CreateModel(
name="RevokeGrantedTicketJob",
fields=[
(
"job_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Job",
),
),
("old_state", models.CharField(max_length=32)),
],
options={
"abstract": False,
},
bases=("chroma_core.job",),
),
migrations.CreateModel(
name="Ticket",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("state_modified_at", models.DateTimeField()),
("state", models.CharField(max_length=32)),
("immutable_state", models.BooleanField(default=False)),
(
"ha_label",
models.CharField(
blank=True,
help_text=b"Label used for HA layer; human readable but unique",
max_length=64,
null=True,
),
),
("name", models.CharField(help_text=b"Name of ticket", max_length=64)),
(
"resource_controlled",
models.BooleanField(
default=True, help_text=b"Ticket is controlled by a resources named in `ha_label`"
),
),
("not_deleted", models.NullBooleanField(default=True)),
],
options={
"ordering": ["id"],
},
),
migrations.CreateModel(
name="FilesystemTicket",
fields=[
(
"ticket_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Ticket",
),
),
(
"filesystem",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="chroma_core.ManagedFilesystem"),
),
],
options={
"ordering": ["id"],
},
bases=("chroma_core.ticket",),
),
migrations.CreateModel(
name="MasterTicket",
fields=[
(
"ticket_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Ticket",
),
),
("mgs", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="chroma_core.ManagedMgs")),
],
options={
"ordering": ["id"],
},
bases=("chroma_core.ticket",),
),
migrations.AddField(
model_name="ticket",
name="content_type",
field=models.ForeignKey(
editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to="contenttypes.ContentType"
),
),
migrations.AddField(
model_name="revokegrantedticketjob",
name="ticket",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="chroma_core.Ticket"),
),
migrations.AddField(
model_name="grantrevokedticketjob",
name="ticket",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="chroma_core.Ticket"),
),
migrations.CreateModel(
name="ForgetTicketJob",
fields=[
(
"job_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="chroma_core.Job",
),
),
("old_state", models.CharField(max_length=32)),
("ticket", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="chroma_core.Ticket")),
],
options={
"ordering": ["id"],
},
bases=("chroma_core.job",),
),
]
| intel-hpdd/intel-manager-for-lustre | chroma_core/migrations/0010_tickets.py | Python | mit | 6,224 |
#import libraries
import pywaves as pw
from decimal import *
from Tkinter import *
import ttk
import tkMessageBox
import webbrowser
def callback(leaseId):
webbrowser.open_new(r"http://www.wavesgo.com/transactions.html?"+leaseId)
def dolease(myKey,myAmount):
    # mainframe is laid out with grid, so the status label must also use grid (mixing pack would fail)
    msg = ttk.Label(mainframe, text="Broadcasting lease...")
    msg.grid(columnspan=5, row=8, sticky=W)
try:
myAddress = pw.Address(privateKey = myKey.get())
except:
tkMessageBox.showwarning("Private key incorrect","This key doesn't seem valid. Make sure you are entering your private key.")
        msg.grid_forget()
return
try:
myAmount_send=int(Decimal(myAmount.get())*Decimal(100000000))
leaseId = myAddress.lease(minerAddress, myAmount_send)
#tkMessageBox.showwarning("About to send...",myAmount_send)
        msg.grid_forget()
if leaseId:
tkMessageBox.showinfo("Yeah!","Lease successful")
successmsg="TX ID: "+str(leaseId)
ttk.Label(mainframe, text=successmsg).grid(columnspan=5, row=8, sticky=W)
ttk.Button(mainframe, text="See transaction in WavesGo", command= lambda: callback(leaseId)).grid(columnspan=5, row=9, sticky=W)
else:
tkMessageBox.showwarning("Uh oh","Something went wrong :(")
            msg.grid_forget()
except:
tkMessageBox.showwarning("Uh oh","Something went very wrong :((")
        msg.grid_forget()
return
#configure parameters
pw.setNode(node = 'http://node.wavesgo.com:6869', chain = 'mainnet')
minerAddress = pw.Address('3P2HNUd5VUPLMQkJmctTPEeeHumiPN2GkTb')
#UI Stuff
root = Tk()
root.title("WavesGo Leasing")
mainframe = ttk.Frame(root, padding="8 8 32 32")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
myKey = StringVar()
myAmount = StringVar()
leaseId = StringVar()
myKey = ttk.Entry(mainframe, width=46,textvariable = myKey)
myKey.grid(column=2,columnspan=2,row=5, sticky=(W, E))
myAmount_entry = ttk.Entry(mainframe, width=7, textvariable=myAmount)
myAmount_entry.grid(column=2, row=6, sticky=(W, E))
ttk.Label(mainframe, text="You can find your private key in the lite client inside the backup icon").grid(columnspan=4, row=1, sticky=W)
ttk.Label(mainframe, text="Your Private Key").grid(column=1, row=5, sticky=W)
ttk.Label(mainframe, text="Amount to lease").grid(column=1, row=6, sticky=E)
ttk.Label(mainframe, text="Waves").grid(column=3, row=6, sticky=W)
ttk.Button(mainframe, text="Lease", command= lambda: dolease(myKey,myAmount)).grid(column=2, row=7, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
myKey.focus()
root.bind('<Return>', lambda event: dolease(myKey, myAmount))
root.mainloop()
| amichaelix/wavesgo-leasing | lease-gui.py | Python | mit | 2,665 |
"""
End-point to callback mapper
"""
__author__ = "Rinzler<github.com/feliphebueno>"
class RouteMapping(object):
"""
RouteMapping
"""
__routes = dict()
def __init__(self):
self.__routes = dict()
    def get(self, route: str, callback: object):
        """
        Binds a GET route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('get', {route: callback})
        return self
    def post(self, route: str, callback: object):
        """
        Binds a POST route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('post', {route: callback})
        return self
    def put(self, route: str, callback: object):
        """
        Binds a PUT route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('put', {route: callback})
        return self
    def patch(self, route: str, callback: object):
        """
        Binds a PATCH route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('patch', {route: callback})
        return self
    def delete(self, route: str, callback: object):
        """
        Binds a DELETE route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('delete', {route: callback})
        return self
    def head(self, route: str, callback: object):
        """
        Binds a HEAD route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('head', {route: callback})
        return self
    def options(self, route: str, callback: object):
        """
        Binds an OPTIONS route with the given callback
        :rtype: RouteMapping
        """
        self.__set_route('options', {route: callback})
        return self
    def __set_route(self, type_route, route):
        """
        Adds the given route to the mapping for type_route, skipping duplicates
        :rtype: RouteMapping
        """
        if type_route in self.__routes:
            if not self.verify_route_already_bound(type_route, route):
                self.__routes[type_route].append(route)
        else:
            self.__routes[type_route] = [route]
        return self
def verify_route_already_bound(self, type_route: str, route: dict) -> bool:
"""
:param type_route: str
:param route: dict
:return: bool
"""
        for bound_route in self.__routes[type_route]:
            bound_key = list(bound_route.keys())[0]
            route_key = list(route.keys())[0]
            if bound_key == route_key:
                return True
        return False
def get_routes(self):
"""
Gets the mapped routes
:rtype: dict
"""
return self.__routes
def flush_routes(self):
"""
        Clears all bound routes
        :return: self
"""
self.__routes = dict()
return self
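# Minimal usage sketch (hypothetical callbacks, not part of the original module):
# bind a couple of routes and read back the mapping grouped by HTTP verb.
if __name__ == "__main__":
    def list_users():
        return "users"
    def create_user():
        return "created"
    router = RouteMapping()
    router.get("/users/", list_users)
    router.post("/users/", create_user)
    print(router.get_routes())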
| feliphebueno/Rinzler | rinzler/core/route_mapping.py | Python | mit | 2,934 |
from setuptools import setup
# Python 2.6 will error exiting nosetests via multiprocessing without this
# import, as arbitrary as it seems.
#
import multiprocessing # noqa
from pybutton import VERSION
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='pybutton',
version=VERSION,
description='Client for the Button API',
long_description=long_description,
author='Button',
author_email='[email protected]',
url='https://www.usebutton.com/developers/api-reference/',
packages=['pybutton', 'pybutton/resources'],
include_package_data=False,
license='MIT',
test_suite='nose.collector',
tests_require=['nose', 'mock', "flake8-quotes==2.1.0"],
zip_safe=True,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
| button/button-client-python | setup.py | Python | mit | 1,364 |
import numpy as np
from nd2reader import ND2Reader
from frapalyzer.errors import InvalidROIError
from scipy.optimize import least_squares
class FRAPalyzer(object):
"""
Analyze Nikon ND2 stimulation FRAP experiments automatically
"""
def __init__(self, nd2_filename):
self._file = ND2Reader(nd2_filename)
self._micron_per_pixel = self._file.metadata["pixel_microns"]
self.background_roi = self._get_roi('background')
self.reference_roi = self._get_roi('reference')
self.stimulation_roi = self._get_roi('stimulation')
self.bleach_time_index = self._get_bleach_time_index()
self.timesteps = self._get_timesteps()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._file:
self._file.close()
    def _get_roi(self, roi_type='background'):
        """
        Get the ROI of the requested type ('background', 'reference' or 'stimulation')
        :return:
        """
if 'rois' not in self._file.metadata:
return None
for roi in self._file.metadata['rois']:
if roi['type'] == roi_type:
return roi
return None
@property
def metadata(self):
return self._file.metadata
def get_normalized_stimulation(self):
"""
Get the normalized and reference corrected stimulation signal
:return:
"""
reference = self.get_mean_intensity(self.reference_roi, keep_time=True)
stimulated = self.get_mean_intensity(self.stimulation_roi, keep_time=True)
# before this index: pre-bleach scan, after: post-bleach
bleach_time_index = self.bleach_time_index
ref_pre_bleach = reference[:bleach_time_index]
stim_pre_bleach = stimulated[:bleach_time_index]
# normalize both with the pre-bleach scans
ref_norm = np.divide(reference, np.mean(ref_pre_bleach))
stim_norm = np.divide(stimulated, np.mean(stim_pre_bleach))
# correct stimulated signal for acquisition bleaching using the reference
corrected = np.divide(stim_norm, ref_norm)
return corrected
def fit_exponential_recovery(self):
"""
Fit an exponential recovery function
:return:
"""
data = self.get_normalized_stimulation()
bleach_time = self.bleach_time_index
timesteps = self.timesteps
# Everything after bleach
recovery_data = data[bleach_time:]
# Guess for recovery and half time
recovery = np.max(recovery_data)
half_time_index = np.argmin(np.abs(recovery_data - recovery / 2.0))
try:
half_time = timesteps[half_time_index]
except IndexError:
half_time = timesteps[bleach_time]
# Make least squares fit
def frap_fit_function(params, t, y):
ln_half = np.log(0.5)
return params[0] * (1 - np.exp(ln_half / params[1] * t)) - y
res_lsq = least_squares(frap_fit_function, (recovery, half_time), args=(timesteps[bleach_time:], recovery_data))
if res_lsq.success:
recovery = res_lsq.x[0]
half_time = res_lsq.x[1]
return recovery, half_time
def _get_bleach_time_index(self):
"""
Get the time index after which bleaching was performed
:return:
"""
current_index = 0
for loop in self._file.metadata['experiment']['loops']:
if loop['stimulation']:
return int(np.round(current_index))
current_index += loop['duration'] / loop['sampling_interval']
return int(np.round(current_index))
    def _get_timesteps(self):
        """
        Get the acquisition time points (stimulation loops excluded)
        :return:
        """
timesteps = np.array([])
current_time = 0.0
for loop in self._file.metadata['experiment']['loops']:
if loop['stimulation']:
continue
timesteps = np.concatenate(
(timesteps, np.arange(current_time, current_time + loop['duration'], loop['sampling_interval'])))
current_time += loop['duration']
# if experiment did not finish, number of timesteps is wrong. Take correct amount of leading timesteps.
return timesteps[:self._file.metadata['num_frames']]
@staticmethod
def _check_roi(roi):
"""
Checks if this is a valid ROI
:param roi:
:return:
"""
if roi is None or 'shape' not in roi:
raise InvalidROIError('Invalid ROI specified')
    def get_mean_intensity(self, roi, keep_time=False, subtract_background=True, only_gt_zero=True):
        """
        Calculate the mean intensity of the given ROI
        :return:
        """
self._check_roi(roi)
image = self._get_slice_from_roi(roi)
if subtract_background:
background = self.get_mean_intensity(self.background_roi, keep_time=False, subtract_background=False,
only_gt_zero=True)
image = np.subtract(image, background)
if only_gt_zero:
image[np.isnan(image)] = -1
image[image <= 0] = np.nan
if keep_time:
return np.nanmean(np.nanmean(image, axis=2), axis=1)
else:
return np.nanmean(image)
def _to_pixel(self, micron):
return np.round(np.divide(micron, self._micron_per_pixel)).astype(np.int)
def _get_slice_from_roi(self, roi):
"""
Get the part of the image that is this ROI
:param roi:
:return:
"""
if roi['shape'] == 'circle':
image = self._get_circular_slice_from_roi(roi)
elif roi['shape'] == 'rectangle':
image = self._get_rectangular_slice_from_roi(roi)
else:
raise InvalidROIError('Only circular and rectangular ROIs are supported')
return image
    def _get_circular_slice_from_roi(self, roi):
        """
        Get the image data inside a circular ROI (pixels outside the circle are NaN)
        :param roi:
        :return:
        """
center = self._to_pixel(roi["positions"][0])
radius = self._to_pixel(roi["sizes"][0, 0])
coordinates = np.round(np.add(np.repeat(center[0:2], 2), np.multiply(radius, np.tile([-1, 1], (2,)))))
rect = self._get_rect_from_images(coordinates.astype(np.int))
# Put NaNs on places that are not inside the circle
x, y = np.meshgrid(*map(np.arange, rect.shape[1:]), indexing='ij')
mask = ((x - radius) ** 2 + (y - radius) ** 2) > radius ** 2
rect[:, mask] = np.nan
return rect
def _get_rectangular_slice_from_roi(self, roi):
"""
Return a rectangular slice of the ROI
:param roi:
:return:
"""
coordinates = np.round(np.add(np.repeat(roi['positions'][0, 0:2], 2),
np.multiply(np.repeat(roi["sizes"][0, 0:2], 2), np.tile([-0.5, 0.5], (2,)))))
return self._get_rect_from_images(coordinates.astype(np.int))
def _get_rect_from_images(self, rect):
"""
Rect: (left, right, bottom, top)
:param rect:
:return:
"""
images = []
for t in range(self._file.sizes['t']):
image = self._file[int(t)][rect[2]:rect[3], rect[0]:rect[1]]
images.append(image)
return np.array(images, dtype=self._file.pixel_type)
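# Minimal usage sketch (hypothetical ND2 file name, not part of the original module):
# open one stimulation-FRAP experiment and fit the exponential recovery curve.
if __name__ == "__main__":
    with FRAPalyzer("experiment.nd2") as analyzer:
        recovery, half_time = analyzer.fit_exponential_recovery()
        print("Recovery: {0:.3f}, half time: {1:.2f}".format(recovery, half_time))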
| rbnvrw/FRAPalyzer | frapalyzer/frapalyzer.py | Python | mit | 7,514 |
from __future__ import absolute_import
import hashlib
from unittest import TestCase
import time
class TestHandle(TestCase):
def test_handle(self):
timestamp = str(time.time())
token = "weilaidav2017"
nonce = "123321"
list = [token, timestamp, nonce]
list.sort()
sha1 = hashlib.sha1()
map(sha1.update, list)
hashcode = sha1.hexdigest()
print list
print hashcode
def test_for(self):
future_class_attr = {'__name__': 'name', '_name_': '_name', "name": "name", "__start__": "start"}
attrs = ((name, value) for name, value in future_class_attr.items() if not name.startswith('__'))
for attr in attrs:
print attr
| tiantaozhang/wxmp | test/test_handle.py | Python | mit | 735 |
#!/usr/bin/python
from __future__ import division
import matplotlib.pyplot as plt
from pylab import savefig
from random import randint
from time import time
import sys
filelocation = "/Users/jack.sarick/Desktop/Program/Python/pi/"
filename = filelocation+"pianswer.txt"
temppoint = []
loopcounter = 0
k50, k10, k5 = 50000, 10000, 5000
looptime = sys.argv[1]
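# Monte Carlo estimate: points fall uniformly in a k10 x k10 square that encloses a
# circle of radius k5, so P(inside circle) = pi * k5**2 / k10**2 = pi / 4 and pi is
# approximated below by 4 * (points inside the circle / total points).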
def makepi(loop):
global filelocation
global filename
counter = 0
#Starts timer for loop
looptime = time()
#Generates points
for i in range(k50):
temppoint = [randint(0, k10), randint(0, k10)]
if ((((temppoint[0]-k5)**2) + ((temppoint[1]-k5)**2)) <= k5**2):
plt.plot(temppoint[0], temppoint[1], 'bo')
counter += 1
else:
plt.plot(temppoint[0], temppoint[1], 'ro')
#Draws and saves file
plt.axis([0, k10, 0, k10])
savefig(filelocation + 'pi' + str(loop) + '.png', bbox_inches='tight')
#writes estimation and loop time to file
with open(filename,'ab') as f:
f.write(str((counter/k50)*4) + "," + str(time()-looptime) + "\n")
f.close()
#Runs makepi()
makepi(looptime) | jacksarick/My-Code | Python/pi/piguess.py | Python | mit | 1,059 |
__author__ = 'Алексей'
import codecs
from preparation.resources.synonyms import import_from_site
f = codecs.open('synonyms_raw.txt', mode='w', encoding='utf-8')
site = 'http://synonymonline.ru/'
import_from_site(site, f)
f.close()
| hatbot-team/hatbot_resources | preparation/resources/synonyms/raw_data/extract_synonyms_raw.py | Python | mit | 232 |
import logging
import re
import socket
try:
import ssl as _ssl
_hush_pyflakes = [_ssl]
del _hush_pyflakes
except ImportError:
_ssl = None # No SSL support
from kitnirc.events import NUMERIC_EVENTS
from kitnirc.user import User
_log = logging.getLogger(__name__)
class Channel(object):
"""Information about an IRC channel.
This class keeps track of things like who is in a channel, the channel
topic, modes, and so on.
"""
def __init__(self, name):
self.name = name.lower()
self.topic = None
self.members = {}
self.modes = {}
def __str__(self):
return self.name
def __repr__(self):
return "kitnirc.client.Channel(%r)" % self.name
def add_user(self, user):
"""Adds a user to the channel."""
if not isinstance(user, User):
user = User(user)
if user.nick in self.members:
_log.warning("Ignoring request to add user '%s' to channel '%s' "
"because that user is already in the member list.",
user, self.name)
return
self.members[user.nick] = user
_log.debug("Added '%s' to channel '%s'", user, self.name)
def remove_user(self, user):
"""Removes a user from the channel."""
if not isinstance(user, User):
user = User(user)
if user.nick not in self.members:
_log.warning("Ignoring request to remove user '%s' from channel "
"'%s' because that user is already not in the member "
"list.", user, self.name)
return
del self.members[user.nick]
_log.debug("Removed '%s' from channel '%s'", user, self.name)
class Host(object):
"""Information about an IRC server.
This class keeps track of things like what channels a client is in,
who is in those channels, and other such details.
"""
def __init__(self, host, port):
self.host = host
# We also keep track of the host we originally connected to - e.g.
# if we connected to a round-robin alias.
self.original_host = host
self.port = port
self.password = None
self.motd = "" # The full text of the MOTD, once received
self._motd = [] # Receive buffer; do not use for reading
# Buffer for information from WHOIS response lines
self._whois = {}
# The channels we're in, keyed by channel name
self.channels = {}
# What features modes are available on the server
self.features = dict()
self.user_modes = set()
self.channel_modes = set()
# Miscellaneous information about the server
self.version = None
self.created = None
def __str__(self):
return self.host
def __repr__(self):
return "kitnirc.client.Host(%r, %r)" % (self.host, self.port)
def add_channel(self, channel):
if not isinstance(channel, Channel):
channel = Channel(channel)
if channel.name in self.channels:
_log.warning("Ignoring request to add a channel that has already "
"been added: '%s'", channel)
return
self.channels[channel.name] = channel
_log.info("Entered channel %s.", channel)
def remove_channel(self, channel):
if isinstance(channel, Channel):
channel = channel.name
channel = channel.lower()
if channel not in self.channels:
_log.warning("Ignoring request to remove a channel that hasn't "
"been added: '%s'", channel)
return
del self.channels[channel]
_log.info("Left channel %s.", channel)
def get_channel(self, channel):
if isinstance(channel, Channel):
channel = channel.name
channel = channel.lower()
if channel not in self.channels:
_log.warning("Ignoring request to get a channel that hasn't "
"been added: '%s'", channel)
return None
return self.channels[channel]
def in_channel(self, channel):
channel = str(channel).lower()
return channel in self.channels
class Client(object):
"""An IRC client.
This class wraps a connection to a single IRC network and provides
additional functionality (e.g. tracking of nicks and channels).
"""
def __init__(self, host=None, port=6667):
if host:
self.server = Host(host, port)
else:
self.server = None
self.connected = False
self.socket = None
self._stop = False
self._buffer = ""
# Queues for event dispatching.
self.event_handlers = {
###### CONNECTION-LEVEL EVENTS ######
# Fires while the client is connecting, when a password should be
# supplied. If nothing supplies a password, the password argument
# of connect() will be used (if set).
"PASSWORD": [],
# Fires after the client's socket connects.
"CONNECTED": [on_connect],
# Fires every time a line is received
"LINE": [on_line],
# Fires whenever a line isn't handled by LINE
"RAWLINE": [],
# Fires whenever we see incoming network activity
"ACTIVITY": [],
###### IRC-LEVEL EVENTS ######
# Fires when receiving the 001 RPL_WELCOME message upon
# being recognized as a valid user by the IRC server.
"WELCOME": [],
# Fires when a privmsg is received
"PRIVMSG": [], # actor, recipient
# Fires when a notice is received
"NOTICE": [],
# Fires when a complete MOTD is received
"MOTD": [],
# Fires when a user joins a channel
"JOIN": [],
# Fires when a user parts a channel
"PART": [],
# Fires when a user quits the server
"QUIT": [],
# Fires when a user is kicked from a channel
"KICK": [],
# Fires when the list of users in a channel has been updated
"MEMBERS": [],
# Fires whenever a mode change occurs
"MODE": [],
# Fires when a WHOIS response is complete
"WHOIS": [],
# Fires when a channel topic changes
"TOPIC": [],
# Fires when someone invites us to a channel
"INVITE": [],
}
def add_handler(self, event, handler):
"""Adds a handler for a particular event.
Handlers are appended to the list, so a handler added earlier
will be called before a handler added later. If you wish to
insert a handler at another position, you should modify the
event_handlers property directly:
my_client.event_handlers['PRIVMSG'].insert(0, my_handler)
"""
if event not in self.event_handlers:
_log.info("Adding event handler for new event %s.", event)
self.event_handlers[event] = [handler]
else:
self.event_handlers[event].append(handler)
def dispatch_event(self, event, *args):
"""Dispatches an event.
Returns a boolean indicating whether or not a handler
suppressed further handling of the event (even the last).
"""
if event not in self.event_handlers:
_log.error("Dispatch requested for unknown event '%s'", event)
return False
elif event != "LINE":
_log.debug("Dispatching event %s %r", event, args)
try:
for handler in self.event_handlers[event]:
# (client, server, *args) : args are dependent on event
if handler(self, *args):
# Returning a truthy value supresses further handlers
# for this event.
return True
except Exception as e:
_log.exception("Error while processing event '%s': %r", event, e)
# Fall back to the RAWLINE event if LINE can't process it.
if event == "LINE":
return self.dispatch_event("RAWLINE", *args)
return False
def connect(self, nick, username=None, realname=None, password=None,
host=None, port=6667, ssl=None):
"""Connect to the server using the specified credentials.
Note: if host is specified here, both the host and port arguments
passed to Client.__init__ will be ignored.
If the 'ssl' argument is boolean true, will use SSL. If it is a
dictionary, will both use SSL and pass the contents as kwargs to
the ssl.wrap_socket() call.
"""
if host:
self.server = Host(host, port)
if self.server is None:
_log.error("Can't connect() without a host specified.")
return
self.user = User(nick)
self.user.username = username or nick
self.user.realname = realname or username or nick
_log.info("Connecting to %s as %s ...", self.server.host, nick)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if ssl and _ssl:
ssl_kwargs = ssl if isinstance(ssl, dict) else {}
self.socket = _ssl.wrap_socket(self.socket, **ssl_kwargs)
elif ssl:
_log.error("SSL requested but no SSL support available!")
return
self.socket.connect((self.server.host, self.server.port))
self.connected = True
_log.info("Connected to %s.", self.server.host)
# Allow an event handler to supply a password instead, if it wants
suppress_password = self.dispatch_event("PASSWORD")
if password and not suppress_password:
# We bypass our own send() function here to avoid logging passwords
_log.info("Sending server password.")
self.socket.send("PASS %s\r\n" % password)
self.server.password = password
self.dispatch_event('CONNECTED')
def disconnect(self, msg="Shutting down..."):
if not self.connected:
_log.warning("Disconnect requested from non-connected client (%s)",
self.server.host)
return
_log.info("Disconnecting from %s ...", self.server.host)
self._stop = True
self.send("QUIT", ":" + msg)
try:
self.socket.close()
except socket.error:
pass
def run(self):
"""Process events such as incoming data.
This method blocks indefinitely. It will only return after the
connection to the server is closed.
"""
self._stop = False # Allow re-starting the event loop
while not self._stop:
try:
self._buffer += self.socket.recv(4096)
except socket.error:
raise
lines = self._buffer.split("\n")
self._buffer = lines.pop() # Last line may not have been fully read
for line in lines:
line = line.rstrip("\r")
_log.debug("%s --> %s", self.server.host, line)
self.dispatch_event("LINE", line)
self.dispatch_event("ACTIVITY")
def ping(self):
"Convenience method to send a PING to server"
self.send("PING " + self.server.host)
def send(self, *args):
"""Sends a single raw message to the IRC server.
Arguments are automatically joined by spaces. No newlines are allowed.
"""
msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args)
if "\n" in msg:
raise ValueError("Cannot send() a newline. Args: %s" % repr(args))
_log.debug("%s <-- %s", self.server.host, msg)
self.socket.send(msg + "\r\n")
def nick(self, nick):
"""Attempt to set the nickname for this connection."""
_log.info("Requesting nick change to '%s'", nick)
self.send("NICK", nick)
def userinfo(self, username, realname=None):
"""Set the username and realname for this connection.
Note: this should only be called once, on connect. (The default
on-connect routine calls this automatically.)
"""
realname = realname or username
_log.info("Requesting user info update: username=%s realname=%s",
username, realname)
self.send("USER", username, socket.getfqdn(), self.server.host,
":%s" % realname) # Realname should always be prefixed by a colon
self.user.username = username
self.user.realname = realname
def msg(self, target, message):
"""Send a message to a user or channel."""
self.send("PRIVMSG", target, ":" + message)
def reply(self, incoming, user, message, prefix=None):
"""Replies to a user in a given channel or PM.
If the specified incoming is a user, simply sends a PM to user.
If the specified incoming is a channel, prefixes the message with the
user's nick and sends it to the channel.
This is specifically useful in creating responses to commands that can
be used in either a channel or in a PM, and responding to the person
who invoked the command.
"""
if not isinstance(user, User):
user = User(user)
if isinstance(incoming, User):
if prefix:
self.msg(user, "%s: %s" % (user.nick, message))
else:
self.msg(user, message)
else:
if prefix is not False:
self.msg(incoming, "%s: %s" % (user.nick, message))
else:
self.msg(incoming, message)
def notice(self, target, message):
"""Send a NOTICE to a user or channel."""
self.send("NOTICE", target, ":" + message)
def topic(self, target, message):
"""Sets TOPIC for a channel."""
self.send("TOPIC", target, ":" + message)
def ctcp(self, target, message):
"""Send a CTCP message to a user or channel."""
self.msg(target, "\x01%s\x01" % message)
def emote(self, target, message):
"""Sends an emote (/me ...) to a user or channel."""
self.ctcp(target, "ACTION %s" % message)
def join(self, target, key=None):
"""Attempt to join a channel.
The optional second argument is the channel key, if needed.
"""
chantypes = self.server.features.get("CHANTYPES", "#")
if not target or target[0] not in chantypes:
# Among other things, this prevents accidentally sending the
# "JOIN 0" command which actually removes you from all channels
_log.warning("Refusing to join channel that does not start "
"with one of '%s': %s", chantypes, target)
return False
if self.server.in_channel(target):
_log.warning("Ignoring request to join channel '%s' because we "
"are already in that channel.", target)
return False
_log.info("Joining channel %s ...", target)
self.send("JOIN", target, *([key] if key else []))
return True
def invite(self, channel, nick):
"""Attempt to invite a user to a channel."""
self.send("INVITE", nick, channel)
def part(self, target, message=None):
"""Part a channel."""
if not self.server.in_channel(target):
_log.warning("Ignoring request to part channel '%s' because we "
"are not in that channel.", target)
            return False
self.send("PART", target, *([message] if message else []))
return True
def quit(self, message=None):
"""Quit the server (and stop the event loop).
This actually just calls .disconnect() with the provided message."""
self.disconnect(message or "Bye")
def kick(self, channel, nick, message=None):
"""Attempt to kick a user from a channel.
If a message is not provided, defaults to own nick.
"""
self.send("KICK", channel, nick, ":%s" % (message or self.user.nick))
def whois(self, nick):
"""Request WHOIS information about a user."""
self.send("WHOIS", nick)
def mode(self, channel, add='', remove=''):
"""Add and/or remove modes for a given channel.
The 'add' and 'remove' arguments may, if specified, be either
sequences or dictionaries. If a dictionary is specified, the
corresponding values will be passed as arguments (with expansion
if necessary - {'b': ['foo','bar']} will result in two bans:
MODE <channel> +bb foo bar
(Values for modes which do not take arguments are ignored.)
"""
if not self.server.in_channel(channel):
_log.warning("Ignoring request to set modes in channel '%s' "
"because we are not in that channel.", channel)
return
chanmodes = self._get_chanmodes()
list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
# User privilege levels are not always included in channel modes list
always_arg_modes |= set(self._get_prefixes().itervalues())
def _arg_to_list(arg, argument_modes, toggle_modes):
if not isinstance(arg, dict):
modes = set(arg)
invalid_modes = modes - toggle_modes
if invalid_modes:
_log.warning("Ignoring the mode(s) '%s' because they are "
"missing required arguments.",
"".join(invalid_modes))
return modes & toggle_modes, []
# Okay, so arg is a dict
modes_with_args = []
modes_without_args = set()
for k,v in arg.iteritems():
if isinstance(v, str):
v = [v]
if k in argument_modes:
for val in v:
modes_with_args.append((k,val))
elif k in toggle_modes:
modes_without_args.add(k)
else:
_log.warning("Ignoring request to set channel mode '%s' "
"because it is not a recognized mode.", k)
return modes_without_args, modes_with_args
add_modes, add_modes_args = _arg_to_list(
add, list_modes | always_arg_modes | set_arg_modes, toggle_modes)
remove_modes, remove_modes_args = _arg_to_list(
remove, list_modes | always_arg_modes, set_arg_modes | toggle_modes)
max_arg = self.server.features.get("MODES") or 3
def _send_modes(op, toggle_modes, arg_modes):
while toggle_modes or arg_modes:
modes = "".join(toggle_modes)
toggle_modes = ""
now_modes, arg_modes = arg_modes[:max_arg], arg_modes[max_arg:]
modes += "".join(mode for mode,arg in now_modes)
modes += "".join(" %s" % arg for mode,arg in now_modes)
self.send("MODE", channel, "%s%s" % (op, modes))
_send_modes("+", add_modes, add_modes_args)
_send_modes("-", remove_modes, remove_modes_args)
def handle(self, event):
"""Decorator for adding a handler function for a particular event.
Usage:
my_client = Client()
@my_client.handle("WELCOME")
def welcome_handler(client, *params):
# Do something with the event.
pass
"""
def dec(func):
self.add_handler(event, func)
return func
return dec
def _get_prefixes(self):
"""Get the possible nick prefixes and associated modes for a client."""
prefixes = {
"@": "o",
"+": "v",
}
feature_prefixes = self.server.features.get('PREFIX')
if feature_prefixes:
modes = feature_prefixes[1:len(feature_prefixes)//2]
symbols = feature_prefixes[len(feature_prefixes)//2+1:]
prefixes = dict(zip(symbols, modes))
return prefixes
def _get_chanmodes(self):
chanmodes = self.server.features.get('CHANMODES')
if not chanmodes:
# Defaults from RFC 2811
list_modes = set("beI")
always_arg_modes = set()
set_arg_modes = set("kl")
toggle_modes = set("aimnqpsrt")
else:
chanmodes = chanmodes.split(",")
list_modes = set(chanmodes[0])
always_arg_modes = set(chanmodes[1])
set_arg_modes = set(chanmodes[2])
toggle_modes = set(chanmodes[3])
return list_modes, always_arg_modes, set_arg_modes, toggle_modes
################################################################################
# DEFAULT LOW-LEVEL EVENT HANDLERS
################################################################################
def on_connect(client):
"""Default on-connect actions."""
client.nick(client.user.nick)
client.userinfo(client.user.username, client.user.realname)
def on_line(client, line):
"""Default handling for incoming lines.
This handler will automatically manage the following IRC messages:
PING:
Responds with a PONG.
PRIVMSG:
Dispatches the PRIVMSG event.
NOTICE:
Dispatches the NOTICE event.
MOTDSTART:
Initializes MOTD receive buffer.
MOTD:
Appends a line to the MOTD receive buffer.
ENDOFMOTD:
Joins the contents of the MOTD receive buffer, assigns the result
to the .motd of the server, and dispatches the MOTD event.
"""
if line.startswith("PING"):
client.send("PONG" + line[4:])
return True
if line.startswith(":"):
actor, _, line = line[1:].partition(" ")
else:
actor = None
command, _, args = line.partition(" ")
command = NUMERIC_EVENTS.get(command, command)
parser = PARSERS.get(command, False)
if parser:
parser(client, command, actor, args)
return True
elif parser is False:
# Explicitly ignored message
return True
################################################################################
# COMMAND PARSERS
################################################################################
# Holds a mapping of IRC commands to functions that will parse them and
# take any necessary action. We define some ignored events here as well.
PARSERS = {
"YOURHOST": False,
}
def parser(*events):
"""Decorator for convenience - adds a function as a parser for event(s)."""
def dec(func):
for event in events:
PARSERS[event] = func
return func
return dec
@parser("PRIVMSG", "NOTICE")
def _parse_msg(client, command, actor, args):
"""Parse a PRIVMSG or NOTICE and dispatch the corresponding event."""
recipient, _, message = args.partition(' :')
chantypes = client.server.features.get("CHANTYPES", "#")
if recipient[0] in chantypes:
recipient = client.server.get_channel(recipient) or recipient.lower()
else:
recipient = User(recipient)
client.dispatch_event(command, actor, recipient, message)
@parser("MOTDSTART", "ENDOFMOTD", "MOTD")
def _parse_motd(client, command, actor, args):
if command == "MOTDSTART":
client.server._motd = []
if command == "ENDOFMOTD":
client.server.motd = "\n".join(client.server._motd)
client.dispatch_event("MOTD", client.server.motd)
if command == "MOTD": # MOTD line
client.server._motd.append(args.partition(":")[2])
@parser("JOIN")
def _parse_join(client, command, actor, args):
"""Parse a JOIN and update channel states, then dispatch events.
Note that two events are dispatched here:
- JOIN, because a user joined the channel
- MEMBERS, because the channel's members changed
"""
actor = User(actor)
channel = args.lstrip(' :').lower()
if actor.nick == client.user.nick:
client.server.add_channel(channel)
client.user.host = actor.host # now we know our host per the server
channel = client.server.get_channel(channel)
channel.add_user(actor)
client.dispatch_event("JOIN", actor, channel)
if actor.nick != client.user.nick:
# If this is us joining, the namreply will trigger this instead
client.dispatch_event("MEMBERS", channel)
@parser("PART")
def _parse_part(client, command, actor, args):
"""Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed
"""
actor = User(actor)
channel, _, message = args.partition(' :')
channel = client.server.get_channel(channel)
channel.remove_user(actor)
if actor.nick == client.user.nick:
client.server.remove_channel(channel)
client.dispatch_event("PART", actor, channel, message)
if actor.nick != client.user.nick:
client.dispatch_event("MEMBERS", channel)
@parser("QUIT")
def _parse_quit(client, command, actor, args):
"""Parse a QUIT and update channel states, then dispatch events.
Note that two events are dispatched here:
- QUIT, because a user quit the server
- MEMBERS, for each channel the user is no longer in
"""
actor = User(actor)
_, _, message = args.partition(':')
client.dispatch_event("QUIT", actor, message)
for chan in client.server.channels.itervalues():
if actor.nick in chan.members:
chan.remove_user(actor)
client.dispatch_event("MEMBERS", chan)
@parser("KICK")
def _parse_kick(client, command, actor, args):
"""Parse a KICK and update channel states, then dispatch events.
Note that two events are dispatched here:
- KICK, because a user was kicked from the channel
- MEMBERS, because the channel's members changed
"""
actor = User(actor)
args, _, message = args.partition(' :')
channel, target = args.split()
channel = client.server.get_channel(channel)
channel.remove_user(target)
target = User(target)
if target.nick == client.user.nick:
client.server.remove_channel(channel)
client.dispatch_event("KICK", actor, target, channel, message)
client.dispatch_event("MEMBERS", channel)
@parser("TOPIC")
def _parse_topic(client, command, actor, args):
"""Parse a TOPIC and update channel state, then dispatch a TOPIC event."""
channel, _, topic = args.partition(" :")
channel = client.server.get_channel(channel)
channel.topic = topic or None
if actor:
actor = User(actor)
client.dispatch_event("TOPIC", actor, channel, topic)
@parser("WELCOME")
def _parse_welcome(client, command, actor, args):
"""Parse a WELCOME and update user state, then dispatch a WELCOME event."""
_, _, hostmask = args.rpartition(' ')
client.user.update_from_hostmask(hostmask)
client.dispatch_event("WELCOME", hostmask)
@parser("CREATED")
def _parse_created(client, command, actor, args):
"""Parse CREATED and update the Host object."""
m = re.search("This server was created (.+)$", args)
if m:
client.server.created = m.group(1)
@parser("MYINFO")
def _parse_myinfo(client, command, actor, args):
"""Parse MYINFO and update the Host object."""
_, server, version, usermodes, channelmodes = args.split(None, 5)[:5]
s = client.server
s.host = server
s.version = version
s.user_modes = set(usermodes)
s.channel_modes = set(channelmodes)
@parser("FEATURELIST")
def _parse_featurelist(client, command, actor, args):
"""Parse FEATURELIST and update the Host object."""
# Strip off ":are supported by this server"
args = args.rsplit(":", 1)[0]
# Strip off the nick; we know it's addressed to us.
_, _, args = args.partition(' ')
items = args.split()
for item in items:
feature, _, value = item.partition("=")
# Convert integer values to actual integers for convenience
try:
value = int(value)
except (ValueError, TypeError):
pass
client.server.features[feature] = value
@parser("NAMREPLY")
def _parse_namreply(client, command, actor, args):
"""Parse NAMREPLY and update a Channel object."""
prefixes = client._get_prefixes()
channelinfo, _, useritems = args.partition(' :')
_, _, channel = channelinfo.rpartition(' ') # channeltype channelname
c = client.server.get_channel(channel)
if not c:
_log.warning("Ignoring NAMREPLY for channel '%s' which we are not in.",
channel)
return
# We bypass Channel.add_user() here because we just want to sync in any
# users we don't already have, regardless of if other users exist, and
# we don't want the warning spam.
for nick in useritems.split():
modes = set()
while nick[0] in prefixes:
modes.add(prefixes[nick[0]])
nick = nick[1:]
user = c.members.get(nick)
if not user:
user = c.members[nick] = User(nick)
_log.debug("Added user %s to channel %s", user, channel)
user.modes |= modes
@parser("ENDOFNAMES")
def _parse_endofnames(client, command, actor, args):
"""Parse an ENDOFNAMES and dispatch a NAMES event for the channel."""
args = args.split(" :", 1)[0] # Strip off human-readable message
_, _, channel = args.rpartition(' ')
channel = client.server.get_channel(channel) or channel.lower()
client.dispatch_event('MEMBERS', channel)
@parser("MODE")
def _parse_mode(client, command, actor, args):
"""Parse a mode changes, update states, and dispatch MODE events."""
chantypes = client.server.features.get("CHANTYPES", "#")
channel, _, args = args.partition(" ")
args = args.lstrip(":")
if channel[0] not in chantypes:
# Personal modes
for modes in args.split():
op, modes = modes[0], modes[1:]
for mode in modes:
if op == "+":
client.user.modes.add(mode)
else:
client.user.modes.discard(mode)
client.dispatch_event("MODE", actor, client.user, op, mode, None)
return
# channel-specific modes
chan = client.server.get_channel(channel)
user_modes = set(client._get_prefixes().itervalues())
chanmodes = client._get_chanmodes()
list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
argument_modes = list_modes | always_arg_modes | set_arg_modes
tokens = args.split()
while tokens:
modes, tokens = tokens[0], tokens[1:]
op, modes = modes[0], modes[1:]
for mode in modes:
argument = None
if mode in (user_modes | argument_modes):
argument, tokens = tokens[0], tokens[1:]
if mode in user_modes:
user = client.server.get_channel(channel).members[argument]
if op == "+":
user.modes.add(mode)
else:
user.modes.discard(mode)
if op == "+":
if mode in (always_arg_modes | set_arg_modes):
chan.modes[mode] = argument
elif mode in toggle_modes:
chan.modes[mode] = True
else:
if mode in (always_arg_modes | set_arg_modes | toggle_modes):
if mode in chan.modes:
del chan.modes[mode]
# list-type modes (bans+exceptions, invite masks) aren't stored,
# but do generate MODE events.
client.dispatch_event("MODE", actor, chan, op, mode, argument)
@parser("WHOISUSER", "WHOISCHANNELS", "WHOISIDLE", "WHOISSERVER",
"WHOISOPERATOR", "WHOISACCOUNT", "WHOISBOT", "WHOISREGNICK",
"ENDOFWHOIS")
def _parse_whois(client, command, actor, args):
"""Parse the content responses from a WHOIS query.
Individual response lines are parsed and used to fill in data in a buffer,
the full contents of which are then sent as the argument to the WHOIS
event dispatched when an ENDOFWHOIS line is received from the server.
"""
    _, _, args = args.partition(" ")  # Strip off recipient; we know it's us
nick, _, args = args.partition(" ")
if client.server._whois.get("nick") != nick:
client.server._whois = {"nick": nick}
response = client.server._whois
if command == "WHOISUSER":
first, _, response["realname"] = args.partition(":")
response["username"], response["host"] = first.split()[:2]
return
if command == "WHOISISSERVER":
response["server"], _, response["serverinfo"] = args.partition(" :")
return
if command == "WHOISOPERATOR":
response["oper"] = True
return
if command == "WHOISIDLE":
response["idle"], _, _ = args.partition(" :")
response["idle"] = int(response["idle"])
return
if command == "WHOISCHANNELS":
modes = "".join(client._get_prefixes())
channels = args.lstrip(":").split()
response["channels"] = dict(
(chan.lstrip(modes), chan[0] if chan[0] in modes else "")
for chan in channels)
return
if command == "WHOISACCOUNT":
response["account"], _, _ = args.partition(" :")
return
if command == "WHOISBOT":
response["bot"] = True
return
if command == "WHOISREGNICK":
response["registered"] = True
return
if command == "ENDOFWHOIS":
client.dispatch_event("WHOIS", response)
@parser("NICK")
def _parse_nick(client, command, actor, args):
"""Parse a NICK response, update state, and dispatch events.
Note: this function dispatches both a NICK event and also one or more
MEMBERS events for each channel the user that changed nick was in.
"""
old_nick, _, _ = actor.partition('!')
new_nick = args
if old_nick == client.user.nick:
client.user.nick = new_nick
modified_channels = set()
for channel in client.server.channels.itervalues():
user = channel.members.get(old_nick)
if user:
user.nick = new_nick
channel.members[new_nick] = user
del channel.members[old_nick]
modified_channels.add(channel.name)
client.dispatch_event("NICK", old_nick, new_nick)
for channel in modified_channels:
client.dispatch_event("MEMBERS", channel)
@parser("INVITE")
def _parse_invite(client, command, actor, args):
"""Parse an INVITE and dispatch an event."""
target, _, channel = args.rpartition(" ")
client.dispatch_event("INVITE", actor, target, channel.lower())
@parser("NICKNAMEINUSE")
def _parse_nicknameinuse(client, command, actor, args):
"""Parse a NICKNAMEINUSE message and dispatch an event.
The parameter passed along with the event is the nickname
which is already in use.
"""
nick, _, _ = args.rpartition(" ")
client.dispatch_event("NICKNAMEINUSE", nick)
# vim: set ts=4 sts=4 sw=4 et:
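# Minimal usage sketch (hypothetical server and nick, not part of the original module):
# register a WELCOME handler that joins a channel, then start the blocking event loop.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    client = Client("irc.example.org", 6667)
    @client.handle("WELCOME")
    def autojoin(client, hostmask):
        client.join("#example")
    client.connect("kitnbot", username="kitnbot", realname="KitnIRC example")
    client.run()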
| ayust/kitnirc | kitnirc/client.py | Python | mit | 35,529 |
"""
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'hrzeqwz0@nps2#ns3_qkqz*#5=)1bxcdwa*h__hta0f1bqr2e!'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = ("tests/templates", )
INSTALLED_APPS = (
'django_nose',
'django.contrib.contenttypes',
'django.contrib.auth'
)
for dir in os.listdir("tests/apps"):
if os.path.isfile("tests/apps/%s/urls.py" % dir):
INSTALLED_APPS += ( "tests.apps.%s" % dir, )
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'tests.urls'
WSGI_APPLICATION = 'tests.wsgi.application'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
| alisaifee/djlimiter | tests/settings.py | Python | mit | 865 |
"""
Meraki Config Templates API Resource
"""
from .meraki_api_resource import MerakiAPIResource
class ConfigTemplates(MerakiAPIResource):
""" Meraki Config Templates API Resource. """
resource = "configTemplates"
def __init__(self, key, prefix=None, resource_id=None):
MerakiAPIResource.__init__(self, key, prefix, resource_id)
| guzmonne/meraki_api | meraki_api/config_templates.py | Python | mit | 352 |
# -*- coding: utf-8 -*-
import scrapy
class QasneteaseSpider(scrapy.Spider):
name = "QASnetease"
allowed_domains = ["163.com"]
start_urls = ['http://163.com/']
def parse(self, response):
pass
| lijiabogithub/QUANTAXIS | QUANTAXIS/QASpider/QAS/QAS/spiders/QASnetease.py | Python | mit | 219 |
from typing import Union
from merakicommons.cache import lazy_property
from ..data import Region, Platform
from .common import CoreData, CassiopeiaGhost, ghost_load_on
from .summoner import Summoner
##############
# Data Types #
##############
class VerificationStringData(CoreData):
_renamed = {}
##############
# Core Types #
##############
class VerificationString(CassiopeiaGhost):
_data_types = {VerificationStringData}
def __init__(self, summoner: Summoner, region: Union[Region, str]):
self.__summoner = summoner
kwargs = {"region": region}
super().__init__(**kwargs)
def __get_query__(self):
return {"region": self.region, "summoner.id": self.summoner.id}
@classmethod
def __get_query_from_kwargs__(
cls, *, summoner: Summoner, region: Union[Region, str]
) -> dict:
return {"region": region, "summoner.id": summoner.id}
@lazy_property
def region(self) -> Region:
return Region(self._data[VerificationStringData].region)
@lazy_property
def platform(self) -> Platform:
return self.region.platform
@CassiopeiaGhost.property(VerificationStringData)
@ghost_load_on
def string(self) -> str:
return self._data[VerificationStringData].string
@property
def summoner(self):
return self.__summoner
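# Minimal usage sketch (hypothetical summoner, not part of the original module), assuming
# a Summoner can be constructed from a name and region as elsewhere in cassiopeia:
#
#     summoner = Summoner(name="Example", region="NA")
#     code = VerificationString(summoner=summoner, region="NA")
#     print(code.string)  # ghost-loads the third-party verification code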
| meraki-analytics/cassiopeia | cassiopeia/core/thirdpartycode.py | Python | mit | 1,356 |
#!/usr/bin/env python3
import argparse
import datetime
from html.parser import HTMLParser
import http.client
import sys
class TopListHTMLParser(HTMLParser):
def __init__(self):
super().__init__()
self.state = 'ready'
self.tmp_url = None
self.tmp_title = None
self.tmp_date = None
self.titles = []
def handle_starttag(self, tag, attrs):
if self.state == 'ready' and tag == 'td':
attrs = dict(attrs)
if 'titleColumn' in attrs.get('class', []):
self.state = 'title-found'
elif self.state == 'title-found' and tag == 'a':
attrs = dict(attrs)
self.tmp_url = attrs.get('href', '<<INVALID_ANCHOR_LINK>>')
idx = self.tmp_url.rfind('/?')
if idx > -1:
self.tmp_url = self.tmp_url[:idx]
self.state = 'link-found'
elif self.state in ['title-found', 'link-found'] and tag == 'span':
attrs = dict(attrs)
if attrs.get('name', 'not-interested') == 'rd':
self.tmp_date = attrs.get('data-value', '?')
if self.tmp_date != '?':
try:
self.tmp_date = datetime.date(*[int(x) for x in self.tmp_date.split('-')])
except ValueError:
print('Invalid date value: {0}'.format(self.tmp_date))
def handle_endtag(self, tag):
if self.state == 'link-found' and tag == 'a':
self.state = 'title-found'
elif self.state == 'title-found' and tag == 'td':
            if self.tmp_url and self.tmp_title and self.tmp_date:
self.titles.append((self.tmp_title, self.tmp_date, self.tmp_url))
self.tmp_url = None
self.tmp_title = None
self.tmp_date = None
else:
print('malformed entry {0} {1} {2}'.format(self.tmp_title, self.tmp_url, self.tmp_date))
self.state = 'ready'
def handle_data(self, data):
if self.state == 'link-found':
self.tmp_title = data
class CastHTMLParser(HTMLParser):
def __init__(self):
super().__init__()
self.state = 'ready'
self.tmp_name = None
self.actors = []
def handle_starttag(self, tag, attrs):
if self.state == 'ready' and tag == 'table':
attrs = dict(attrs)
if 'cast_list' in attrs.get('class', []):
self.state = 'list-found'
elif self.state == 'list-found' and tag == 'td':
attrs = dict(attrs)
if 'itemprop' in attrs.get('class', []) and 'actor' == attrs.get('itemprop', None):
self.state = 'actor-found'
elif self.state == 'actor-found' and tag == 'span':
attrs = dict(attrs)
if 'name' == attrs.get('itemprop', None):
self.state = 'actor-name-found'
def handle_endtag(self, tag):
if self.state == 'actor-name-found' and tag == 'span':
if self.tmp_name:
self.actors.append(self.tmp_name)
self.tmp_name = None
self.state = 'ready'
def handle_data(self, data):
if self.state == 'actor-name-found':
self.tmp_name = data
def fetch_cast(title):
conn = http.client.HTTPConnection('www.imdb.com')
conn.request('GET', title + '/fullcredits')
r1 = conn.getresponse()
if r1.status != 200:
return []
parser = CastHTMLParser()
parser.feed(r1.read().decode('utf-8'))
return parser.actors
def fetch_top_actors(start_year=None, end_year=None):
conn = http.client.HTTPConnection('www.imdb.com')
conn.request('GET', '/chart/top')
r1 = conn.getresponse()
if r1.status != 200:
return []
parser = TopListHTMLParser()
parser.feed(r1.read().decode('utf-8'))
titles = parser.titles
    if start_year or end_year:
        start_year = datetime.date(start_year or datetime.MINYEAR, 1, 1)
        if end_year:
            end_year = datetime.date(end_year + 1, 1, 1)
        else:
            end_year = datetime.date(datetime.MAXYEAR, 1, 1)
        titles = [(title, date, url) for (title, date, url) in titles
                  if start_year <= date < end_year]
actors = {}
print('Working', end='', file=sys.stderr, flush=True)
for title, _, url in titles:
cast = fetch_cast(url)
for actor in cast:
actors[actor] = actors.setdefault(actor, 0) + 1
print('.', end='', file=sys.stderr, flush=True)
print('', file=sys.stderr, flush=True)
return [(count, name) for (count, name) in reversed(sorted([(count, name) for (name, count) in actors.items()]))]
def main():
parser = argparse.ArgumentParser(description='Find top actors.')
parser.add_argument('-b', '--start', dest='start', type=int, default=0, help='start year')
parser.add_argument('-e', '--end', dest='end', type=int, default=0, help='end year (inclusive)')
parser.add_argument('-n', '--limit', dest='limit', type=int, default=0, help='max number of actors to display')
args = parser.parse_args()
actors = fetch_top_actors(args.start, args.end)
if args.limit:
actors = actors[:args.limit]
print('Number of Movies\tName')
for count, name in actors:
print('\t{0}\t\t{1}'.format(count, name))
if __name__ == '__main__':
main()
| asgeir/old-school-projects | python/verkefni5/popular/popular.py | Python | mit | 5,406 |
# coding: utf-8
from __future__ import unicode_literals
from django.core.urlresolvers import reverse_lazy
from django.db.models.loading import get_model
from django.test import TestCase
import json
class ViewsTests(TestCase):
fixtures = ['test_data']
def test_task_view_200(self):
response = self.client.get(reverse_lazy('TestTask'))
self.assertTrue('models' in response.context)
self.assertTrue(len(response.context['models']), 3)
self.assertEqual(response.status_code, 200)
def test_get_model_data_200(self):
response = self.client.get(reverse_lazy('ModelData', kwargs={'model_name': 'HobbiesDynamicModel'}))
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertTrue('fields' in data)
self.assertTrue('qs' in data)
self.assertEqual(len(data['fields']), len(data['qs'][0]))
def test_get_model_data_400(self):
response = self.client.get(reverse_lazy('ModelData', kwargs={'model_name': 'SomeModel'}))
data = json.loads(response.content)
self.assertEqual(response.status_code, 400)
self.assertTrue('error' in data)
self.assertEqual(data['error'], "App 'testtask' doesn't have a 'somemodel' model.")
def test_update_model_data_200(self):
data = 'field={}&id={}&data={}'.format('title', 1, 'Test')
response = self.client.put(reverse_lazy('ModelData', kwargs={'model_name': 'HobbiesDynamicModel'}), data=data)
status = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertTrue('status' in status)
self.assertEqual(status['status'], 'ok')
def test_update_model_data_200_error(self):
data = 'field={}&id={}&data={}'.format('date_joined', 1, 'dummy')
response = self.client.put(reverse_lazy('ModelData', kwargs={'model_name': 'UsersDynamicModel'}), data=data)
status = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertTrue('status' in status)
self.assertEqual(status['status'], 'error')
self.assertTrue('message' in status)
def test_update_model_data_400(self):
data = 'field={}&id={}&data={}'.format('title', 1, 'Test')
response = self.client.put(reverse_lazy('ModelData', kwargs={'model_name': 'SomeModel'}), data=data)
status = json.loads(response.content)
self.assertEqual(response.status_code, 400)
self.assertTrue('error' in status)
self.assertEqual(status['error'], "App 'testtask' doesn't have a 'somemodel' model.")
| ToxicWar/travail-de-tests | testtask/tests/test_views.py | Python | mit | 2,637 |
import os
import sys
import numpy as np
from scipy.misc import imsave
import scipy.ndimage
import pydicom
training_dicom_dir = "./test/a"
training_labels_dir = "./test/b"
training_png_dir = "./Data/Training/Images/Sunnybrook_Part2"
training_png_labels_dir = "./Data/Training/Labels/Sunnybrook_Part2"
for root, dirs, files in os.walk(training_labels_dir):
for file in files:
if file.endswith("-icontour-manual.txt"):
try:
prefix, _ = os.path.split(root)
prefix, _ = os.path.split(prefix)
_, patient = os.path.split(prefix)
                # str.strip() removes a set of characters rather than a suffix; slice
                # off the literal suffix instead to recover the DICOM filename.
                file_fn = file[:-len("-icontour-manual.txt")] + ".dcm"
print(file_fn)
print(patient)
dcm = pydicom.read_file(os.path.join(training_dicom_dir, patient, file_fn))
print(dcm.pixel_array.shape)
img = np.concatenate((dcm.pixel_array[...,None], dcm.pixel_array[...,None], dcm.pixel_array[...,None]), axis=2)
labels = np.zeros_like(dcm.pixel_array)
print(img.shape)
print(labels.shape)
with open(os.path.join(root, file)) as labels_f:
for line in labels_f:
x, y = line.split(" ")
labels[int(float(y)), int(float(x))] = 128
labels = scipy.ndimage.binary_fill_holes(labels)
img_labels = np.concatenate((labels[..., None], labels[..., None], labels[..., None]), axis=2)
imsave(os.path.join(training_png_dir, patient + "-" + file_fn + ".png"), img)
imsave(os.path.join(training_png_labels_dir, patient + "-" + file_fn + ".png"), img_labels)
except Exception as e:
print(e)
| mshunshin/SegNetCMR | prepare_data.py | Python | mit | 1,788 |
import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI')
SQLALCHEMY_TRACK_MODIFICATIONS = False
WTF_CSRF_ENABLED = True
class DevelopmentConfig(Config):
DEBUG = True
class ProductionConfig(Config):
pass
class TestingConfig(Config):
TESTING = True
WTF_CSRF_ENABLED = False
MAIL_SERVER = 'localhost'
MAIL_PORT = 5001
MAIL_DEFAULT_SENDER = 'Data Visualization - noreply <noreply@localhost>'
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'testing': TestingConfig
}
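# Illustrative selection of a config class (the actual app factory may differ):
#   app.config.from_object(config.get(os.environ.get('FLASK_CONFIG', 'development')))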
| danielrenes/data-visualization | config.py | Python | mit | 627 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import time
from extensions import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False)
mail = db.Column(db.String(64), nullable=False, unique=True)
token = db.Column(db.String(64), nullable=False)
description = db.Column(db.TEXT)
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
class Book(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False)
description = db.Column(db.TEXT)
cate_id = db.Column(db.Integer, db.ForeignKey("category.id"))
category = db.relationship("Category", uselist=False)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False, unique=True)
del db
| elvis-macak/flask-restful-scaffold | scaffold/models.py | Python | mit | 1,010 |
from chocobros import app
if __name__ == "__main__":
app.run()
| insacuri/chocobros | wsgi.py | Python | mit | 68 |
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
# checkin
urlpatterns = [
url(
regex=r'^new/$',
view=views.NewCheckin.as_view(),
name='new'
),
url(
regex=r'^list/$',
view=views.ListCheckin.as_view(),
name='list'
),
url(
regex=r'^detail/(?P<pk>[\d]+)$',
view=views.DetailCheckin.as_view(),
name='detail'
),
url(
regex=r'^setDates$',
view=views.SetDates.as_view(),
name='setDates'
),
url(
regex=r'^uploadCheckinImages',
view=views.UploadCheckinImages.as_view(),
name='uploadCheckinImages'
),
]
| airportmarc/the416life | src/apps/checkin/urls.py | Python | mit | 704 |
import logging
import os
from openpyxl import Workbook
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s \t', level=logging.INFO)
EXCEL_DIR = '/home/lucasx/PycharmProjects/DataHouse/DataSet/'
def write_excel(list_, filename):
mkdirs_if_not_exists(EXCEL_DIR)
wb = Workbook()
ws = wb.active
ws.title = "HouseInfo"
ws.cell(row=1, column=1).value = 'address'
ws.cell(row=1, column=2).value = 'area'
ws.cell(row=1, column=3).value = 'block'
ws.cell(row=1, column=4).value = 'buildYear'
ws.cell(row=1, column=5).value = 'image'
ws.cell(row=1, column=6).value = 'midPrice'
ws.cell(row=1, column=7).value = 'name'
ws.cell(row=1, column=8).value = 'saleNum'
ws.cell(row=1, column=9).value = 'url'
rownum = 2
for each_item in list_:
ws.cell(row=rownum, column=1).value = each_item.address
ws.cell(row=rownum, column=2).value = each_item.area
ws.cell(row=rownum, column=3).value = each_item.block
ws.cell(row=rownum, column=4).value = each_item.buildYear
ws.cell(row=rownum, column=5).value = each_item.image
ws.cell(row=rownum, column=6).value = each_item.midPrice
ws.cell(row=rownum, column=7).value = each_item.name
ws.cell(row=rownum, column=8).value = each_item.saleNum
ws.cell(row=rownum, column=9).value = each_item.url
rownum += 1
wb.save(EXCEL_DIR + filename + '.xlsx')
    logging.info('Excel file generated successfully!')
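# Illustrative call (assumes `houses` is a list of objects exposing the attributes
# written above: address, area, block, buildYear, image, midPrice, name, saleNum, url):
#   write_excel(houses, 'house_info')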
def mkdirs_if_not_exists(directory_):
"""create a new folder if it does not exist"""
if not os.path.exists(directory_) or not os.path.isdir(directory_):
os.makedirs(directory_)
| EclipseXuLu/DataHouse | DataHouse/crawler/file_helper.py | Python | mit | 1,669 |
from datetime import datetime
from dateutil.parser import parse
import inspect
import itertools
import json
import pytz
import re
from requests.exceptions import HTTPError
import six
import sys
from onecodex.exceptions import (
MethodNotSupported,
OneCodexException,
PermissionDenied,
ServerError,
)
from onecodex.models.helpers import (
check_bind,
generate_potion_sort_clause,
generate_potion_keyword_where,
)
from onecodex.vendored.potion_client.converter import PotionJSONEncoder
from onecodex.vendored.potion_client.resource import Resource
DEFAULT_PAGE_SIZE = 200
class ResourceList(object):
"""Wrapper around lists of onecodex-wrapped potion objects.
Parameters
----------
_resource : `list`
A list of potion objects, which are generally stored in `OneCodexBase._resource`.
oc_model : `OneCodexBase`
A class which inherits from `OneCodexBase`, for example, `models.Tags`.
Notes
-----
In OneCodexBase, when attributes are lists (e.g., `Samples.tags`), actions performed on the
returned lists are not passed through to the underlying potion object's list. This class passes
those actions through, and will generally act like a list.
See https://github.com/onecodex/onecodex/issues/40
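    Examples
    --------
    Illustrative sketch (assumes `sample` is a Samples object whose `tags` attribute
    is a ResourceList and `new_tag` is an existing Tags object):
        sample.tags.append(new_tag)   # list-style edits reach the underlying potion list
        sample.save()                 # persist the change back to the server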
"""
def _update(self):
self._res_list = [self._oc_model(x) for x in self._resource]
def _check_valid_resource(self, other, check_for_dupes=True):
try:
other = iter(other)
except TypeError:
other = [other]
other_ids = []
for o in other:
if not isinstance(o, self._oc_model):
raise ValueError(
"Expected object of type '{}', got '{}'".format(
self._oc_model.__name__, type(o).__name__
)
)
other_ids.append(o.id)
if check_for_dupes:
# duplicates are not allowed
self_ids = [s.id for s in self._resource]
if len(set(self_ids + other_ids)) != len(self_ids + other_ids):
raise OneCodexException(
"{} cannot contain duplicate objects".format(self.__class__.__name__)
)
def __init__(self, _resource, oc_model, **kwargs):
if not issubclass(oc_model, OneCodexBase):
raise ValueError(
"Expected object of type '{}', got '{}'".format(
OneCodexBase.__name__, oc_model.__name__
)
)
# turn potion Resource objects into OneCodex objects
self._resource = _resource
self._oc_model = oc_model
self._kwargs = kwargs
self._update()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# two ResourceLists are equal if they refer to the same underlying Resource
return id(self._resource) == id(other._resource)
def __contains__(self, other):
return other.__hash__() in [x.__hash__() for x in self._res_list]
@property
def __repr__(self):
return self._res_list.__repr__
@property
def __len__(self):
return self._res_list.__len__
def __getitem__(self, x):
wrapped = self._res_list[x]
if isinstance(wrapped, list):
return self.__class__(self._resource[x], self._oc_model, **self._kwargs)
else:
return wrapped
def __setitem__(self, k, v):
self._check_valid_resource(v)
self._resource[k] = v._resource
self._update()
def __delitem__(self, x):
del self._resource[x]
self._update()
@property
def __iter__(self):
return self._res_list.__iter__
@property
def __reversed__(self):
return self._res_list.__reversed__
def __add__(self, other):
if not isinstance(other, self.__class__):
raise TypeError(
'can only concatenate {} (not "{}") to {}'.format(
self.__class__.__name__, type(other), self.__class__.__name__
)
)
new_obj = self.copy()
new_obj.extend(other._res_list)
return new_obj
def append(self, x):
self._check_valid_resource(x)
self._resource.append(x._resource)
self._update()
def clear(self):
self._resource.clear()
self._res_list.clear()
def copy(self):
new_obj = self.__class__(self._resource[:], self._oc_model, **self._kwargs)
return new_obj
def count(self, x):
# assume that ResourceList objects are identical if they share the same underlying resource
self._check_valid_resource(x, check_for_dupes=False)
n = 0
for res_obj in self._resource:
if res_obj == x._resource:
n += 1
return n
def extend(self, iterable):
self._check_valid_resource(iterable)
self._resource.extend([x._resource for x in iterable])
self._update()
def index(self, x):
# assume that ResourceList objects are identical if they share the same underlying resource
self._check_valid_resource(x, check_for_dupes=False)
for res_obj_idx, res_obj in enumerate(self._resource):
if res_obj == x._resource:
return res_obj_idx
raise ValueError("{} is not in list".format(x))
def insert(self, idx, x):
self._check_valid_resource(x)
self._resource.insert(idx, x._resource)
self._update()
def pop(self):
self._resource.pop()
return self._res_list.pop()
def remove(self, x):
del self._resource[self.index(x)]
self._update()
class OneCodexBase(object):
"""Parent of all the One Codex objects that wraps the Potion-Client API."""
def __init__(self, _resource=None, **kwargs):
# FIXME: allow setting properties via kwargs?
# FIXME: get a resource from somewhere instead of setting to None (lots of stuff assumes
# non-None) if we have a class.resource?
if _resource is not None:
if not isinstance(_resource, Resource):
raise TypeError("Use the .get() method to fetch an individual resource.")
self._resource = _resource
elif hasattr(self.__class__, "_resource"):
for key, val in kwargs.items():
# This modifies kwargs in place to be the underlying
# Potion-Client resource
if isinstance(val, OneCodexBase):
kwargs[key] = val._resource
self._resource = self.__class__._resource(**kwargs)
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.id)
def _repr_html_(self):
return self._resource._repr_html_()
def __dir__(self):
# this only gets called on instances, so we're okay to add all the properties because
# this won't appear when you call, e.g. dir(ocx.Samples)
fields = [
str(f) if f != "$uri" else "id" for f in self.__class__._resource._schema["properties"]
]
# this might be a little too clever, but we mask out class methods/fxns from the instances
base_object_names = []
for name, obj in inspect.getmembers(self.__class__):
if inspect.isfunction(obj): # .save() and .delete() are functions in Py3
base_object_names.append(name)
if inspect.ismethod(obj) and obj.__self__ is not self.__class__:
base_object_names.append(name)
return fields + base_object_names
def __getattr__(self, key):
if hasattr(self, "_resource") and hasattr(self.__class__, "_resource"):
schema_key = key if key != "id" else "$uri"
schema = self.__class__._resource._schema["properties"].get(schema_key)
if schema is not None:
value = getattr(self._resource, key)
if isinstance(value, Resource):
# convert potion resources into wrapped ones
resource_path = value._uri.rsplit("/", 1)[0]
return _model_lookup[resource_path](_resource=value)
elif isinstance(value, list):
if schema["items"]["type"] == "object":
# convert lists of potion resources into wrapped ones
compiled_re = re.compile(schema["items"]["properties"]["$ref"]["pattern"])
# if the list we're returning is empty, we can't just infer what type of
# object belongs in this list from its contents. to account for this, we'll
# instead try to match the object's URI to those in our lookup table
for route, obj in _model_lookup.items():
if compiled_re.match("{}/dummy_lookup".format(route)):
return ResourceList(value, obj)
raise OneCodexException(
"No object found for {}".format(compiled_re.pattern)
)
else:
# otherwise, just return a regular list
return value
else:
if key == "id":
# undo the bad coercion from potion_client/resource.py#L111
if value is None:
return None
else:
return str(value)
if schema.get("format") == "date-time" and value is not None:
datetime_value = parse(value)
if datetime_value.tzinfo is None:
return pytz.utc.localize(datetime_value)
else:
return datetime_value.astimezone(pytz.utc)
return value
elif key == "id" or key in self.__class__._resource._schema["properties"]:
# make fields appear blank if there's no _resource bound to me
return None
raise AttributeError(
"'{}' object has no attribute '{}'".format(self.__class__.__name__, key)
)
def __setattr__(self, key, value):
if key.startswith("_"): # Allow directly setting _attributes, incl. _resource
# these are any fields that have to be settable normally
super(OneCodexBase, self).__setattr__(key, value)
return
elif key == "id":
raise AttributeError("can't set attribute")
elif isinstance(value, OneCodexBase) or isinstance(value, ResourceList):
self._resource[key] = value._resource
return
elif isinstance(value, (list, tuple)):
# convert any fancy items into their underlying resources
new_value = []
for v in value:
new_value.append(v._resource if isinstance(v, OneCodexBase) else v)
# coerce back to the value passed in
self._resource[key] = type(value)(new_value)
return
elif hasattr(self, "_resource") and hasattr(self.__class__, "_resource"):
schema = self.__class__._resource._schema["properties"].get(key)
if schema is not None:
# do some type checking against the schema
if not self.__class__._has_schema_method("update"):
raise MethodNotSupported(
"{} do not support editing.".format(self.__class__.__name__)
)
if schema.get("readOnly", False):
raise MethodNotSupported("{} is a read-only field".format(key))
if schema.get("format") == "date-time":
if isinstance(value, datetime):
if value.tzinfo is None:
value = value.isoformat() + "Z"
else:
value = value.isoformat()
# changes on this model also change the potion resource
self._resource[key] = value
return
raise AttributeError(
"'{}' object has no attribute '{}'".format(self.__class__.__name__, key)
)
def __delattr__(self, key):
if not self.__class__._has_schema_method("update"):
raise MethodNotSupported("{} do not support editing.".format(self.__class__.__name__))
if hasattr(self, "_resource") and key in self._resource.keys():
# changes on this model also change the potion resource
del self._resource[key]
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# TODO: We should potentially check that both resources are up-to-date
return self._resource._uri == other._resource._uri
def _to_json(self, include_references=True):
"""Convert model to JSON using the PotionJSONEncode.
Automatically resolves the resource as needed (`_properties` call handles this).
"""
if include_references:
return json.dumps(self._resource._properties, cls=PotionJSONEncoder)
else:
return json.dumps(
{
k: v
for k, v in self._resource._properties.items()
if not isinstance(v, Resource) and not k.startswith("$")
},
cls=PotionJSONEncoder,
)
@classmethod
def _convert_id_to_uri(cls, uuid):
base_uri = cls._resource._schema["_base_uri"]
if not uuid.startswith(base_uri):
uuid = "{}/{}".format(base_uri, uuid)
return uuid
@classmethod
def _has_schema_method(cls, method_name):
# potion-client is too stupid to check the schema before allowing certain operations
# so we manually check it before allowing some instance methods
# FIXME: this doesn't actually work though, because potion creates these routes for all
# items :/
method_links = cls._resource._schema["links"]
return any(True for link in method_links if link["rel"] == method_name)
@classmethod
def all(cls, sort=None, limit=None):
"""Return all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
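        Examples
        --------
        Illustrative only (assumes a bound API client, as in the other examples):
            >>> api.Samples.all(limit=10)
            [<Samples xxxxxxxxxxxxxxxx>, ...]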
"""
return cls.where(sort=sort, limit=limit)
@classmethod
def where(cls, *filters, **keyword_filters):
"""Retrieve objects (Samples, Classifications, etc.) from the One Codex server.
Parameters
----------
filters : `object`
Advanced filters to use (not implemented)
sort : `str` or `list`, optional
Sort the results by this field (or list of fields). By default in descending order,
but if any of the fields start with the special character ^, sort in ascending order.
For example, sort=['size', '^filename'] will sort by size from largest to smallest and
filename from A-Z for items with the same size.
limit : `int`, optional
Number of records to return. For smaller searches, this can reduce the number of
network requests made.
keyword_filters : `str` or `object`
Filter the results by specific keywords (or filter objects, in advanced usage)
Examples
--------
You can filter objects that are returned locally using a lambda function:
# returns only samples with a filename ending in '.gz'
my_samples = Samples.where(filter=lambda s: s.filename.endswith('.gz'))
Returns
-------
`list`
A list of all objects matching these filters. If no filters are passed, this
matches all objects.
"""
check_bind(cls)
# do this here to avoid passing this on to potion
filter_func = keyword_filters.pop("filter", None)
public = False
if any(x["rel"] == "instances_public" for x in cls._resource._schema["links"]):
public = keyword_filters.pop("public", False)
instances_route = keyword_filters.pop(
"_instances", "instances" if not public else "instances_public"
)
schema = next(
link for link in cls._resource._schema["links"] if link["rel"] == instances_route
)
sort_schema = schema["schema"]["properties"]["sort"]["properties"]
where_schema = schema["schema"]["properties"]["where"]["properties"]
sort = generate_potion_sort_clause(keyword_filters.pop("sort", None), sort_schema)
limit = keyword_filters.pop("limit", None if not public else 1000)
where = {}
# we're filtering by fancy objects (like SQLAlchemy's filter)
if len(filters) > 0:
if len(filters) == 1 and isinstance(filters[0], dict):
where = filters[0]
elif all(isinstance(f, six.string_types) for f in filters):
# if it's a list of strings, treat it as an multiple "get" request
where = {"$uri": {"$in": [cls._convert_id_to_uri(f) for f in filters]}}
else:
# we're doing some more advanced filtering
raise NotImplementedError("Advanced filtering hasn't been implemented yet")
# we're filtering by keyword arguments (like SQLAlchemy's filter_by)
if len(keyword_filters) > 0:
for k, v in generate_potion_keyword_where(keyword_filters, where_schema, cls).items():
if k in where:
raise AttributeError("Multiple definitions for same field {}".format(k))
where[k] = v
# the potion-client method returns an iterator (which lazily fetchs the records
# using `per_page` instances per request) so for limiting we only want to fetch the first
# n (and not instantiate all the available which is what would happen if we just sliced)
cursor = getattr(cls._resource, instances_route)(
where=where, sort=sort, per_page=DEFAULT_PAGE_SIZE
)
if limit is not None:
cursor = itertools.islice(cursor, limit)
# finally, apply local filtering function on objects before returning
wrapped = [cls(_resource=r) for r in cursor]
if filter_func:
if callable(filter_func):
wrapped = [obj for obj in wrapped if filter_func(obj) is True]
else:
raise OneCodexException(
"Expected callable for filter, got: {}".format(type(filter_func).__name__)
)
return wrapped
@classmethod
def get(cls, uuid):
"""Retrieve one specific object from the server by its UUID (unique 16-character id).
UUIDs are found in the web browser's address bar while viewing analyses and other objects.
Parameters
----------
uuid : `str`
UUID of the object to retrieve.
Returns
-------
`OneCodexBase` or `None`
The object with that UUID or None if no object could be found.
Examples
--------
>>> api.Samples.get('xxxxxxxxxxxxxxxx')
<Sample xxxxxxxxxxxxxxxx>
"""
check_bind(cls)
# we're just retrieving one object from its uuid
try:
resource = cls._resource.fetch(uuid)
if isinstance(resource, list):
# TODO: Investigate why potion .fetch()
                # method is occasionally returning a list here...
if len(resource) == 1:
resource = resource[0]
else:
raise TypeError("Potion-Client error in fetching resource")
except HTTPError as e:
# 404 error means this doesn't exist
if e.response.status_code == 404:
return None
else:
raise e
return cls(_resource=resource)
def delete(self):
"""Delete this object from the One Codex server."""
check_bind(self)
if self.id is None:
raise ServerError("{} object does not exist yet".format(self.__class__.name))
elif not self.__class__._has_schema_method("destroy"):
raise MethodNotSupported("{} do not support deletion.".format(self.__class__.__name__))
try:
self._resource.delete()
except HTTPError as e:
if e.response.status_code == 403:
raise PermissionDenied("") # FIXME: is this right?
else:
raise e
def save(self):
"""Either create or persist changes on this object back to the One Codex server."""
check_bind(self)
creating = self.id is None
if creating and not self.__class__._has_schema_method("create"):
raise MethodNotSupported("{} do not support creating.".format(self.__class__.__name__))
if not creating and not self.__class__._has_schema_method("update"):
raise MethodNotSupported("{} do not support updating.".format(self.__class__.__name__))
try:
self._resource.save()
except HTTPError as e:
if e.response.status_code == 400:
err_json = e.response.json().get("errors", [])
msg = pretty_print_error(err_json)
raise ServerError(msg)
elif e.response.status_code == 404:
action = "creating" if creating else "updating"
raise MethodNotSupported(
"{} do not support {}.".format(self.__class__.__name__, action)
)
elif e.response.status_code == 409:
raise ServerError("This {} object already exists".format(self.__class__.__name__))
else:
raise e
from onecodex.models.analysis import ( # noqa
Analyses,
Classifications,
Alignments,
Panels,
)
from onecodex.models.collection import SampleCollection # noqa
from onecodex.models.misc import Jobs, Projects, Tags, Users, Documents # noqa
from onecodex.models.sample import Samples, Metadata # noqa
__all__ = [
"Alignments",
"Classifications",
"Documents",
"Jobs",
"Metadata",
"Panels",
"Projects",
"Samples",
"SampleCollection",
"Tags",
"Users",
]
# import and expose experimental models
from onecodex.models.experimental import ( # noqa
AnnotationSets,
Assemblies,
FunctionalProfiles,
Genomes,
Taxa,
)
__all__.extend(["AnnotationSets", "Assemblies", "FunctionalProfiles", "Genomes", "Taxa"])
def pretty_print_error(err_json):
"""Pretty print Flask-Potion error messages for the user."""
# Special case validation errors
if len(err_json) == 1 and "validationOf" in err_json[0]:
required_fields = ", ".join(err_json[0]["validationOf"]["required"])
return "Validation error. Requires properties: {}.".format(required_fields)
# General error handling
msg = "; ".join(err.get("message", "") for err in err_json)
# Fallback
if not msg:
msg = "Bad request."
return msg
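# Illustrative behaviour of pretty_print_error (examples added for clarity, not from
# the original source): [{"validationOf": {"required": ["name", "filename"]}}] yields
# "Validation error. Requires properties: name, filename.", while
# [{"message": "Sample not found"}] yields "Sample not found".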
# go through all the models and generate a lookup table (to use in binding in the API and elsewhere)
def is_oc_class(cls):
return inspect.isclass(cls) and issubclass(cls, OneCodexBase)
_model_lookup = {}
for name, obj in inspect.getmembers(sys.modules[__name__], is_oc_class):
if hasattr(obj, "_resource_path"):
_model_lookup[obj._resource_path] = obj
| onecodex/onecodex | onecodex/models/__init__.py | Python | mit | 23,807 |
from __future__ import division
from __future__ import print_function
import numpy as np
from acq4.util import Qt
Ui_Form = Qt.importTemplate(".contrast_ctrl_template")
class ContrastCtrl(Qt.QWidget):
"""Widget for controlling contrast with rapidly updating image content.
Provides:
* image histogram
* contrast control
* color lookup table
* automatic gain control
* center weighted gain control
* zoom-to-image button
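    Minimal usage sketch (illustrative only; assumes a pyqtgraph ImageItem named
    `img_item` and 2D numpy frames arriving from an acquisition loop):
        ctrl = ContrastCtrl()
        ctrl.setImageItem(img_item)
        for frame in frames:
            img_item.setImage(frame, autoLevels=False)
            ctrl.processImage(frame)  # update auto gain / histogram levels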
"""
def __init__(self, parent=None):
Qt.QWidget.__init__(self, parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.imageItem = None
self.lastMinMax = None # Records most recently measured maximum/minimum image values
self.autoGainLevels = [0.0, 1.0]
self.ignoreLevelChange = False
self.alpha = 1.0
self.lastAGCMax = None
# Connect DisplayGain dock
self.ui.histogram.sigLookupTableChanged.connect(self.levelsChanged)
self.ui.histogram.sigLevelsChanged.connect(self.levelsChanged)
self.ui.btnAutoGain.toggled.connect(self.toggleAutoGain)
self.ui.btnAutoGain.setChecked(True)
self.ui.zoomLiveBtn.clicked.connect(self.zoomToImage)
self.ui.alphaSlider.valueChanged.connect(self.alphaChanged)
def setImageItem(self, item):
"""Sets the ImageItem that will be affected by the contrast / color controls
"""
self.imageItem = item
self.ui.histogram.setImageItem(item)
self.ui.histogram.fillHistogram(False) # for speed
def zoomToImage(self):
"""Zoom the image's view such that the image fills most of the view.
"""
self.imageItem.getViewBox().autoRange(items=[self.imageItem])
def levelsChanged(self):
if self.ui.btnAutoGain.isChecked() and not self.ignoreLevelChange:
if self.lastMinMax is None:
return
bl, wl = self.getLevels()
mn, mx = self.lastMinMax
rng = float(mx - mn)
if rng == 0:
return
newLevels = [(bl - mn) / rng, (wl - mn) / rng]
self.autoGainLevels = newLevels
def alphaChanged(self, val):
self.alpha = val / self.ui.alphaSlider.maximum() # slider only works in integers and we need a 0 to 1 value
self.imageItem.setOpacity(self.alpha)
def getLevels(self):
return self.ui.histogram.getLevels()
def toggleAutoGain(self, b):
if b:
self.lastAGCMax = None
self.ui.histogram.vb.setMouseEnabled(x=False, y=False)
else:
self.ui.histogram.vb.setMouseEnabled(x=False, y=True)
def resetAutoGain(self):
"""Causes the AGC to immediately scale to the next frame that arrives. This is called
when a sudden change in the image values is expected.
"""
self.lastMinMax = None
def processImage(self, data):
# Update auto gain for new image
# Note that histogram is linked to image item; this is what determines
# the final appearance of the image.
if self.ui.btnAutoGain.isChecked():
cw = self.ui.spinAutoGainCenterWeight.value()
(w, h) = data.shape
center = data[w // 2 - w // 6 : w // 2 + w // 6, h // 2 - h // 6 : h // 2 + h // 6]
reduced = data
while reduced.size > 2 ** 16:
ax = np.argmax(reduced.shape)
sl = [slice(None, None)] * data.ndim
sl[ax] = slice(None, None, 2)
reduced = reduced[tuple(sl)]
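            # Centre-weighted estimate: cw = 0 uses only the (downsampled) full frame,
            # cw = 1 uses only the central region (roughly the middle third along each
            # axis), and intermediate values blend the two min/max estimates linearly.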
minVal = reduced.min() * (1.0 - cw) + center.min() * cw
maxVal = reduced.max() * (1.0 - cw) + center.max() * cw
# If there is inf/nan in the image, strip it out before computing min/max
            if any([np.isnan(minVal), np.isinf(minVal), np.isnan(maxVal), np.isinf(maxVal)]):
nanMask = np.isnan(reduced)
infMask = np.isinf(reduced)
valid = reduced[~nanMask * ~infMask]
minVal = valid.min() * (1.0 - cw) + center.min() * cw
maxVal = valid.max() * (1.0 - cw) + center.max() * cw
            # Smooth min/max range to avoid noise: exponential moving average in which
            # a higher AutoGainSpeed setting gives more weight to the previous range.
            if self.lastMinMax is not None:
                s = 1.0 - 1.0 / (self.ui.spinAutoGainSpeed.value() + 1.0)
                minVal = self.lastMinMax[0] * s + minVal * (1.0 - s)
                maxVal = self.lastMinMax[1] * s + maxVal * (1.0 - s)
self.lastMinMax = [minVal, maxVal]
# and convert fraction of previous range into new levels
bl = self.autoGainLevels[0] * (maxVal - minVal) + minVal
wl = self.autoGainLevels[1] * (maxVal - minVal) + minVal
self.ignoreLevelChange = True
try:
self.ui.histogram.setLevels(bl, wl)
self.ui.histogram.setHistogramRange(minVal, maxVal, padding=0.05)
finally:
self.ignoreLevelChange = False
self.imageItem.setOpacity(self.alpha)
| acq4/acq4 | acq4/util/imaging/contrast_ctrl.py | Python | mit | 5,136 |
import pyclbr
import sys
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine('sqlite:///database.db', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
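    """Create any missing tables for the model classes and Table objects defined in models.py."""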
import models
from sqlalchemy import Table
for class_name in pyclbr.readmodule(models.__name__).keys():
try:
table_class = getattr(sys.modules[models.__name__], class_name)
if not table_class.__table__.exists(bind=engine):
table_class.__table__.create(bind=engine)
db_session.commit()
except AttributeError:
pass
for table_object in [class_object for class_object in models.__dict__.values() if type(class_object) == Table]:
try:
if not table_object.exists(bind=engine):
table_object.create(bind=engine)
db_session.commit()
except AttributeError:
pass
| NemoNessuno/SecretSanta | db_handler.py | Python | mit | 1,228 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Paper
admin.site.register(Paper)
| genonfire/bbgo | papers/admin.py | Python | mit | 153 |
from collections import OrderedDict
from itertools import chain
import pytest
from lxml import html
from app import content_loader
from app.main.helpers.diff_tools import html_diff_tables_from_sections_iter
from .helpers import BaseApplicationTest
class TestHtmlDiffTablesFromSections(BaseApplicationTest):
_expected_removal_content_column = 1
_expected_addition_content_column = 3
@pytest.mark.parametrize("framework_slug,lot_slug,service_data_a,service_data_b,expected_rem_qs,expected_add_qs", (
(
# framework_slug
"g-cloud-9",
# lot_slug
"cloud-software",
# service_data_a
{
"serviceName": "Metempsychosis",
"serviceDescription": "Only born male transubstantial heir of Rudolf Virag.",
},
# service_data_b
{
"serviceName": "Metempsychosis",
"serviceDescription": "Only born male transubstantial heir of Rudolf Bloom.",
},
            # expected_rem_qs (question ids expected to create diffs with "removed" fragments)
("serviceDescription",),
            # expected_add_qs (question ids expected to create diffs with "added" fragments)
("serviceDescription",),
),
(
"g-cloud-9",
"cloud-support",
{
"serviceName": "Ellen Higgins, second daughter of Julius Karoly",
"serviceDescription": "Fanny Hegarty\nRichard Goulding\n\nChristina Grier\n",
},
{
"serviceName": "Ellen Higgins, second daughter of Julius Higgins",
"serviceDescription": "Fanny Higgins\n\nSimon Dedalus of Cork\nRichard Goulding\n\nChristina Grier\n",
},
("serviceName", "serviceDescription",),
("serviceName", "serviceDescription",),
),
(
"g-cloud-8",
"iaas",
{
"serviceName": u"GENTLEMEN\u2751OF\u2751THE\u2751PRESS",
"serviceSummary": "Grossbooted draymen rolled barrels dullthudding out of Prince's\n" +
"stores and bumped them up on the brewery float. On the brewery float bumped\n" +
"dullthudding barrels rolled by grossbooted draymen out of Prince's stores.",
},
{
"serviceName": u"GENTLEMEN \u2751 OF \u2751 THE \u2751 PRESS",
"serviceSummary": "Grossbooted draymen rolled barrels dullthudding out of Prince's\n" +
"stores and bumped them up on the brewery float. On the brewery float bumped\n" +
"dullthudding barrels rolled by grossbooted draymen out of Prince's stores.",
},
(),
("serviceName",),
),
(
"g-cloud-9",
"cloud-hosting",
{
"serviceName": "Infinitely preceding deletion",
"serviceDescription": "To reflect that each <del> who enters imagines himself to be the first to\n" +
"</del> whereas he <ins> always the last term of a preceding series",
"serviceFeaturesHostingAndSoftware": [
"Burke",
"Joseph Cuffe",
"Wisdom Hely",
"Alderman John Hooper",
"Dr Francis Brady",
],
"somethingIrrelevant": "decomposed vegetable missiles",
},
{
"serviceName": "Infinitely preceding deletion",
"serviceDescription": "He is neither first <ins> nor </ins> <del> nor only nor alone in a series\n" +
"originating in & repeated to </del>.",
"serviceFeaturesHostingAndSoftware": [
"Burke",
"Joseph Cuffe",
"Wisdom Hely",
"Alderman John Hooper",
"Dr. Francis Brady",
"Father Sebastian of Mount Argus",
"A bootblack at the General Post Office",
],
},
("serviceDescription",),
("serviceDescription", "serviceFeaturesHostingAndSoftware",),
),
(
"g-cloud-9",
"cloud-support",
{
"serviceName": "An unsatisfactory equation",
"serviceDescription": "An exodus and return\n in time\n through reversible space",
"serviceBenefitsSupport": [
"The lateness of the hour, rendering procrastinatory ",
"The obscurity of the night, rendering invisible",
"The uncertainty of thoroughfares, rendering perilous",
"The necessity for repose, obviating movement",
"The proximity of an occupied bed, obviating research",
"The anticipation of warmth (human) tempered with coolness (linen)",
"The statue of Narcissus, sound without echo, desired desire",
],
},
{
"serviceName": "An unsatisfactory equation",
"serviceDescription": "\nAn exodus and return\n in space\n through irreversible time",
"serviceBenefitsSupport": [
"The anticipation of warmth (human) tempered with coolness (linen)",
"The lateness of the hour, rendering procrastinatory ",
"The necessity for repose, obviating movement",
"The obscurity of the night, rendering invisible",
"The proximity of an occupied bed, obviating research",
"The statue of Narcissus, sound without echo, desired desire",
"The uncertainty of thoroughfares, rendering perilous",
],
"serviceFeaturesSupport": [
"The removal of nocturnal solitude",
],
},
("serviceDescription", "serviceBenefitsSupport",),
("serviceDescription", "serviceBenefitsSupport", "serviceFeaturesSupport",),
),
))
@pytest.mark.parametrize("table_preamble_template", (None, "diff_table/_table_preamble.html",))
def test_common_properties(
self,
framework_slug,
lot_slug,
service_data_a,
service_data_b,
expected_rem_qs,
expected_add_qs,
table_preamble_template,
):
# because there is no single canonical "correct" representation of a diff between two documents, we can't just
# test the output verbatim as it would be a fragile test. instead we can test for a bunch of properties that
# must always be true of an output we would consider valid
service_data_a, service_data_b = (dict(s, lot=lot_slug) for s in (service_data_a, service_data_b,))
content_sections = content_loader.get_manifest(
framework_slug,
'edit_service_as_admin',
).filter(service_data_b).sections
with self.app.app_context():
diffs = OrderedDict((q_id, html_diff) for sec_slug, q_id, html_diff in html_diff_tables_from_sections_iter(
content_sections,
service_data_a,
service_data_b,
table_preamble_template=table_preamble_template,
))
for question_id, html_diff in diffs.items():
table_element = html.fragment_fromstring(html_diff)
# these should all have been removed
assert not table_element.xpath(".//a")
assert not table_element.xpath(".//colgroup")
assert not table_element.xpath(".//*[@id]")
# there should be a non-empty caption tag if and only if table_preamble_template is supplied
if table_preamble_template is None:
assert not table_element.xpath(".//caption")
else:
assert table_element.xpath("./caption[normalize-space(string())]")
# all td.line-content.removal elements should appear in the same (expected) column
for tr in table_element.xpath(
"./tbody/tr[./td[contains(@class, 'line-content')][contains(@class, 'removal')]]"
):
assert len(tr.xpath("./td[contains(@class, 'line-content')][contains(@class, 'removal')]")) == 1
assert len(tr.xpath(
"./td[contains(@class, 'line-content')][contains(@class, 'removal')]/preceding-sibling::*"
)) == self._expected_removal_content_column
# all td.line-content.addition elements should appear in the same (expected) column
for tr in table_element.xpath(
"./tbody/tr[./td[contains(@class, 'line-content')][contains(@class, 'addition')]]"
):
assert len(tr.xpath("./td[contains(@class, 'line-content')][contains(@class, 'addition')]")) == 1
assert len(tr.xpath(
"./td[contains(@class, 'line-content')][contains(@class, 'addition')]/preceding-sibling::*"
)) == self._expected_addition_content_column
# the only del elements should appear in td.line-content.removal elements
assert len(table_element.xpath(".//del")) == len(
table_element.xpath(
"./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'removal')]/del"
)
)
# and there shouldn't be any td.line-content.removal elements that don't have at least one del element
assert not table_element.xpath(
".//td[contains(@class, 'line-content')][contains(@class, 'removal')][not(.//del)]"
)
# the only ins elements should appear in td.line-content.addition elements
assert len(table_element.xpath(".//ins")) == len(
table_element.xpath(
"./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'addition')]/ins"
)
)
# and there shouldn't be any td.line-content.addition elements that don't have at least one ins element
assert not table_element.xpath(
".//td[contains(@class, 'line-content')][contains(@class, 'addition')][not(.//ins)]"
)
# content should have been purged of all nbsps
assert not table_element.xpath(
".//td[contains(@class, 'line-content')][contains(string(), $nbsp)]",
nbsp=u"\u00a0",
)
# yes, this is awfully familiar code from the innards of html_diff_tables_from_sections_iter so there's a
# degree to which we're marking our own homework with this, but it's a little difficult to see an
# alternative
expected_content_a, expected_content_b = (
[
(line or " ") # diff outputs an extraneous space in some blank line cases, which is ok by us
for line in (q.splitlines() if isinstance(q, str) else q)
]
for q in (r.get(question_id, []) for r in (service_data_a, service_data_b,))
)
# assert some things about the content in each line-content column
for expected_content, expected_content_column in (
(expected_content_a, self._expected_removal_content_column,),
(expected_content_b, self._expected_addition_content_column,),
):
# the collapsed string content of the collection of tds from the expected column which have a non-empty
# line-number td directly preceding them should equal the expected content. note here we're not giving
# any leeway for extra whitespace because the intention is to be able to display this with whitespace-
# preserving css. but that could always be relaxed if totally necessary. also note if there were nbsps
# in our data this would not work because they are purged unconditionally.
assert [
(elem.xpath("string()") or " ") # normalizing blank lines to single spaces, reason mentioned above
for elem in table_element.xpath(
"./tbody/tr/td[$i][contains(@class, 'line-content')]"
"[normalize-space(string(./preceding-sibling::td[1][contains(@class, 'line-number')]))]",
# xpath's element indexing is 1-based
i=expected_content_column + 1,
)
] == expected_content
# assert some things about each row
for tr in table_element.xpath("./tbody/tr"):
# note here how xpath's element indexing is 1-based
content_remside = tr.xpath("string(./td[$i])", i=self._expected_removal_content_column + 1)
content_addside = tr.xpath("string(./td[$i])", i=self._expected_addition_content_column + 1)
# in lines where we have additions/removals,,,
if tr.xpath(
"./td[contains(@class, 'line-content')]" +
"[contains(@class, 'addition') or contains(@class, 'removal')]"
):
# row should have content on at least one side
assert content_addside or content_remside
# if no content on one side, all content on other side should be in a del/ins
if not content_remside:
assert content_addside == tr.xpath(
"string(./td[contains(@class, 'line-content')][contains(@class, 'addition')]/ins)"
)
if not content_addside:
assert content_remside == tr.xpath(
"string(./td[contains(@class, 'line-content')][contains(@class, 'removal')]/del)"
)
# line number should be on a side if and only if there is content on that side
assert bool(tr.xpath(
"string(./td[contains(@class, 'line-content')][contains(@class, 'removal')])"
)) == bool(tr.xpath(
"normalize-space(string(./td[contains(@class, 'line-number')]" +
"[contains(@class, 'line-number-removal')]))"
))
assert bool(tr.xpath(
"string(./td[contains(@class, 'line-content')][contains(@class, 'addition')])"
)) == bool(tr.xpath(
"normalize-space(string(./td[contains(@class, 'line-number')]" +
"[contains(@class, 'line-number-add')]))"
))
# line-content tds which are empty should have line-non-existent class
assert all(
bool("line-non-existent" in td.attrib.get("class", "")) == (not td.xpath("string()"))
for td in tr.xpath("./td[contains(@class, 'line-content')]")
)
else: # but if there aren't any additions/removals...
# the content should be equal on both sides
assert content_remside == content_addside
# there shouldn't be any line-non-existent tds
assert not tr.xpath("./td[contains(@class, 'line-non-existent')]")
for question in chain.from_iterable(section.questions for section in content_sections):
# check a question we expect to have removals does and ones we expect not to ...doesn't.
assert bool((question_id in diffs) and html.fragment_fromstring(diffs[question_id]).xpath(
"./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'removal')]"
)) == (question_id in expected_rem_qs)
# check a question we expect to have additions does and ones we expect not to ...doesn't.
assert bool((question_id in diffs) and html.fragment_fromstring(diffs[question_id]).xpath(
"./tbody/tr/td[contains(@class, 'line-content')][contains(@class, 'addition')]"
)) == (question_id in expected_add_qs)
# check a question we expect to have neither additions or removals to not be present in diffs at all
assert (question_id in diffs) == (question_id in expected_rem_qs or question_id in expected_add_qs)
def test_identical_data(self):
# these two should be identical in as far as the data we're concerned about
service_data_a = {
"lot": "cloud-support",
"serviceName": "On the range",
"serviceFeaturesSupport": [
"A blue enamelled saucepan",
"A black iron kettle",
],
"irrelevantThing": "Five coiled spring housebells",
}
service_data_b = {
"lot": "cloud-support",
"serviceName": "On the range",
"serviceFeaturesSupport": [
"A blue enamelled saucepan",
"A black iron kettle",
],
"irrelevantThing": "Six coiled spring housebells",
"anotherIrrelevancy": "A curvilinear rope",
}
content_sections = content_loader.get_manifest(
"g-cloud-9",
'edit_service_as_admin',
).filter(service_data_b).sections
assert not tuple(html_diff_tables_from_sections_iter(content_sections, service_data_a, service_data_b))
| alphagov/digitalmarketplace-admin-frontend | tests/app/test_diff_tool.py | Python | mit | 17,911 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choose',
fields=[
('chooseID', models.AutoField(serialize=False, primary_key=True)),
('chooseName', models.CharField(max_length=10)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Mon', models.BooleanField(default=False)),
('Tue', models.BooleanField(default=False)),
('Wed', models.BooleanField(default=False)),
('Thu', models.BooleanField(default=False)),
('Fri', models.BooleanField(default=False)),
('Sat', models.BooleanField(default=False)),
('Sun', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Group',
fields=[
('groupID', models.AutoField(serialize=False, primary_key=True)),
('groupName', models.CharField(max_length=30)),
('groupPushTime', models.CharField(max_length=8)),
('defaultValue', models.CharField(max_length=40)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Join',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('isJoin', models.BooleanField(default=True)),
('groupID', models.ForeignKey(to='mobile.Group')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='User',
fields=[
('userID', models.CharField(max_length=10, serialize=False, primary_key=True)),
('userName', models.CharField(max_length=8)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('chooseID', models.ForeignKey(to='mobile.Choose')),
('userID', models.ForeignKey(to='mobile.User')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='join',
name='userID',
field=models.ForeignKey(to='mobile.User'),
preserve_default=True,
),
migrations.AddField(
model_name='group',
name='owner_id',
field=models.ForeignKey(to='mobile.User'),
preserve_default=True,
),
migrations.AddField(
model_name='day',
name='groupID',
field=models.ForeignKey(to='mobile.Group'),
preserve_default=True,
),
migrations.AddField(
model_name='choose',
name='group',
field=models.ForeignKey(to='mobile.Group'),
preserve_default=True,
),
]
| g82411/Hackgen2015 | server/mobile/migrations/0001_initial.py | Python | mit | 3,639 |
import shutil
from buildall import Task, Path
class Copy(Task):
def __init__(self, destination):
self._destination = Path(destination)
def target(self):
return self._destination
def build(self, source):
assert source.exists()
self.debug('Copying %s to %s' % (source,
self._destination))
shutil.copyfile(str(source), str(self._destination))
| rayene/buildall | buildall/contrib/copy.py | Python | mit | 438 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0046_auto_20160108_0511'),
]
operations = [
migrations.AddField(
model_name='studygroup',
name='deleted_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='studygroup',
name='updated_at',
field=models.DateTimeField(default=datetime.datetime(2016, 1, 11, 13, 35, 21, 421885, tzinfo=utc), auto_now=True),
preserve_default=False,
),
migrations.AlterField(
model_name='location',
name='contact',
field=models.CharField(help_text=b'Email or phone for the contact person.', max_length=256),
),
]
| p2pu/learning-circles | studygroups/migrations/0047_auto_20160111_0735.py | Python | mit | 908 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2d.hoverlabel"
_path_str = "histogram2d.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2d.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| plotly/plotly.py | packages/python/plotly/plotly/graph_objs/histogram2d/hoverlabel/_font.py | Python | mit | 11,235 |
from astrosage import Horoscope
from pprint import pprint
h = Horoscope('aquarius')
pprint(h.daily())
# pprint(h.weekly())
# pprint(h.monthly())
# pprint(h.yearly())
# pprint(h.weekly_love()) | sandipbgt/astrosage-api | demo.py | Python | mit | 192 |
# coding: utf-8
RESOURCE_MAPPING = {
'sales_create': {
'resource': 'v2/sales/',
'docs': 'http://apidocs.braspag.com.br/'
#post
},
'sales_capture': {
'resource': 'v2/sales/{id}/capture',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'sales_cancel': {
'resource': 'v2/sales/{id}/void',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'sales_consult': {
'resource': 'v2/sales/{id}',
'docs': 'http://apidocs.braspag.com.br/'
#get
},
'recurrency_consult': {
'resource': 'v2/RecurrentPayment/{id}',
'docs': 'http://apidocs.braspag.com.br/'
#get
},
'recurrency_change_customer_data': {
'resource': 'v2/RecurrentPayment/{id}/Customer',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_end_date': {
'resource': 'v2/RecurrentPayment/{id}/EndDate',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_installments': {
'resource': 'v2/RecurrentPayment/{id}/Installments',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_interval': {
'resource': 'v2/RecurrentPayment/{id}/Interval',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_day': {
'resource': 'v2/RecurrentPayment/{id}/RecurrencyDay',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_next_date': {
'resource': 'v2/RecurrentPayment/{id}/NextPaymentDate',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_amount': {
'resource': 'v2/RecurrentPayment/{id}/Amount',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_change_payment_data': {
'resource': 'v2/RecurrentPayment/{id}/Payment',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_deactivate': {
'resource': 'v2/RecurrentPayment/{id}/Deactivate',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'recurrency_reactivate': {
'resource': 'v2/RecurrentPayment/{id}/Reactivate',
'docs': 'http://apidocs.braspag.com.br/'
#put
},
'merchant_consult_sales': {
'resource': 'v2/sales?merchantOrderId={id}',
'docs': 'http://apidocs.braspag.com.br/'
#get
},
}
| parafernalia/tapioca_braspag | tapioca_braspag/resource_mapping.py | Python | mit | 2,496 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'bootcamp.core.views.home', name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^login', 'django.contrib.auth.views.login', {'template_name': 'core/cover.html'}, name='login'),
url(r'^logout', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
url(r'^signup/$', 'bootcamp.auth.views.signup', name='signup'),
url(r'^settings/$', 'bootcamp.core.views.settings', name='settings'),
url(r'^settings/picture/$', 'bootcamp.core.views.picture', name='picture'),
url(r'^settings/upload_picture/$', 'bootcamp.core.views.upload_picture', name='upload_picture'),
url(r'^settings/save_uploaded_picture/$', 'bootcamp.core.views.save_uploaded_picture', name='save_uploaded_picture'),
url(r'^settings/password/$', 'bootcamp.core.views.password', name='password'),
url(r'^network/$', 'bootcamp.core.views.network', name='network'),
url(r'^feeds/', include('bootcamp.feeds.urls')),
url(r'^questions/', include('bootcamp.questions.urls')),
url(r'^articles/', include('bootcamp.articles.urls')),
url(r'^messages/', include('bootcamp.messages.urls')),
url(r'^notifications/$', 'bootcamp.activities.views.notifications', name='notifications'),
url(r'^notifications/last/$', 'bootcamp.activities.views.last_notifications', name='last_notifications'),
url(r'^notifications/check/$', 'bootcamp.activities.views.check_notifications', name='check_notifications'),
url(r'^search/$', 'bootcamp.search.views.search', name='search'),
url(r'^(?P<username>[^/]+)/$', 'bootcamp.core.views.profile', name='profile'),
url(r'^i18n/', include('django.conf.urls.i18n', namespace='i18n')),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| maxpinto/Ptz | bootcamp/urls.py | Python | mit | 1,953 |
from typing import List, Dict, Type
from collections import deque
import pyqtgraph as pg
from vnpy.trader.ui import QtGui, QtWidgets, QtCore
from vnpy.trader.object import BarData
from .manager import BarManager
from .base import (
GREY_COLOR, WHITE_COLOR, CURSOR_COLOR, BLACK_COLOR,
to_int, NORMAL_FONT
)
from .axis import DatetimeAxis
from .item import ChartItem, CandleItem, VolumeItem
pg.setConfigOptions(antialias=True)
class ChartWidget(pg.PlotWidget):
""""""
MIN_BAR_COUNT = 100
def __init__(self, parent: QtWidgets.QWidget = None, title: str = "ChartWidget of vn.py"):
""""""
super().__init__(parent)
self.title = title
self._manager: BarManager = BarManager()
self._plots: Dict[str, pg.PlotItem] = {}
self._items: Dict[str, ChartItem] = {}
self._item_plot_map: Dict[ChartItem, pg.PlotItem] = {}
self._first_plot: pg.PlotItem = None
self._cursor: ChartCursor = None
self._right_ix: int = 0 # Index of most right data
self._bar_count: int = self.MIN_BAR_COUNT # Total bar visible in chart
self._init_ui()
def _init_ui(self) -> None:
""""""
self.setWindowTitle(self.title)
self._layout = pg.GraphicsLayout()
self._layout.setContentsMargins(10, 10, 10, 10)
self._layout.setSpacing(0)
self._layout.setBorder(color=GREY_COLOR, width=0.8)
self._layout.setZValue(0)
self.setCentralItem(self._layout)
self._x_axis = DatetimeAxis(self._manager, orientation='bottom')
def add_cursor(self) -> None:
""""""
if not self._cursor:
self._cursor = ChartCursor(
self, self._manager, self._plots, self._item_plot_map)
def add_plot(
self,
plot_name: str,
minimum_height: int = 80,
maximum_height: int = None,
hide_x_axis: bool = False
) -> None:
"""
Add plot area.
"""
# Create plot object
plot = pg.PlotItem(axisItems={'bottom': self._x_axis})
plot.setMenuEnabled(False)
plot.setClipToView(True)
plot.hideAxis('left')
plot.showAxis('right')
plot.setDownsampling(mode='peak')
plot.setRange(xRange=(0, 1), yRange=(0, 1))
plot.hideButtons()
plot.setMinimumHeight(minimum_height)
if maximum_height:
plot.setMaximumHeight(maximum_height)
if hide_x_axis:
plot.hideAxis("bottom")
if not self._first_plot:
self._first_plot = plot
# Connect view change signal to update y range function
view = plot.getViewBox()
view.sigXRangeChanged.connect(self._update_y_range)
view.setMouseEnabled(x=True, y=False)
# Set right axis
right_axis = plot.getAxis('right')
right_axis.setWidth(60)
right_axis.tickFont = NORMAL_FONT
# Connect x-axis link
if self._plots:
first_plot = list(self._plots.values())[0]
plot.setXLink(first_plot)
# Store plot object in dict
self._plots[plot_name] = plot
# Add plot onto the layout
self._layout.nextRow()
self._layout.addItem(plot)
def add_item(
self,
item_class: Type[ChartItem],
item_name: str,
plot_name: str
):
"""
Add chart item.
"""
# 创建显示的对象,蜡烛图,bar图,散点,线等
item = item_class(self._manager)
self._items[item_name] = item
# 获取设置的显示区域,例如主图/volume/附图等
plot = self._plots.get(plot_name)
plot.addItem(item)
# 绑定显示对象与显示区域关系
self._item_plot_map[item] = plot
def get_plot(self, plot_name: str) -> pg.PlotItem:
"""
Get specific plot with its name.
"""
return self._plots.get(plot_name, None)
def get_all_plots(self) -> List[pg.PlotItem]:
"""
Get all plot objects.
"""
return self._plots.values()
def clear_all(self) -> None:
"""
Clear all data.
"""
self._manager.clear_all()
for item in self._items.values():
item.clear_all()
if self._cursor:
self._cursor.clear_all()
def update_history(self, history: List[BarData]) -> None:
"""
Update a list of bar data.
"""
self._manager.update_history(history)
for item in self._items.values():
item.update_history(history)
self._update_plot_limits()
self.move_to_right()
def update_bar(self, bar: BarData) -> None:
"""
Update single bar data.
"""
self._manager.update_bar(bar)
for item in self._items.values():
item.update_bar(bar)
# 刷新显示区域的最高/最低值
self._update_plot_limits()
if self._right_ix >= (self._manager.get_count() - self._bar_count / 2):
self.move_to_right()
def _update_plot_limits(self) -> None:
"""
Update the limit of plots.
"""
for item, plot in self._item_plot_map.items():
min_value, max_value = item.get_y_range()
plot.setLimits(
xMin=-1,
xMax=self._manager.get_count(),
yMin=min_value,
yMax=max_value
)
def _update_x_range(self) -> None:
"""
Update the x-axis range of plots.
"""
max_ix = self._right_ix
min_ix = self._right_ix - self._bar_count
for plot in self._plots.values():
plot.setRange(xRange=(min_ix, max_ix), padding=0)
def _update_y_range(self) -> None:
"""
Update the y-axis range of plots.
"""
view = self._first_plot.getViewBox()
view_range = view.viewRange()
min_ix = max(0, int(view_range[0][0]))
max_ix = min(self._manager.get_count(), int(view_range[0][1]))
# Update limit for y-axis
for item, plot in self._item_plot_map.items():
y_range = item.get_y_range(min_ix, max_ix)
plot.setRange(yRange=y_range)
def paintEvent(self, event: QtGui.QPaintEvent) -> None:
"""
Reimplement this method of parent to update current max_ix value.
"""
view = self._first_plot.getViewBox()
view_range = view.viewRange()
self._right_ix = max(0, view_range[0][1])
super().paintEvent(event)
def keyPressEvent(self, event: QtGui.QKeyEvent) -> None:
"""
Reimplement this method of parent to move chart horizontally and zoom in/out.
"""
if event.key() == QtCore.Qt.Key_Left:
self._on_key_left()
elif event.key() == QtCore.Qt.Key_Right:
self._on_key_right()
elif event.key() == QtCore.Qt.Key_Up:
self._on_key_up()
elif event.key() == QtCore.Qt.Key_Down:
self._on_key_down()
def wheelEvent(self, event: QtGui.QWheelEvent) -> None:
"""
Reimplement this method of parent to zoom in/out.
"""
delta = event.angleDelta()
if delta.y() > 0:
self._on_key_up()
elif delta.y() < 0:
self._on_key_down()
def _on_key_left(self) -> None:
"""
Move chart to left.
"""
self._right_ix -= 1
self._right_ix = max(self._right_ix, self._bar_count)
self._update_x_range()
self._cursor.move_left()
self._cursor.update_info()
def _on_key_right(self) -> None:
"""
Move chart to right.
"""
self._right_ix += 1
self._right_ix = min(self._right_ix, self._manager.get_count())
self._update_x_range()
self._cursor.move_right()
self._cursor.update_info()
def _on_key_down(self) -> None:
"""
Zoom out the chart.
"""
self._bar_count *= 1.2
self._bar_count = min(int(self._bar_count), self._manager.get_count())
self._update_x_range()
self._cursor.update_info()
def _on_key_up(self) -> None:
"""
Zoom in the chart.
"""
self._bar_count /= 1.2
self._bar_count = max(int(self._bar_count), self.MIN_BAR_COUNT)
self._update_x_range()
self._cursor.update_info()
def move_to_right(self) -> None:
"""
Move chart to the most right.
"""
self._right_ix = self._manager.get_count()
self._update_x_range()
self._cursor.update_info()
class ChartCursor(QtCore.QObject):
""""""
def __init__(
self,
widget: ChartWidget,
manager: BarManager,
plots: Dict[str, pg.GraphicsObject],
item_plot_map: Dict[ChartItem, pg.GraphicsObject]
):
""""""
super().__init__()
self._widget: ChartWidget = widget
self._manager: BarManager = manager
self._plots: Dict[str, pg.GraphicsObject] = plots
self._item_plot_map: Dict[ChartItem, pg.GraphicsObject] = item_plot_map
self._x: int = 0
self._y: int = 0
self._plot_name: str = ""
self._init_ui()
self._connect_signal()
def _init_ui(self):
""""""
self._init_line()
self._init_label()
self._init_info()
def _init_line(self) -> None:
"""
Create line objects.
"""
self._v_lines: Dict[str, pg.InfiniteLine] = {}
self._h_lines: Dict[str, pg.InfiniteLine] = {}
self._views: Dict[str, pg.ViewBox] = {}
pen = pg.mkPen(WHITE_COLOR)
for plot_name, plot in self._plots.items():
v_line = pg.InfiniteLine(angle=90, movable=False, pen=pen)
h_line = pg.InfiniteLine(angle=0, movable=False, pen=pen)
view = plot.getViewBox()
for line in [v_line, h_line]:
line.setZValue(0)
line.hide()
view.addItem(line)
self._v_lines[plot_name] = v_line
self._h_lines[plot_name] = h_line
self._views[plot_name] = view
def _init_label(self) -> None:
"""
Create label objects on axis.
"""
self._y_labels: Dict[str, pg.TextItem] = {}
for plot_name, plot in self._plots.items():
label = pg.TextItem(
plot_name, fill=CURSOR_COLOR, color=BLACK_COLOR)
label.hide()
label.setZValue(2)
label.setFont(NORMAL_FONT)
plot.addItem(label, ignoreBounds=True)
self._y_labels[plot_name] = label
self._x_label: pg.TextItem = pg.TextItem(
"datetime", fill=CURSOR_COLOR, color=BLACK_COLOR)
self._x_label.hide()
self._x_label.setZValue(2)
self._x_label.setFont(NORMAL_FONT)
plot.addItem(self._x_label, ignoreBounds=True)
def _init_info(self) -> None:
"""
"""
self._infos: Dict[str, pg.TextItem] = {}
for plot_name, plot in self._plots.items():
info = pg.TextItem(
"info",
color=CURSOR_COLOR,
border=CURSOR_COLOR,
fill=BLACK_COLOR
)
info.hide()
info.setZValue(2)
info.setFont(NORMAL_FONT)
plot.addItem(info) # , ignoreBounds=True)
self._infos[plot_name] = info
def _connect_signal(self) -> None:
"""
Connect mouse move signal to update function.
"""
self._widget.scene().sigMouseMoved.connect(self._mouse_moved)
def _mouse_moved(self, evt: tuple) -> None:
"""
Callback function when mouse is moved.
"""
if not self._manager.get_count():
return
# First get current mouse point
pos = evt
for plot_name, view in self._views.items():
rect = view.sceneBoundingRect()
if rect.contains(pos):
mouse_point = view.mapSceneToView(pos)
self._x = to_int(mouse_point.x())
self._y = mouse_point.y()
self._plot_name = plot_name
break
# Then update cursor component
self._update_line()
self._update_label()
self.update_info()
def _update_line(self) -> None:
""""""
for v_line in self._v_lines.values():
v_line.setPos(self._x)
v_line.show()
for plot_name, h_line in self._h_lines.items():
if plot_name == self._plot_name:
h_line.setPos(self._y)
h_line.show()
else:
h_line.hide()
def _update_label(self) -> None:
""""""
bottom_plot = list(self._plots.values())[-1]
axis_width = bottom_plot.getAxis("right").width()
axis_height = bottom_plot.getAxis("bottom").height()
axis_offset = QtCore.QPointF(axis_width, axis_height)
bottom_view = list(self._views.values())[-1]
bottom_right = bottom_view.mapSceneToView(
bottom_view.sceneBoundingRect().bottomRight() - axis_offset
)
for plot_name, label in self._y_labels.items():
if plot_name == self._plot_name:
label.setText(str(self._y))
label.show()
label.setPos(bottom_right.x(), self._y)
else:
label.hide()
dt = self._manager.get_datetime(self._x)
if dt:
self._x_label.setText(dt.strftime("%Y-%m-%d %H:%M:%S"))
self._x_label.show()
self._x_label.setPos(self._x, bottom_right.y())
self._x_label.setAnchor((0, 0))
def update_info(self) -> None:
""""""
buf = {}
for item, plot in self._item_plot_map.items():
item_info_text = item.get_info_text(self._x)
if plot not in buf:
buf[plot] = item_info_text
else:
if item_info_text:
buf[plot] += ("\n\n" + item_info_text)
for plot_name, plot in self._plots.items():
plot_info_text = buf.get(plot, None)
if not plot_info_text:
continue
info = self._infos[plot_name]
info.setText(plot_info_text)
info.show()
view = self._views[plot_name]
top_left = view.mapSceneToView(view.sceneBoundingRect().topLeft())
info.setPos(top_left)
def move_right(self) -> None:
"""
Move cursor index to right by 1.
"""
if self._x == self._manager.get_count() - 1:
return
self._x += 1
self._update_after_move()
def move_left(self) -> None:
"""
Move cursor index to left by 1.
"""
if self._x == 0:
return
self._x -= 1
self._update_after_move()
def _update_after_move(self) -> None:
"""
Update cursor after moved by left/right.
"""
bar = self._manager.get_bar(self._x)
self._y = bar.close_price
self._update_line()
self._update_label()
def clear_all(self) -> None:
"""
Clear all data.
"""
self._x = 0
self._y = 0
self._plot_name = ""
for line in list(self._v_lines.values()) + list(self._h_lines.values()):
line.hide()
for label in list(self._y_labels.values()) + [self._x_label]:
label.hide()
class KlineWidget(ChartWidget):
""" k线widget,支持多widget;主图/volume/附图"""
clsId = 0
def __init__(self, parent: QtWidgets.QWidget = None,
title: str = "kline",
display_volume: bool = False,
display_sub: bool = False):
super().__init__(parent, title)
KlineWidget.clsId += 1
self.windowId = str(KlineWidget.clsId)
        # All indicators on the candlestick (main) chart
self.main_color_pool = deque(['red', 'green', 'yellow', 'white'])
        self.main_indicator_data = {}  # Main-chart indicator data (dict: key = indicator, value = list)
        self.main_indicator_colors = {}  # Main-chart indicator colors (dict: key = indicator, value = color name)
        self.main_indicator_plots = {}  # Main-chart indicator plot items (dict: key = indicator, value = plot)
self.display_volume = display_volume
self.display_sub = display_sub
        # All indicators on the sub chart
self.sub_color_pool = deque(['red', 'green', 'yellow', 'white'])
self.sub_indicator_data = {}
self.sub_indicator_colors = {}
self.sub_indicator_plots = {}
self.main_plot_name = f'{self.windowId}_main'
self.volume_plot_name = f'{self.windowId}_volume'
self.sub_plot_name = f'{self.windowId}_sub'
self.main_plot = None
self.volume_plot = None
self.sub_plot = None
if self.display_volume or self.display_sub:
            self.add_plot(self.main_plot_name, hide_x_axis=True)  # Main chart
            self.add_item(CandleItem, "candle", self.main_plot_name)  # Add the candle item to the main plot area
if self.display_volume:
                self.add_plot(self.volume_plot_name, maximum_height=60)  # Volume sub chart
self.add_item(VolumeItem, "volume", self.volume_plot_name)
self.volume_plot = self.get_plot(self.volume_plot_name)
if self.display_sub:
                self.add_plot(self.sub_plot_name, maximum_height=180)  # Indicator sub chart
self.sub_plot = self.get_plot(self.sub_plot_name)
else:
            self.add_plot(self.main_plot_name, hide_x_axis=False)  # Main chart
            self.add_item(CandleItem, "candle", self.main_plot_name)  # Add the candle item to the main plot area
self.add_cursor()
self.main_plot = self.get_plot(self.main_plot_name)
def add_indicator(self, indicator: str, is_main: bool = True):
"""
        Add an indicator/signal plot.
        :param indicator: name of the indicator/signal, e.g. ma10
        :param is_main: whether it belongs to the main chart
:return:
"""
if is_main:
if indicator in self.main_indicator_plots:
                self.main_plot.removeItem(self.main_indicator_plots[indicator])  # Indicator/signal already exists: remove its old plot item first
            self.main_indicator_plots[indicator] = self.main_plot.plot()  # Create a new main-chart plot item for this indicator/signal and register it
            self.main_indicator_colors[indicator] = self.main_color_pool[0]  # Register the color used by this indicator/signal
            self.main_color_pool.append(self.main_color_pool.popleft())  # Rotate the color pool
if indicator not in self.main_indicator_data:
self.main_indicator_data[indicator] = []
else:
if indicator in self.sub_indicator_plots:
                self.sub_plot.removeItem(self.sub_indicator_plots[indicator])  # Indicator/signal already exists: remove its old sub-chart plot item first
            self.sub_indicator_plots[indicator] = self.sub_plot.plot()  # Create a new sub-chart plot item for this indicator/signal and register it
            self.sub_indicator_colors[indicator] = self.sub_color_pool[0]  # Register the color used by this indicator/signal
            self.sub_color_pool.append(self.sub_color_pool.popleft())  # Rotate the color pool
if indicator not in self.sub_indicator_data:
self.sub_indicator_data[indicator] = []
def clear_indicator(self, main=True):
"""清空指标图形"""
# 清空信号图
if main:
for indicator in self.main_indicator_plots:
self.main_plot.removeItem(self.main_indicator_plots[indicator])
self.main_indicator_data = {}
self.main_indicator_plots = {}
else:
for indicator in self.sub_indicator_plots:
self.sub_plot.removeItem(self.sub_indicator_plots[indicator])
self.sub_indicator_data = {}
self.sub_indicator_plots = {}
def plot_indicator(self, datas: dict, is_main=True, clear=False):
"""
        Refresh indicator/signal plots with new data.
        :param datas: all indicator data
        :param is_main: whether it belongs to the main chart
        :param clear: whether to clear old data first
:return:
"""
if clear:
            self.clear_indicator(is_main)  # Clear the main/sub chart indicators
if is_main:
for indicator in datas:
                self.add_indicator(indicator, is_main)  # Add each main-chart signal/indicator
                self.main_indicator_data[indicator] = datas[indicator]  # Update the component's data dict
                # Update the plot item via plotDataItem.setData(): new data, pen color and name
self.main_indicator_plots[indicator].setData(datas[indicator],
pen=self.main_indicator_colors[indicator][0],
name=indicator)
else:
for indicator in datas:
                self.add_indicator(indicator, is_main)  # Add each sub-chart indicator/signal
                self.sub_indicator_data[indicator] = datas[indicator]  # Update the component's data dict
                # Update the plot item via plotDataItem.setData(): new data, pen color and name
self.sub_indicator_plots[indicator].setData(datas[indicator],
pen=self.sub_indicator_colors[indicator][0], name=indicator)
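# A minimal usage sketch (hypothetical variable names; assumes `bars` is a list
# of vnpy BarData objects loaded elsewhere, e.g. from a database or CSV file):
#
#   app = QtWidgets.QApplication([])
#   widget = KlineWidget(title="demo", display_volume=True, display_sub=True)
#   widget.update_history(bars)                                  # draw candles and volume
#   widget.plot_indicator({'ma10': ma10_values}, is_main=True)   # overlay a moving average
#   widget.show()
#   app.exec_()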
| msincenselee/vnpy | vnpy/chart/widget.py | Python | mit | 21,789 |
import matplotlib.pyplot as plt
import numpy as np
def plot():
fig = plt.figure()
N = 10
t = np.linspace(0, 1, N)
x = np.arange(N)
plt.plot(t, x, "-o", fillstyle="none")
plt.tight_layout()
return fig
def test():
from .helpers import assert_equality
assert_equality(plot, "test_fillstyle_reference.tex")
| nschloe/matplotlib2tikz | tests/test_fillstyle.py | Python | mit | 345 |
class Parent(object):
def override(self):
print("PARENT override()")
def implicit(self):
print("PARENT implicit()")
def altered(self):
print("PARENT altered()")
class Child(Parent):
def override(self):
print("CHILD override()")
def altered(self):
print("CHILD, BEFORE PARENT altered()")
super(Child, self).altered()
print("CHILD, AFTER PARENT altered()")
dad = Parent()
son = Child()
dad.implicit()
son.implicit()
dad.override()
son.override()
dad.altered()
son.altered()
| Paul-Haley/LPTHW_python3 | ex44d.py | Python | mit | 570 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RetailPartners.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| darshanbagul/EntityManagement | RetailPartners/manage.py | Python | mit | 257 |
from __future__ import (absolute_import, division, print_function, unicode_literals)
__author__ = 'vlad'
__version__ = 0.2
import numpy as np
from scipy import ndimage
# from sklearn.cluster import KMeans
import skimage.morphology as morph
# from skimage.restoration import denoise_tv_bregman
from skimage.feature import peak_local_max
import mahotas
# from stain.he import rgb2he
HE_OPTS = {'gauss1': np.sqrt(2.0),
'gauss2': 1.0/np.sqrt(2.0),
'strel1': morph.disk(3),
'bregm': 3.5,
# options for nuclei extraction at 40x magnification:
'40x_nuclei_min_area': 30}
# NUCLEI_REGIONS
def nuclei_regions(comp_map):
"""
NUCLEI_REGIONS: extract "support regions" for nuclei. This function
expects as input a "tissue components map" (as returned, for example,
by segm.tissue_components) where values of 1 indicate pixels having
a color corresponding to nuclei.
It returns a set of compact support regions corresponding to the
nuclei.
:param comp_map: numpy.ndarray
A mask identifying different tissue components, as obtained
by classification in RGB space. The value 0
See segm.tissue.tissue_components()
:return:
"""
# Deprecated:...
# img_hem, _ = rgb2he(img0, normalize=True)
# img_hem = denoise_tv_bregman(img_hem, HE_OPTS['bregm'])
# Get a mask of nuclei regions by unsupervised clustering:
# Vector Quantization: background, mid-intensity Hem and high intensity Hem
# -train the quantizer for 3 levels
# vq = KMeans(n_clusters=3)
# vq.fit(img_hem.reshape((-1,1)))
# -the level of interest is the brightest:
# k = np.argsort(vq.cluster_centers_.squeeze())[2]
# mask_hem = (vq.labels_ == k).reshape(img_hem.shape)
# ...end deprecated
# Final mask:
mask = (comp_map == 1) # use the components classified by color
# mask = morph.closing(mask, selem=HE_OPTS['strel1'])
# mask = morph.opening(mask, selem=HE_OPTS['strel1'])
# morph.remove_small_objects(mask, in_place=True)
# mask = (mask > 0)
mask = mahotas.close_holes(mask)
morph.remove_small_objects(mask, in_place=True)
dst = mahotas.stretch(mahotas.distance(mask))
Bc=np.ones((9,9))
lmax = mahotas.regmax(dst, Bc=Bc)
spots, _ = mahotas.label(lmax, Bc=Bc)
regions = mahotas.cwatershed(lmax.max() - lmax, spots) * mask
return regions
# end NUCLEI_REGIONS | vladpopovici/WSItk | WSItk/segm/nuclei.py | Python | mit | 2,446 |
from django.contrib import admin
from imager_profile.models import UserProfile
# Register your models here.
admin.site.register(UserProfile) | clair3st/django-imager | imagersite/imager_profile/admin.py | Python | mit | 142 |
import ftfy
import PyPDF2
import unidecode
from bs4 import BeautifulSoup
from urllib.request import urlopen
from goslate import Goslate
from mercury_api import ParserAPI
import html2text
import os
html_parser = html2text.HTML2Text()
html_parser.ignore_links = True
html_parser.ignore_images = True
html_parser.body_width = 0
mercury = ParserAPI(api_key='p5XXJvAPT7AtTyi6zuPAHRVk2RaX8z7lxJaNiERz')
gs = Goslate()
speech_system = 'google'
language = 'en'
class Story:
"""
This class handles the retrieval and classification
of text from the various websites or formats.
Now can handle mutable iterables.
---------------------------------------------------------
Supported Websites/Filetypes:
* Fanfiction.net
* Wattpad
* Deviantart
* PDF Books - Tenuous
* Plain Text
---------------------------------------------------------
Attributes:
url: Inputted string representing a story
speech: Speech to text system to be used
local: Local system solution
google: Google text to speech engine
text: Initially equal to url, set to story text after
initialization is performed.
changes: A dictionary listing changes that should be
made to the text before it is spoken.
chapters: On fanfiction, holds a list of chapters
initialized: Whether the story has retrieved its text
type: What is the input, where does it lead?
wattpad: http://wattpad.com
fanficton: http://fanfiction.net
deviantart: http://deviantart.com
pdf: Can be stored online or locally
text: Plain text to be read, fallback type
iterable: Objects like a list with multiple items
pathtype: Is it stored online or locally?
url: Online
local: Locally
"""
def __init__(self, url):
self.url = url
self.speech = speech_system
self.text = url
self.chapters = []
self.initialized = False
self.changes = {}
self.language = language
if not hasattr(self.url, '__iter__') or type(self.url) is str:
if 'wattpad' in self.url:
self.type = 'wattpad'
elif 'fanfiction' in self.url and 'wattpad' not in self.url:
self.type = 'fanfiction'
elif 'deviantart' in self.url:
self.type = 'deviantart'
elif 'pdf' in self.url:
self.type = 'pdf'
else:
self.type = 'text'
if url.startswith(('http://', 'https://')):
self.pathtype = 'url'
else:
self.pathtype = 'local'
if '.com' in url or '.net' in url:
self.url = 'http://' + url
self.pathtype = 'url'
else:
self.type = 'iterable'
self.backup = self.url
map_class = [Story(each_url) for each_url in self.url]
self.url = map_class
def __repr__(self):
return "Story({})".format(self.url)
def __bool__(self):
return self.initialized
def __add__(self, other):
return self.text + other
def __radd__(self, other):
return other + self.text
def __lt__(self, other):
return len(self.text) < other
def __le__(self, other):
return len(self.text) <= other
def __eq__(self, other):
return len(self.text) == other
def __ne__(self, other):
return len(self.text) != other
def __gt__(self, other):
return len(self.text) > other
def __ge__(self, other):
return len(self.text) >= other
def __getitem__(self, index):
if index > len(self.url) - 1:
raise IndexError
return self.url[index]
def __iter__(self):
return iter(self.url)
def __len__(self):
return len(self.text)
def initialize(self):
"""
Automatically detects and initializes the
different types of stories that can be
inputted.
"""
if self.type == 'wattpad':
self.wattpad()
elif self.type == 'fanfiction':
self.fanfiction()
elif self.type == 'deviantart':
self.deviantart()
elif self.type == 'pdf':
self.pdf_initialize()
else:
if self.pathtype == 'url':
self.text = html_parser.handle(mercury.parse(self.url).content)
self.initialized = True
#self.language = gs.detect(self.text)
def translate(self, target='en'):
"""
        If the story text has been gathered and analyzed,
then translate it from its current detected language
into a given target language, usually the reader's native
language. By default the target language is English.
"""
if self.initialized:
# self.source_language = gs.get_languages()[self.language]
# self.target_language = gs.get_languages()[target]
# print("Translating from {0} to {1}.".format(self.source_language, self.target_language))
# self.text = gs.translate(self.text, target)
# self.language = target
pass
else:
print("Please initialize.")
def fanfiction(self):
"""
Retrieves and parses text from a Fanfiction.net story.
Will attempt to grab a chapter list.
"""
# Opens and parses the URL with BeautifulSoup
soup = BeautifulSoup(urlopen(str(self.url)), "html.parser")
# Retrieve story text from the URL to be used elsewhere
try:
# The following code knows a bit too much about the input
# Find better solution, this will likely break for edge cases
# Or not. This now works for edge cases. - 10/4/2015 6:09 PM
self.text = soup.find(class_='storytext').text
# Following code will grab the number of chapters for later use.
chapter_list = soup.select('#chap_select')
if chapter_list: # There are multiple chapters
options = str(chapter_list[0].option)
last_chapter_w_extra = options.split('option value="')[-1]
last_chapter = int(last_chapter_w_extra.split('"')[0])
self.chapters = list(map(str, range(-1, last_chapter + 1)))
else:
self.chapters = ['-1', '0', '1']
# self.language = gs.detect(self.text)
self.initialized = True
"""
# This code tries to get chapter names, but doesn't always work
# It remains to remind me what not to do.
# Meanwhile, chapters will be named after their number.
options = soup.select('#chap_select')[0].option.text
options_modified = options
for char in range(len(options)):
if options[char].isdigit() and options[char + 1] == '.':
options_modified = options_modified.replace(
options[char], "~$~" + options[char]
)
self.chapters = options_modified.split('~$~')[1:]
"""
except Exception as E:
print('Retrieval of Fanfiction story failed: ' + str(E))
def deviantart(self):
"""
Retrieves text from Deviantart stories.
"""
try:
soup = BeautifulSoup(urlopen(str(self.url)), "html.parser")
self.text = soup.select('#devskin >'
' div > div >'
' div.gr-body >'
' div > div >'
' div')[0].text
# self.language = gs.detect(self.text)
self.initialized = True
except Exception as E:
print('Retrieval of Deviantart story failed: ' + str(E))
def wattpad(self, page=0, mode='singular'):
"""
Retrieve text from Wattpad stories given a page
number and a mode type.
Mode types are singular and plural.
Notice: Now that Wattpad uses an authentication system
this method no longer works. The only solution will be
to require the user to enter a username and password
upon entering a Wattpad URL.
"""
# Sets up the page variable to be added onto the URL
page = '/page/' + str(page) if page else ''
soup = BeautifulSoup(urlopen(str(self.url + page)), "html.parser")
# Finds the path and contents given by the search
if mode == 'singular':
self.text = soup.find(class_="panel panel-reading")
elif mode == 'plural':
self.text = soup.find_all(class_="panel panel-reading")
# self.language = gs.detect(self.text)
self.initialized = True
def pdf_initialize(self):
"""
Sets up the retrieval of text from a PDF,
whether stored online or locally.
"""
local_path = os.getcwd() + '\\PDF2BEREAD.pdf'
if os.path.isfile(local_path):
os.remove(local_path)
if self.pathtype == 'url':
# Download the PDF from the web
web_path = urlopen(self.url)
with open('PDF2BEREAD.pdf', 'wb') as file:
file.write(web_path.read())
self.url = local_path
self.initialized = True
def pdf(self, page):
"""
Retrieves text from a PDF document, stored locally or online.
"""
# While this works it's a bit odd. More research required.
page = PyPDF2.PdfFileReader(self.url).getPage(page)
self.text = page.extractText().replace('\u2122', "'")
# self.language = gs.detect(self.text)
def parse(self):
"""
Removes all unicode characters, nonprintable characters,
and unneeded special characters.
This formats the text for audio reading.
"""
try: # Attempt to scrub the unicode with a library
text = ftfy.fix_text(self.text)
self.text = unidecode.unidecode(text).replace('[?]', '')
except Exception: # If that fails, kill it with fire.
print("Nuking the text.")
text = bytes(self.text, 'utf-8')
text = text.decode('unicode_escape')
text = text.encode('ascii', 'ignore')
text = text.decode('utf-8')
self.text = str(text)
# try: # Try to translate the story into the reader's language
# if self.language != language:
# # self.translate(language)
# except:
# pass
# Formats text to remove odd artifacts from the conversion
self.changes.update({
'\n': ' ', '\r': ' ',
'"': "'", '.': '. ',
'. . . ': '', '. . .': '...',
"\'": "'", '\"': '',
':': ': ', ': ': ': ',
'!': '! ', '! ': '! ',
'?': '? ', '? ': '? ',
';': '; ', '; ': '; ',
'0': '0 ', '1': '1 ',
'2': '2 ', '3': '3 ',
'4': '4 ', '5': '5 ',
'6': '6 ', '7': '7 ',
'8': '8 ', '9': '9 '
})
if self.speech == 'local':
# The Microsoft SAPI pronunciation is a bit off
self.changes.update({
'Tali': 'Tahlie', 'tali': 'tahlie',
'Yalo': ' Yah-lo ', 'caf ': 'cafe ',
'Garrus': 'Gae-rrus', 'Klenon': 'Klenn une',
'Binary': 'Bi-nary', 'Noveria': ' No-veir-eaah ',
'Vakarian': 'Vah-kare-eean'
})
else:
# Google's TTS is better at its job :)
self.changes.update({
'Tali': 'Tahhlee', 'tali': 'Tahhlee',
'caf ': 'cafe '
})
# Apply the changes to the text.
for original_word, changed_word in self.changes.items():
self.text = self.text.replace(original_word, changed_word)
| Zenohm/OmniReader | OmniReader/story.py | Python | mit | 12,411 |
from os.path import isfile, exists, abspath, dirname
from os import chdir, getcwd
from startup import EnumWindows, EnumWindowsProc, foreach_window, similar, windows
from pywinauto import application
from keyboard import PressKey, ReleaseKey, VK_TAB, VK_SHIFT
from time import time, sleep
import re
import pyotp
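# ctok maps lowercase letters to their Windows virtual-key codes; for letters
# these equal the ASCII code of the uppercase character (e.g. ord('C') == 67).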
ctok = {'c': 67, 'z': 90, 'j': 74, 'x': 88, 'v': 86, 'r': 82, 'p': 80, 'f': 70, 'q': 81, 'y': 89, 'k': 75, 't': 84, 'd': 68, 'h': 72, 'l': 76, 'i': 73, 'm': 77, 'a': 65, 's': 83, 'u': 85, 'g': 71, 'e': 69, 'w': 87, 'n': 78, 'b': 66, 'o': 79}
timeout = 5
chdir(dirname(abspath(__file__))) # Change the script's working directory to the script's own directory
file = 'ignore/Cisco AnyConnect.txt'
print('looking for', file, 'in', getcwd())
if exists(file):
password, secret = [l.strip() for l in open(file).readlines()]
otp = pyotp.TOTP(secret)
del secret
else:
input('settings file (%s) not found' % file) # wait for user input before exit
exit()
path = "%PROGRAMFILES(x86)%\Cisco\Cisco AnyConnect Secure Mobility Client\vpnui.exe"
path = r'C:\Program Files (x86)\Cisco\Cisco AnyConnect Secure Mobility Client\vpnui.exe'
app = application.Application()
sleep(1)
app.start(path)
cac = r'Cisco AnyConnect'
smc = r'Cisco AnyConnect Secure Mobility Client'
pattern = r'Cisco AnyConnect (\|) ([\w ]+)'
EnumWindows(EnumWindowsProc(foreach_window), 0)
l = sorted([similar(title, cac) for title in windows], reverse=True)[:2]
if l[0][1] == smc:
print('Found:', smc)
for _ in range(3):
PressKey(VK_TAB)
ReleaseKey(VK_TAB)
sleep(0.05)
PressKey(0x0D)
ReleaseKey(0x0D)
start = time()
while not re.match(pattern, l[0][1]):
sleep(0.1)
if time() > start + timeout:
input('Waiting too long for %s prompt' % cac)
exit() # wait for user input before exit
EnumWindows(EnumWindowsProc(foreach_window), 0)
l = sorted([similar(title, cac) for title in windows], reverse=True)[:2]
print(otp.now())
for c in password + '\t' + otp.now() + '\t\r':
if c.isupper():
PressKey(VK_SHIFT)
if c in ctok:
PressKey(ctok[c])
ReleaseKey(ctok[c])
else:
PressKey(ord(c))
ReleaseKey(ord(c))
if c.isupper():
ReleaseKey(VK_SHIFT)
sleep(0.05)
| wittrup/crap | python/anyconnect.py | Python | mit | 2,265 |
# f1/s/io/persistence.py
import uuid
import json
import records
import os
from .sqlite import SQlite
from .couchbase import Couchbase
class Persistence:
me = None
def __init__(self, boot_config=None, briefing=None):
"""
Central class for data storage. Handles reads/writes and sync.
remote_dbs:
local_dbs:
sync: defines sync rules per class
"""
if boot_config:
# print('Persistence.__init__ bootstrap')
self.remote_host = boot_config['mc']['host']
self.remote_port = boot_config['mc']['port']
user = boot_config['mc']['user']
password = boot_config['mc']['password']
self.credentials = {'user': user, 'password': password}
else:
if not briefing:
from f1.a.user.session import Session
# print('Persistence.__init__ not briefing > Session')
if Session.me.briefing.briefing:
try:
briefing = Session.me.briefing.briefing
except Exception as exc:
print(exc)
self.briefing = briefing
try:
# self.brief_me(briefing)
# print('Persistence.__init__ briefing')
self.remote_host = briefing['details']['database']['remote']['host']
self.remote_port = briefing['details']['database']['remote']['port']
self.credentials = {'user': None, 'password': None} # TODO
if briefing['details']['database']['remote']['type'] == 'couchbase':
try:
self.remote_dbs = Couchbase(self.remote_host, self.remote_port, self.credentials)
except Exception as exc:
print(exc)
else:
print(briefing['details']['database']['remote']['type'])
if briefing['details']['database']['local']['type'] == 'sqlite':
try:
self.local_dbs = SQlite()
except Exception as exc:
print(exc)
else:
print(briefing['details']['database']['local']['type'])
self.local_path = briefing['details']['database']['local']['path']
self.sync = briefing['mission']['sync']
self.remote_last_sync = ''
self.remote_last_sync_class = {}
except Exception as exc:
print(exc)
# TODO credentials
# try:
# with open('/zeroclient/config/cred_{}.json'.format(self.local_dbs)) as data_file:
# self.credentials = json.load(data_file)
# except Exception as exc:
# print('Persistent.load_json_cred', exc)
@classmethod
def set_me_boot(cls, boot_config):
if cls.me is None:
cls.me = Persistence(boot_config=boot_config)
return cls.me
@classmethod
def set_me_briefing(cls, briefing):
if hasattr(cls.me, 'briefing'):
if not cls.me.briefing:
cls.me = Persistence(boot_config=None, briefing=briefing)
return cls.me
else:
cls.me = Persistence(boot_config=None, briefing=briefing)
return cls.me
# @classmethod
# def brief_me(cls, briefing):
#
# print('Persistence.__init__ briefing')
# remote_host = briefing['details']['database']['remote']['host']
# remote_port = briefing['details']['database']['remote']['port']
# credentials = {'user': None, 'password': None} # TODO
#
# if briefing['details']['database']['remote']['type'] == 'couchbase':
# try:
# cls.remote_dbs = Couchbase(remote_host, remote_port, credentials)
# print(cls.remote_dbs)
# except Exception as exc:
# print(exc)
#
# if briefing['details']['database']['local']['type'] == 'sqlite':
# try:
# cls.local_dbs = SQlite()
# print(cls.local_dbs)
# except Exception as exc:
# print(exc)
#
# cls.local_path = briefing['details']['database']['local']['path']
#
# cls.sync = briefing['mission']['sync']
#
# cls.remote_last_sync = ''
# cls.remote_last_sync_class = {}
def insert(self, object_data, object_class):
result = self.local_dbs.write(self.local_path, object_data, object_class)
return result
def update(self, data_object, object_class):
pass
def read(self, data, object_class):
# TODO insert logic: local or remote
# print('persistence. init: ', self.init)
# bootstrap
if object_class == 'AUserBriefing':
assert data['device_id']
data['result'] = '*'
data['lookup_key'] = 'device_id'
data['lookup_value'] = data['device_id']
# credentials = Session.me.secrets # TODO
self.remote_dbs = Couchbase(self.remote_host, self.remote_port, self.credentials)
result = self.remote_dbs.read(data, object_class)
return result
else:
print('Persistence.read(object_class)', object_class)
# result = self.local_dbs.read(self.local_path, data, object_class)
# return result
def sync(self, object_class):
"""
        Synchronizes local and remote databases according to self.sync
:param object_class: object (table, bucket) to be synced
:return: True or False
"""
# TODO infinite loop or trigger?
pass
@staticmethod
def uuid():
uid = str(uuid.uuid4())
return uid
def set_me(self):
pass
def test(self):
pass
| filemakergarage/zeroclient | f1/s/io/persistence.py | Python | mit | 5,932 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rohan.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| jairtrejo/doko | app/manage.py | Python | mit | 248 |
"""Report the manhattan distance between a starting point and an ending point,
given a set of directions to follow to move between the two points."""
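# For reference, the manhattan (taxicab) distance between two points is
# |x1 - x2| + |y1 - y2|; e.g. from (0, 0) to (3, -4) it is 3 + 4 = 7.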
from distance import get_distance
from directions import load_directions, follow_directions
def main():
directions = load_directions('directions.txt')
starting_point = (0, 0)
starting_orientation = 'N'
ending_point, _ = follow_directions(starting_point, starting_orientation, *directions)
print(get_distance(starting_point, ending_point))
if __name__ == '__main__':
main()
| machinelearningdeveloper/aoc_2016 | 01/solve_1.py | Python | mit | 553 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# This is a simple PMI tool based on inverted_index
#
# Reference: http://en.wikipedia.org/wiki/Pointwise_mutual_information
#
# @author: Jason Wu ([email protected])
from inverted_index import InvertedIndex
from topkheap import TopkHeap
import math
class PMIElement(object):
'''Term's PMI element
With another term's value and pmi of this term and other term
Attributes:
t2 : another term
pmi: pmi value of this term and t2
'''
def __init__(self, t2, pmi = 0):
self.t2 = t2
self.pmi = pmi
def __gt__(self, element):
# compare the pmi value
if isinstance(element, PMIElement):
return self.pmi > element.pmi
raise TypeError
def __eq__(self, other):
# must t2 and pmi equal
if isinstance(other, PMIElement) and \
other.t2 == self.t2 and other.pmi == self.pmi:
return True
return False
def __str__(self):
# t2:pmi
return self.t2 + ":" + str(self.pmi)
def get_pair(self):
'''get term and pmi pair
Returns:
t2 and pmi tuple
'''
return (self.t2, self.pmi)
class PMI(object):
'''PMI compute for items based on InvertedIndex
The library constructs an inverted index corpus from documents specified by reading from input files.
    It then computes, for every term, its top-k PMI elements and their PMI scores.
Attributes:
iindex : the inverted index of given documents
        top : the k in top-k, i.e. how many PMI elements are kept per term
        term_pmi : dictionary of each term's top PMI elements ("term : list of PMIElement")
'''
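    # A minimal usage sketch (assumes `iindex` is an InvertedIndex already built
    # elsewhere from the input documents; the term 'apple' is hypothetical):
    #
    #   pmi = PMI(iindex, top=10)
    #   pmi.build()
    #   for t2, score in (e.get_pair() for e in pmi.get_top_pmi('apple')):
    #       print(t2, score)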
def __init__(self, inverted_index, top = 50):
'''init all attributes
Args:
inverted_index : InvertedIndex instance
            top : how many top elements to keep per term
'''
self.iindex = inverted_index
self.top = top
self.term_pmi = {}
def change_inverted_index(self, inverted_index):
'''change instance's iindex
Args:
inverted_index : InvertedIndex instance
'''
        self.iindex = inverted_index
def build(self):
        '''Compute the top PMI elements for all terms.
        The set of terms comes from iindex's get_terms method.
'''
terms = self.iindex.get_terms()
for term in terms:
self.term_pmi[term] = TopkHeap(self.top)
for i in range(len(terms)-1):
for j in range(i+1, len(terms)):
pmi = self.compute_pmi(terms[i], terms[j])
self.term_pmi[terms[i]].push(PMIElement(terms[j], pmi))
self.term_pmi[terms[j]].push(PMIElement(terms[i], pmi))
def compute_pmi(self, t1 , t2):
# PMI(t1, t2) = log(p(t1,t2)/(p(t1)p(t2)))
# = concurrent * N / (xapp * yapp)
if self.iindex.get_word_appear(t1) == 0 or self.iindex.get_word_appear(t2) == 0:
return -float('inf')
to_log = self.iindex.concurrence(t1, t2) * self.iindex.get_num_docs() \
/(self.iindex.get_word_appear(t1) \
* self.iindex.get_word_appear(t2))
if to_log == 0:
return -float('inf')
else:
return math.log(to_log, 2)
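    # A quick worked example of the formula above (hypothetical counts, not from
    # any real corpus): with N = 1000 documents, t1 appearing in 100, t2 in 50,
    # and both co-occurring in 20, the ratio is 20 * 1000 / (100 * 50) = 4,
    # so PMI(t1, t2) = log2(4) = 2.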
def get_top_pmi(self, term):
'''Get top pmi elements of given term.
Args:
term : the given term to get top pmi elements
Returns:
A list object of PMIElement
'''
return self.term_pmi[term].topk()
class MI(object):
'''MI compute based on PMI and InvertedIndex
Attributes:
iindex : the inverted index of given documents
        pmi : PMI instance used to score term pairs
'''
def __init__(self, inverted_index, pmi):
'''Init all attributes
Args:
inverted_index : InvertedIndex instance
            pmi : PMI instance
'''
self.iindex = inverted_index
self.pmi = pmi
def compute_mi(self, sentence1, sentence2):
        '''Compute the MI of two sentences.
Args:
sentence1 : list of words
sentence2 : list of words
'''
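        # What the loops below compute, using counts from the inverted index:
        #   MI(s1, s2) = sum over t1 in s1, t2 in s2 of p(t1, t2) * PMI(t1, t2),
        # where p(t1, t2) is approximated by concurrence(t1, t2) / num_docs.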
res = 0.0
for t1 in sentence1:
for t2 in sentence2:
pmi = self.pmi.compute_pmi(t1, t2)
if pmi != -float('inf'):
res += self.iindex.concurrence(t1, t2) / self.iindex.get_num_docs() * pmi
return res | jasonwbw/NLPbasic | retrieval/basic/pmi.py | Python | mit | 3,869 |
import os.path
from dxr.query import Query
from dxr.utils import connect_db
from dxr.testing import SingleFileTestCase, MINIMAL_MAIN
from nose.tools import eq_
class MemberFunctionTests(SingleFileTestCase):
source = """
class MemberFunction {
public:
void member_function(int a); // Don't assume the qualname
// field in the DB ends in just
// ().
class InnerClass {
};
};
void MemberFunction::member_function(int a) {
}
""" + MINIMAL_MAIN
def direct_result_eq(self, query_text, line_num):
conn = connect_db(os.path.join(self._config_dir_path, 'target', 'trees', 'code'))
eq_(Query(conn, query_text).direct_result(), ('main.cpp', line_num))
def test_qualified_function_name_prefix(self):
"""A unique, case-insensitive prefix match on fully qualified function
name should take you directly to the result."""
self.direct_result_eq('MemberFunction::member_FUNCTION', 12)
def test_qualified_type_name(self):
"""A unique, case-insensitive prefix match on fully qualified type name
should take you directly to the result."""
self.direct_result_eq('MemberFunction::InnerCLASS', 8)
def test_line_number(self):
"""A file name and line number should take you directly to that
file and line number."""
self.direct_result_eq('main.cpp:6', 6)
| erikrose/dxr | tests/test_direct.py | Python | mit | 1,544 |
# coding: utf-8
# (c) 2015-11-28 Teruhisa Okada
import netCDF4
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import numpy as np
import pandas as pd
import romspy
def resample(date, var, **kw):
rule = kw.pop('resample', 'D')
if rule == 'H':
loffset = '-30min'
elif rule == 'D':
loffset = '-12H'
elif rule == 'M':
loffset = '-15D'
df = pd.DataFrame({'sur':var[:,-1], 'bot':var[:,0]}, index=date)
df = df.resample(rule, loffset=loffset)
return df.index.values, df.sur.values, df.bot.values
def tplot_station_main(stafile, vname, station, dates, **kw):
print stafile, vname, station, dates
ax = kw.pop('ax', None)
date_format = kw.pop('date_format', '%Y-%m')
cff = kw.pop('cff', 1.0)
#ntime = kw.pop('ntime', 8785)
if ax is None:
ax = plt.gca()
nc = netCDF4.Dataset(stafile, 'r')
time = nc.variables['ocean_time']
time = np.linspace(time[0], time[-1], len(time))
date = netCDF4.num2date(time, romspy.JST)
var = nc.variables[vname][:,station-1,[0,19]] * cff
date, sur, bot = resample(date, var, **kw)
ax.plot(date, sur, '-', lw=1.5, color='#4D71AF', label='surface')
ax.plot(date, bot, '-', lw=1.5, color='#C34F53', label='bottom')
ax.legend(loc='best')
ax.set_title('Sta.{}'.format(station))
ax.set_ylabel(vname)
ax.set_xlim(dates[0], dates[-1])
ax.xaxis.set_major_formatter(DateFormatter(date_format))
def tplot_station(stafile, vname, station, dates, **kw):
if 'N' in vname:
cff = romspy.mol2g_N
elif 'P' in vname:
cff = romspy.mol2g_P
#elif 'plankton' in vname:
# cff = romspy.mol2g_N
elif 'oxygen' in vname:
cff = romspy.mol2g_O2
else:
cff = 1.0
tplot_station_main(stafile, vname, station, dates, cff=cff, **kw)
if __name__ == '__main__':
import seaborn as sns
import datetime
stafile = '/home/okada/ism-i/apps/OB500P/case25/NL2/ob500_sta.nc'
vname = 'phytoplankton'
stations = [3,4,5,6,12,13]
dates = [datetime.datetime(2012,1,1,0), datetime.datetime(2013,1,1,0)]
fig, axes = plt.subplots(6,1, figsize=[10,15])
plt.subplots_adjust(hspace=0.4)
for station, ax in zip(stations, axes):
tplot_station(stafile, vname, station, dates, ax=ax, date_format='%m/%d')
ax.set_ylim(-1,1)
plt.show()
| okadate/romspy | romspy/tplot/tplot_station.py | Python | mit | 2,385 |
"""
The purpose of this server is to maintain a list of
nodes connected to the P2P network. It supports traditional
passive nodes which receive connections but it also
provides a protocol to facilitate TCP hole punching
allowing >85% of nodes behind a standard router to
receive connections.
Notes: Passives nodes don't keep a persistent connection
to the rendezvous server because they don't need to
and it would be unnecessarily taxing. For this reason,
the list of passive nodes retrieved from bootstrapping
may be stale but they are periodically cleaned every node
life time. Additionally, well behaved passive nodes
send the clear command which causes the server to remove
the node details.
"""
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from .lib import *
error_log_path = "error.log"
debug = 1
class RendezvousProtocol(LineReceiver):
def __init__(self, factory):
self.factory = factory
        self.challenge_timeout = 60 * 2 # Seconds.
        self.node_lifetime = 60 * 60 * 24 # 24 hours.
self.cleanup = 5 * 60 # Every 5 minutes.
self.max_candidates = 100 # Per simultaneous node.
self.connected = False
def log_entry(self, msg, direction="none"):
if sys.version_info >= (3, 0, 0):
if type(msg) == bytes:
msg = msg.decode("utf-8")
ip_addr = str(self.transport.getPeer().host)
port = str(self.transport.getPeer().port)
when = time.strftime("%H:%M:%S %Y-%m-%d")
who = """%s:%s""" % (ip_addr, port)
if direction == "send":
direction = " -> "
elif direction == "recv":
direction = " <- "
else:
direction = " "
entry = """[%s] %s%s%s""" % (when, msg, direction, who)
return entry
def send_line(self, msg):
# Not connected.
if not self.connected:
return
# Network byte order.
try:
if type(msg) != bytes:
msg = msg.encode("ascii")
except Exception as e:
print("send line e")
print(e)
return
# stdout for debugging.
if debug:
print(self.log_entry(msg, "send"))
self.sendLine(msg)
def send_remote_port(self):
"""
Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port.
"""
msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
self.send_line(msg)
def is_valid_ipv4_address(self, address):
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
def is_valid_port(self, port):
port = re.findall("^[0-9]+$", str(port))
if len(port):
port = int(port[0])
if 0 < port <= 65535:
return 1
return 0
def cleanup_candidates(self, node_ip):
"""
Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
elapsed = int(time.time() - candidate["time"])
                if elapsed > self.challenge_timeout:
old_candidates.append(candidate)
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate)
def propogate_candidates(self, node_ip):
"""
Used to progate new candidates to passive simultaneous
nodes.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Not connected.
if not candidate["con"].connected:
continue
# Already sent -- updated when they accept this challenge.
if candidate["propogated"]:
continue
                # Notify node of challenge from client.
msg = "CHALLENGE %s %s %s" % (
candidate["ip_addr"],
" ".join(map(str, candidate["predictions"])),
candidate["proto"])
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
old_candidates.append(candidate)
def synchronize_simultaneous(self, node_ip):
"""
        Because adjacent mappings for certain NAT types can
        be stolen by other connections, the purpose of this
        function is to ensure that the last connection from a
        passive simultaneous node is recent relative to a
        candidate's timestamp, which increases the chance that
        the predicted mappings remain active for the TCP hole
        punching attempt.
"""
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
               self.challenge_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip)
def connectionMade(self):
try:
self.connected = True
if debug:
print(self.log_entry("OPENED =", "none"))
# Force reconnect if node has candidates and the timeout is old.
ip_addr = self.transport.getPeer().host
if ip_addr in self.factory.nodes["simultaneous"]:
# Update time.
self.factory.nodes["simultaneous"][ip_addr]["time"] =\
time.time()
self.synchronize_simultaneous(ip_addr)
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error))
def connectionLost(self, reason):
"""
Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections.
"""
try:
self.connected = False
if debug:
print(self.log_entry("CLOSED =", "none"))
# Every five minutes: cleanup
t = time.time()
if time.time() - self.factory.last_cleanup >= self.cleanup:
self.factory.last_cleanup = t
# Delete old passive nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["passive"]):
passive_node = self.factory.nodes["passive"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - passive_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["passive"][node_ip]
# Delete old simultaneous nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["simultaneous"]):
simultaneous_node =\
self.factory.nodes["simultaneous"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - simultaneous_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["simultaneous"][node_ip]
# Delete old candidates and candidate structs.
old_node_ips = []
for node_ip in list(self.factory.candidates):
# Record old candidates.
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Hole punching is ms time sensitive.
                        # Candidates older than this can safely be
                        # assumed to be no longer needed.
if node_ip not in self.factory.nodes["simultaneous"] \
and t - candidate["time"] >= self.challenge_timeout * 5:
old_candidates.append(candidate)
# Remove old candidates.
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate)
# Record old node IPs.
if not len(self.factory.candidates[node_ip]) and \
node_ip not in self.factory.nodes["simultaneous"]:
old_node_ips.append(node_ip)
# Remove old node IPs.
for node_ip in old_node_ips:
del self.factory.candidates[node_ip]
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error))
def lineReceived(self, line):
# Unicode for text patterns.
try:
line = line.decode("utf-8")
except:
# Received invalid characters.
return
if debug:
print(self.log_entry(line, "recv"))
try:
# Return nodes for bootstrapping.
if re.match("^BOOTSTRAP", line) is not None:
parts = re.findall("^BOOTSTRAP ([0-9]+)", line)
while 1:
# Invalid response.
if not len(parts):
break
n = int(parts[0])
# Invalid number.
if n < 1 or n > 100:
break
                    # Bootstrap with up to n passive nodes.
msg = "NODES "
node_types = ["passive"]
our_ip = self.transport.getPeer().host
node_no = 0
for node_type in node_types:
ip_addr_list = list(self.factory.nodes[node_type])
for i in range(0, n):
# There's no nodes left to bootstrap with.
ip_addr_list_len = len(ip_addr_list)
if not ip_addr_list_len:
break
# Choose a random node.
rand_index = random.randrange(0, ip_addr_list_len)
ip_addr = ip_addr_list[rand_index]
element = self.factory.nodes[node_type][ip_addr]
# Skip our own IP.
if our_ip == ip_addr or ip_addr == "127.0.0.1":
i -= 1
ip_addr_list.remove(ip_addr)
continue
# Not connected.
if node_type == "simultaneous" and\
not element["con"].connected:
i -= 1
ip_addr_list.remove(ip_addr)
continue
# Append new node.
msg += node_type[0] + ":" + ip_addr + ":"
msg += str(element["port"]) + " "
ip_addr_list.remove(ip_addr)
node_no += 1
# No nodes in response.
if not node_no:
msg = "NODES EMPTY"
# Send nodes list.
self.send_line(msg)
break
# Add node details to relevant sections.
if re.match("^(SIMULTANEOUS|PASSIVE) READY [0-9]+ [0-9]+$", line)\
is not None:
# Get type.
node_type, passive_port, max_inbound = re.findall("^(SIMULTANEOUS|PASSIVE) READY ([0-9]+) ([0-9]+)", line)[0]
node_type = node_type.lower()
valid_node_types = ["simultaneous", "passive"]
if node_type not in valid_node_types:
return
# Init / setup.
node_ip = self.transport.getPeer().host
self.factory.nodes[node_type][node_ip] = {
"max_inbound": max_inbound,
"no": 0,
"port": passive_port,
"time": time.time(),
"con": self,
"ip_list": []
}
# Passive doesn't have a candidates list.
if node_type == "simultaneous":
if node_ip not in self.factory.candidates:
self.factory.candidates[node_ip] = []
else:
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip)
# Echo back mapped port.
if re.match("^SOURCE TCP", line) is not None:
self.send_remote_port()
            # Client wishes to actively initiate a simultaneous open.
if re.match("^CANDIDATE", line) is not None:
# CANDIDATE 192.168.0.1 TCP.
parts = re.findall("^CANDIDATE ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) (TCP|UDP) ((?:[0-9]+\s?)+)$", line)
while 1:
# Invalid response.
if not len(parts):
break
node_ip, proto, predictions = parts[0]
predictions = predictions.split(" ")
client_ip = self.transport.getPeer().host
# Invalid IP address.
if not self.is_valid_ipv4_address(node_ip):
print("Candidate invalid ip4" + str(node_ip))
break
if node_ip not in self.factory.nodes["simultaneous"]:
print("Candidate: node ip not in factory nodes sim.")
break
# Valid port.
valid_ports = 1
for port in predictions:
if not self.is_valid_port(port):
valid_ports = 0
if not valid_ports:
print("Candidate not valid port")
break
# Not connected.
if not self.factory.nodes["simultaneous"][node_ip]["con"].\
connected:
print("Candidate not connected.")
break
candidate = {
"ip_addr": client_ip,
"time": time.time(),
"predictions": predictions,
"proto": proto,
"con": self,
"propogated": 0
}
# Delete candidate if it already exists.
if node_ip in self.factory.candidates:
# Max candidates reached.
num_candidates = len(self.factory.candidates[node_ip])
if num_candidates >= self.max_candidates:
print("Candidate max candidates reached.")
break
for test_candidate in self.factory.candidates[node_ip]:
if test_candidate["ip_addr"] == client_ip:
self.factory.candidates[node_ip].remove(test_candidate)
print("Candidate removign test canadidate.")
break
self.factory.candidates[node_ip].append(candidate)
candidate_index = len(self.factory.candidates[node_ip]) - 1
# Update predictions.
self.factory.candidates[node_ip][candidate_index]["predictions"] = predictions
msg = "PREDICTION SET"
self.send_line(msg)
# Synchronize simultaneous node.
self.synchronize_simultaneous(node_ip)
break
# Node wishes to respond to a simultaneous open challenge from
# a client.
if re.match("^ACCEPT", line) is not None:
# ACCEPT 192.168.0.1 4552 345 TCP 1412137849.288068
p = "^ACCEPT ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)"
p += " ((?:[0-9]+\s?)+) (TCP|UDP) ([0-9]+(?:[.][0-9]+)?)$"
parts = re.findall(p, line)
while 1:
# Invalid reply.
if not len(parts):
break
client_ip, predictions, proto, ntp = parts[0]
# Invalid IP address.
node_ip = self.transport.getPeer().host
if node_ip not in self.factory.candidates:
break
# Invalid predictions.
predictions = predictions.split(" ")
valid_ports = 1
for port in predictions:
if not self.is_valid_port(port):
valid_ports = 0
if not valid_ports:
break
# Invalid NTP.
ntp = ntp
t = time.time()
minute = 60 * 10
if int(float(ntp)) < t - minute or\
int(float(ntp)) > t + minute:
break
# Relay fight to client_ip.
# FIGHT 192.168.0.1 4552 345 34235 TCP 123123123.1
msg = "FIGHT %s %s %s %s" % (node_ip, " ".join(map(str, predictions)), proto, str(ntp))
for candidate in self.factory.candidates[node_ip]:
if candidate["ip_addr"] == client_ip:
candidate["con"].send_line(msg)
"""
Signal to propogate_candidates() not to relay this
candidate again. Note that this occurs after a
valid accept which thus counts as acknowledging
receiving the challenge.
"""
candidate["propogated"] = 1
break
break
# Remove node details.
if re.match("^CLEAR", line) is not None:
ip_addr = self.transport.getPeer().host
if ip_addr in self.factory.nodes["passive"]:
del self.factory.nodes["passive"][ip_addr]
if ip_addr in self.factory.nodes["simultaneous"]:
del self.factory.nodes["simultaneous"][ip_addr]
# Disconnect.
if re.match("^QUIT", line) is not None:
self.transport.loseConnection()
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error))
class RendezvousFactory(Factory):
"""
PASSIVE
A node with the incoming port successfully reachable.
For clients, this is done by opening the port.
Outbound: Passive, simultaneous
Inbound: Active, passive, simultaneous
SIMULTANEOUS
This is a node with a predictable NAT. TCP hole
punching can be used to connect to these nodes.
Note that a simultaneous node can technically
also be a passive node and may be added to
both categories.
Outbound: Passive, simultaneous
Inbound: Active, passive, simultaneous
ACTIVE
(For explanation only, this struct isn't used.)
    Active nodes are nodes which can only make outgoing
    connections - they cannot receive any. These nodes are
    problematic in a P2P network because a P2P network, by
    definition, requires nodes capable of receiving incoming
    connections to form the network. When a network first starts,
    it's likely there will be many more active nodes than anything
    else, which taxes the network by taking up the limited inbound
    connection slots. Simultaneous nodes were created to try to
    solve this problem and to make bootstrapping more automatic
    and user-friendly.
Outbound: Passive, simultaneous
Inbound:
RELAY
Not yet used: Relay nodes will be nodes capable of
relaying connections on behalf of other nodes.
BOOTSTRAP
Not yet used: A list of other servers which can be
used for bootstrapping.
"""
def __init__(self):
self.last_cleanup = time.time()
self.candidates = {}
self.nodes = {
'passive': {},
'simultaneous': {},
'active': {},
'relay': {},
'bootstrap': {}
}
# Test data.
"""
test_ip = "192.168.0.10"
self.nodes["passive"][test_ip] = {
"ip_addr": test_ip,
"port": 194,
}
self.nodes["simultaneous"][test_ip] = {
"time": time.time(),
"ip_addr": test_ip,
"port": 0
}
self.candidates[test_ip] = []
"""
def buildProtocol(self, addr):
return RendezvousProtocol(self)
if __name__ == "__main__":
print("Starting rendezvous server.")
factory = RendezvousFactory()
reactor.listenTCP(8000, factory, interface="0.0.0.0")
reactor.run()
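# Illustrative summary of the line protocol handled by lineReceived() above.
# This is only a sketch: the addresses, ports and timestamp are placeholder
# values, not output captured from a real session.
#   client A -> server: BOOTSTRAP 5
#   server -> client A: NODES p:203.0.113.7:50500 ...       (or "NODES EMPTY")
#   node B -> server:   SIMULTANEOUS READY 0 10              (register node B)
#   client A -> server: SOURCE TCP            (server echoes the mapped port)
#   client A -> server: CANDIDATE 192.0.2.44 TCP 50501 50502
#   server -> client A: PREDICTION SET
#   node B -> server:   ACCEPT 198.51.100.9 50601 50602 TCP 1412137849.288068
#   server -> client A: FIGHT 192.0.2.44 50601 50602 TCP 1412137849.288068
#   client A -> server: CLEAR                       (remove node details)
#   client A -> server: QUIT                        (disconnect)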
| Storj/pyp2p | pyp2p/rendezvous_server.py | Python | mit | 23,140 |
import sys
import time
import threading
import pygame
import simplejson as json
from utils import rabbitmq
from utils import drawable
class WorkerThread(threading.Thread):
def __init__(self, logger, status, conf, queue_rows):
threading.Thread.__init__(self)
self._error_count = 0
self._logger = logger
self._status = status
self._queue_rows = queue_rows
self.config = conf
self.running = True
def process_msg(self, body):
body = body.replace('\x00','')
self._logger.debug(body.replace('\n',' '))
obj = json.loads(body, encoding='utf-8')
return obj
def run(self):
connection = None
channel = None
ack_rate = int(self.config['tweaks.ack_rate'])
ack_delay = int(self.config['tweaks.ack_delay'])
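        # Main loop: (re)build the RabbitMQ connection, then consume up to
        # ack_rate messages per pass (pausing ack_delay ms between messages),
        # update the matching queue row for each message and wait a second
        # before the next pass. Any failure flags the status object and drops
        # the connection so it is rebuilt on the next iteration.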
        while self.running:
            if connection is None or channel is None:
try:
self._logger.debug('Create connection')
connection = rabbitmq.get_connection(self.config)
self._logger.debug('Create channel')
channel = rabbitmq.setup_channel(self.config, connection)
if self._status.isError():
self._status.nowNormal()
except:
self._logger.error('Error trying to connect to RabbitMQ')
self._logger.exception(sys.exc_info()[1])
self._status.error()
connection = None
channel = None
pygame.time.wait(1000)
continue
else:
try:
if self._status.isError():
self._logger.debug("Error mode active, disabling connection")
connection = None
channel = None
else:
total_msgs = 0
for method_frame, properties, body in channel.consume(self.config['amqp.queue']):
self._logger.debug("msg recv: %s" % body)
pygame.time.wait(ack_delay)
total_msgs += 1
if total_msgs > ack_rate:
break
data = self.process_msg(body)
queue_name = "psistats-%s" % data['hostname']
                            if not self._queue_rows.has_queue_row(queue_name):
qr = drawable.create_queue_row(data, self.config)
self._queue_rows.add_queue_row(queue_name, qr)
if self._queue_rows.has_queue_row(queue_name):
qr = self._queue_rows.get_queue_row(queue_name)
qr.get_drawable('cpu').process(float(data['cpu']) / float(100))
qr.get_drawable('mem').process(float(data['mem_used']) / float(data['mem_total']))
qr.update_title("%s - %s" % (data['hostname'], data['uptime']))
self._queue_rows.add_processed(queue_name)
channel.cancel()
pygame.time.wait(1000)
except:
self._logger.error('Error trying to fetch messages')
self._logger.exception(sys.exc_info()[1])
self._status.error()
finally:
self._logger.debug("Error count: %s" % self._status.error_count)
if self._status.isError():
connection = None
channel = None
pygame.time.wait(1000)
self._logger.warning("Worker thread exiting!")
| alex-dow/psistatsrd | psistatsrd/worker.py | Python | mit | 4,007 |
import multiprocessing
class SlotPool(object):
"""Execution slots pool
    Helps track and limit parallel executions. Define a slot
    pool, for example, to cap the number of tasks your program
    runs in parallel.
    :param size: Size of the pool, i.e. how many parallel
    execution slots can be acquired at once.
    :type size: int
"""
def __init__(self, size, *args, **kwargs):
self.size = size or multiprocessing.cpu_count()
self.free = self.size
self._semaphore = multiprocessing.Semaphore(self.size)
def acquire(self):
self._semaphore.acquire()
self.free -= 1
def release(self):
if (self.free + 1) > self.size:
raise ValueError("No more slots to release from the pool")
self._semaphore.release()
self.free += 1
def reset(self):
del self._semaphore
self._semaphore = multiprocessing.BoundedSemaphore(self.size)
self.free = self.size
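# Minimal usage sketch (illustrative only, not part of the original module):
# acquire a slot before starting a task and release it when done, so that at
# most `size` tasks run in parallel.
if __name__ == "__main__":
    pool = SlotPool(size=2)
    pool.acquire()      # blocks if no free slot is available
    try:
        pass            # the actual work would go here
    finally:
        pool.release()  # hand the slot back for other workers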
| botify-labs/process-kit | pkit/slot/pool.py | Python | mit | 990 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Remove empty dirs in the provided trees"""
import KmdCmd
import KmdFiles
import logging
class KmdTreeEmptyDirs(KmdCmd.KmdCommand):
def extendParser(self):
"""extend parser"""
super(KmdTreeEmptyDirs, self).extendParser()
#Extend parser
self.parser.add_argument('folders', metavar='</path/to/tree>', nargs="+", help='the list of file trees to clean-up')
def run(self):
"""Run the clean-up"""
for folder in self.args.folders :
#FIXME : Test if folder exists ?
logging.info("Traverse %s", folder)
KmdFiles.removeEmptyFolders(folder, self.args.doit)
if __name__ == "__main__":
KmdTreeEmptyDirs(__doc__).run()
| pzia/keepmydatas | src/dir_rm_empty.py | Python | mit | 761 |
from django.contrib import admin
from djpubsubhubbub.models import Subscription
admin.site.register(Subscription)
| jpadilla/feedleap | vendor/djpubsubhubbub/djpubsubhubbub/admin.py | Python | mit | 116 |
from globals import *
import life as lfe
import missions
def tick(life):
if life['missions']:
missions.do_mission(life, life['missions'].keys()[0])
| flags/Reactor-3 | alife/alife_work.py | Python | mit | 155 |
#!/usr/bin/env python
from Bio import AlignIO
from Bio.Alphabet import IUPAC,Gapped
import sys
# This script takes a FASTA alignment and converts it to a
# Nexus alignment.
# check for correct arguments
if len(sys.argv) != 3:
print("Usage: FastaToNexus.py <inputfile> <outputfile>")
sys.exit(0)
input_name = sys.argv[1]
output_name = sys.argv[2]
input_file = open(input_name, 'r')
output_file = open(output_name, 'w')
alignment = AlignIO.read(input_file, 'fasta', alphabet=Gapped(IUPAC.ambiguous_dna, '-'))
AlignIO.write(alignment, output_file, 'nexus')
input_file.close()
output_file.close()
| tatumdmortimer/formatConverters | FastaToNexus.py | Python | mit | 605 |
class Song(object):
def __init__(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print(line)
happy_bday = Song(["Happy birthday to you",
"I don't want to get sued",
"So I'll stop right there"])
bulls_on_parade = Song(["They rally around tha family",
"With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
| zedshaw/learn-python3-thw-code | ex40.py | Python | mit | 488 |
#!/usr/bin/env python
# table auto-generator for zling.
# author: Zhang Li <[email protected]>
kBucketItemSize = 4096
matchidx_blen = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7] + [8] * 1024
matchidx_code = []
matchidx_bits = []
matchidx_base = []
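# Build the match-index coding tables: for each bucket, matchidx_blen holds
# the number of extra bits, matchidx_base the first raw index the bucket
# covers, and matchidx_code maps every raw index in 0..kBucketItemSize-1
# back to its bucket number.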
while len(matchidx_code) < kBucketItemSize:
for bits in range(2 ** matchidx_blen[len(matchidx_base)]):
matchidx_code.append(len(matchidx_base))
matchidx_base.append(len(matchidx_code) - 2 ** matchidx_blen[len(matchidx_base)])
f_blen = open("ztable_matchidx_blen.inc", "w")
f_base = open("ztable_matchidx_base.inc", "w")
f_code = open("ztable_matchidx_code.inc", "w")
for i in range(0, matchidx_base.__len__()):
f_blen.write("%4u," % matchidx_blen[i] + "\n\x20" [int(i % 16 != 15)])
f_base.write("%4u," % matchidx_base[i] + "\n\x20" [int(i % 16 != 15)])
for i in range(0, matchidx_code.__len__()):
f_code.write("%4u," % matchidx_code[i] + "\n\x20" [int(i % 16 != 15)])
| eevans/squash-deb | plugins/zling/libzling/src/ztable.py | Python | mit | 964 |
from Auth import TokenGenerator
import requests
import json
class ListRetriever:
'Retrieve a list of our follower buddies'
URL = 'https://api.twitter.com/1.1/lists/list.json'
USER_ID = '894195594360737792'
key = ''
secret = ''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def getLists(self):
headers = {'Authorization' : self.getToken()}
requestData = {'user_id': self.USER_ID}
jsonResponse = requests.get(self.URL, params=requestData, headers=headers)
lists = json.loads(jsonResponse.text)
return lists
def getListByName(self, name):
lists = self.getLists()
for listElem in lists:
if name == listElem['name']:
return listElem
return False
def getToken(self):
tokenGenerator = TokenGenerator(self.key, self.secret)
return 'Bearer ' + tokenGenerator.generate()
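# Minimal usage sketch (illustrative only -- the key/secret strings are
# placeholders, not real credentials):
#   retriever = ListRetriever('consumer-key', 'consumer-secret')
#   buddies = retriever.getListByName('follower buddies')
#   if buddies:
#       print(buddies['name'])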
| csdinos/twitterBot | Api/ListRetriever.py | Python | mit | 953 |
"""Wrapper for utilizing tronn on datasets
"""
import os
import h5py
import glob
import logging
import numpy as np
import pandas as pd
from ggr.analyses.baselines import compare_sig_motifs
from ggr.analyses.baselines import compare_tf_motif_corrs
from ggr.analyses.bioinformatics import run_gprofiler
from ggr.analyses.linking import regions_to_genes
from ggr.analyses.nn import get_motif_region_set
from ggr.analyses.nn import get_motifs_region_set
from ggr.analyses.nn import get_sig_motif_list
from ggr.analyses.nn import compare_nn_gene_set_to_diff_expr
from ggr.analyses.nn import evaluate_enhancer_prediction
from ggr.analyses.nn import run_low_affinity_workflow
from ggr.analyses.nn import summarize_functional_enrichments
from ggr.analyses.nn import get_summit_centric_motif_maps
from ggr.analyses.nn import compare_aligned_motif_orientations
from ggr.analyses.nn import filter_summit_centric_maps
from ggr.analyses.nn import compare_homotypic_region_sets_to_chipseq
from ggr.analyses.nn import clean_and_plot_enrichments
from ggr.analyses.tronn_scripts import tronn_calldiffmotifs_cmd
from ggr.analyses.tronn_scripts import tronn_intersect_with_expression_cmd
from ggr.analyses.tronn_scripts import tronn_intersect_with_expression_for_baseline_cmd
from ggr.util.utils import run_shell_cmd
def _run_motif_functional_enrichment_workflow(
args, motif_name, bed_file, out_dir, filter_by_score=0.01):
"""motif to region set and functional enrichments
"""
# linking file
links_dir = "{}/linking/proximity".format(args.outputs["results"]["dir"])
#links_dir = "{}/linking/hichip".format(args.outputs["results"]["dir"])
interactions_file = "{}/ggr.linking.ALL.overlap.interactions.txt.gz".format(links_dir)
# link to nearby genes
# TODO how are we up/downweighting things?
genes_file = "{}/{}.genes.all.txt.gz".format(out_dir, os.path.basename(bed_file).split(".bed")[0])
if not os.path.isfile(genes_file):
regions_to_genes(
bed_file,
interactions_file,
#args.outputs["annotations"]["tss.pc.bed"],
args.outputs["results"]["rna"]["expression_filtering"]["tss.pc.expressed"],
#args.outputs["data"]["tss.dynamic"],
genes_file,
filter_by_score=filter_by_score)
#filter_by_score=0.1) # proximity corr thresh
# just look at top 1000?
top_genes_file = "{}.max_1k.txt.gz".format(genes_file.split(".txt")[0])
if not os.path.isfile(top_genes_file):
genes_data = pd.read_csv(genes_file, sep="\t")
if genes_data.shape[0] > 1000:
genes_data = genes_data.iloc[:1000]
genes_data.to_csv(top_genes_file, sep="\t", compression="gzip", index=False, header=True)
genes_file = top_genes_file
# then run gene enrichment
gprofiler_output = "{}.go_gprofiler.txt".format(genes_file.split(".txt")[0])
if not os.path.isfile(gprofiler_output):
run_gprofiler(
genes_file,
args.outputs["data"]["rna.counts.pc.expressed.mat"],
out_dir, ordered=True, header=True)
return
def _run_motifs_functional_enrichment_workflow(args, pvals_file, nn_files, out_dir):
"""get a mapping of motifs to regions and functions
"""
# first get motifs list
motif_names = get_sig_motif_list(pvals_file, nn_files[0])
# then get results per motif
for motif_name in motif_names:
bed_file = "{}/{}.bed.gz".format(out_dir, motif_name)
if not os.path.isfile(bed_file):
get_motif_region_set(nn_files, motif_name, bed_file)
_run_motif_functional_enrichment_workflow(args, motif_name, bed_file, out_dir)
# summarize results
summary_dir = "{}/summary".format(out_dir)
if not os.path.isdir(summary_dir):
os.system("mkdir -p {}".format(summary_dir))
summarize_functional_enrichments(out_dir, summary_dir)
return
def _run_atac_summit_centric_workflow(args, pvals_file, nn_files, work_dir):
"""look at motif positions around atac summit
"""
# set up
enrichments_dir = "{}/enrichments.hits.homotypic_filt.genes_all".format(work_dir)
if not os.path.isdir(enrichments_dir):
os.system("mkdir -p {}".format(enrichments_dir))
matrices_dir = "{}/data".format(work_dir)
if not os.path.isdir(matrices_dir):
os.system("mkdir -p {}".format(matrices_dir))
# first get motifs list
motif_names = get_sig_motif_list(pvals_file, nn_files[0])
# then get results per motif
for motif_name in motif_names:
# debug
if False:
run_motifs = ["GRHL", "TP53", "CEBPA", "CEBPD", "TFAP2A"]
run_motifs = ["FOX", "CEBP", "TP53", "KLF", "TFAP2", "MAF", "OVOL"]
run_motifs = ["KLF"]
dont_run = True
for motif_str in run_motifs:
if motif_str in motif_name:
dont_run = False
if dont_run:
continue
# run FWD motifs
fwd_regions_file = "{}/{}.FWD.nn_filt.bed.gz".format(matrices_dir, motif_name)
if not os.path.isfile(fwd_regions_file):
get_summit_centric_motif_maps(
motif_name,
nn_files,
args.outputs["data"]["atac.master.bed"],
args.outputs["data"]["atac.master.summits.bed"],
matrices_dir)
# plotting - use these for final figs
aligned_file = "{}/{}.FWD.aligned.hits.mat.txt.gz".format(matrices_dir, motif_name)
plot_prefix = "{}/{}.FWD.aligned.hits".format(work_dir, motif_name)
if not os.path.isfile("{}.sum.pdf".format(plot_prefix)):
r_cmd = "~/git/ggr-project/R/plot.summit_aligned_motif_maps.R {} {} {}".format(
aligned_file, plot_prefix, motif_name)
print r_cmd
os.system(r_cmd)
aligned_file = "{}/{}.FWD.aligned.nn.mat.txt.gz".format(matrices_dir, motif_name)
plot_prefix = "{}/{}.FWD.aligned.nn".format(work_dir, motif_name)
if not os.path.isfile("{}.sum.pdf".format(plot_prefix)):
r_cmd = "~/git/ggr-project/R/plot.summit_aligned_motif_maps.R {} {} {}".format(
aligned_file, plot_prefix, motif_name)
print r_cmd
os.system(r_cmd)
data_file = "{}/{}.FWD.affinities.hits.mat.txt.gz".format(matrices_dir, motif_name)
plot_file = "{}/{}.FWD.num_v_affinity.hits.pdf".format(work_dir, motif_name)
if not os.path.isfile(plot_file):
r_cmd = "~/git/ggr-project/R/plot.num_motifs_vs_affinity.R {} {} {}".format(
data_file, plot_file, motif_name)
print r_cmd
os.system(r_cmd)
# run REV motifs - do not use, no asymmetry
rev_regions_file = "{}/{}.REV.summit_center_filt.bed.gz".format(matrices_dir, motif_name)
#if not os.path.isfile(rev_regions_file):
if False:
get_summit_centric_motif_maps(
motif_name,
nn_files,
args.outputs["data"]["atac.master.bed"],
args.outputs["data"]["atac.master.summits.bed"],
matrices_dir,
reverse_pwm=True)
# do not use: if there were positional dependence based on orientation, should have seen it
# when just looking at FWD version
fwd_aligned_file = "{}/{}.FWD.aligned.mat.txt.gz".format(work_dir, motif_name) # note this is NN filt
rev_aligned_file = "{}/{}.REV.aligned.mat.txt.gz".format(work_dir, motif_name) # note this is NN filt
compare_aligned_motif_orientations(fwd_aligned_file, rev_aligned_file, work_dir, motif_name)
# filter motif hits based on params: distance from summit, average affinity
aligned_file = "{}/{}.FWD.aligned.hits.mat.txt.gz".format(matrices_dir, motif_name)
homotypic_bed_file = "{}/{}.FWD.aligned.hits.homotypic_filt.bed.gz".format(enrichments_dir, motif_name)
#aligned_file = "{}/{}.FWD.aligned.nn.mat.txt.gz".format(matrices_dir, motif_name)
#homotypic_bed_file = "{}/{}.FWD.aligned.nn.homotypic_filt.bed.gz".format(enrichments_dir, motif_name)
if not os.path.isfile(homotypic_bed_file):
filter_summit_centric_maps(aligned_file, homotypic_bed_file, max_dist=50)
# and then filter again against the NN filt bed file to get
# regions with motifs of thresholded distance/affinity with NN importance
homotypic_nn_filt_bed_file = "{}.nn_filt.bed.gz".format(homotypic_bed_file.split(".bed")[0])
if not os.path.isfile(homotypic_nn_filt_bed_file):
bedtools_cmd = "bedtools intersect -u -a {} -b {} | gzip -c > {}".format(
homotypic_bed_file, fwd_regions_file, homotypic_nn_filt_bed_file)
print bedtools_cmd
os.system(bedtools_cmd)
# how to compare back to ChIP-seq data?
# q: how well does filtering for homotypic clusters improve prediction of ChIP-seq peaks?
# i guess compare using the data in the nn files? compare motif hits to nn filt to homotypic rules
# or do they show greater binding (agg plot)
if False:
compare_homotypic_region_sets_to_chipseq(
work_dir, motif_name, nn_files)
bed_file = homotypic_bed_file
# run functional enrichments
_run_motif_functional_enrichment_workflow(args, motif_name, bed_file, enrichments_dir)
# then summarize functional enrichment results
summary_dir = "{}/summary.hits.homotypic_filt.genes_all".format(work_dir)
if not os.path.isdir(summary_dir):
os.system("mkdir -p {}".format(summary_dir))
summarize_functional_enrichments(enrichments_dir, summary_dir)
return
def runall(args, prefix):
"""all workflows for deep learning (TRONN)
"""
# set up logging, files, folders
logger = logging.getLogger(__name__)
logger.info("WORKFLOW: run atac analyses")
# set up data and results dir
data_dir = args.outputs["data"]["dir"]
run_shell_cmd("mkdir -p {}".format(data_dir))
out_data = args.outputs["data"]
results_dirname = "learning"
results_dir = "{}/{}".format(args.outputs["results"]["dir"], results_dirname)
args.outputs["results"][results_dirname] = {"dir": results_dir}
run_shell_cmd("mkdir -p {}".format(results_dir))
out_results = args.outputs["results"][results_dirname]
# -------------------------------------------
# ANALYSIS 0 - preprocess data to TRONN data formats
# input: peak files
# output: hdf5 files
# -------------------------------------------
dataset_dir = "{}/datasets/{}".format(results_dir, prefix)
if not os.path.isdir(dataset_dir):
datasets = []
label_strings = []
# TODO need full paths
# atac labels
atac_labels = sorted(
glob.glob("{}/{}".format(
args.inputs["atac"][args.cluster]["data_dir"],
args.inputs["atac"][args.cluster]["idr_peak_glob"])))
atac_labels = [os.path.abspath(filename) for filename in atac_labels]
print len(atac_labels)
label_strings.append("{}_LABELS={}".format(
"ATAC", ",".join(atac_labels)))
# trajectory labels
traj_labels = sorted(
glob.glob("./results/atac/timeseries/dp_gp/reproducible/hard/reordered/bed/*bed.gz"))
traj_labels = [os.path.abspath(filename) for filename in traj_labels]
print len(traj_labels)
label_strings.append("{}_LABELS={}".format(
"TRAJ", ",".join(traj_labels)))
# histone labels
histones = args.inputs["chipseq"][args.cluster]["histones"]["ordered_names"]
for histone in histones:
histone_labels = sorted(
glob.glob("{}/{}".format(
args.inputs["chipseq"][args.cluster]["data_dir"],
args.inputs["chipseq"][args.cluster]["histones"][histone]["overlap_glob"])))
histone_labels = [os.path.abspath(filename) for filename in histone_labels]
print len(histone_labels)
label_strings.append("{}_LABELS={}::method=histone_overlap".format(
histone, ",".join(histone_labels)))
# TF labels
tfs = args.inputs["chipseq"][args.cluster]["tfs"]["ordered_names"]
for tf in tfs:
tf_labels = sorted(
glob.glob("{}/{}".format(
args.inputs["chipseq"][args.cluster]["data_dir"],
args.inputs["chipseq"][args.cluster]["tfs"][tf]["idr_peak_glob"])))
tf_labels = [os.path.abspath(filename) for filename in tf_labels]
print len(tf_labels)
label_strings.append("{}_LABELS={}".format(
tf, ",".join(tf_labels)))
# add other tf labels (external to GGR)
tfs = args.inputs["chipseq"][args.cluster]["tfs"]["other"]["ordered_names"]
for tf in tfs:
tf_labels = sorted(
glob.glob("{}/{}".format(
args.inputs["chipseq"][args.cluster]["tfs"]["other"]["data_dir"],
args.inputs["chipseq"][args.cluster]["tfs"]["other"][tf]["narrowPeak_glob"])))
tf_labels = [os.path.abspath(filename) for filename in tf_labels]
print len(tf_labels)
label_strings.append("{}_LABELS={}".format(
tf, ",".join(tf_labels)))
# chrom labels
chrom_labels = sorted(
glob.glob("./results/epigenome/dynamic/clusters/by_mark/bed/*bed.gz"))
chrom_labels = [os.path.abspath(filename) for filename in chrom_labels]
print len(chrom_labels)
label_strings.append("{}_LABELS={}".format(
"DYNAMIC_MARK", ",".join(chrom_labels)))
chrom_labels = sorted(
glob.glob("./results/epigenome/dynamic/clusters/by_state/bed/*bed.gz"))
chrom_labels = [os.path.abspath(filename) for filename in chrom_labels]
print len(chrom_labels)
label_strings.append("{}_LABELS={}".format(
"DYNAMIC_STATE", ",".join(chrom_labels)))
chrom_labels = sorted(
glob.glob("./results/epigenome/stable/clusters/by_mark/bed/*bed.gz"))
chrom_labels = [os.path.abspath(filename) for filename in chrom_labels]
print len(chrom_labels)
label_strings.append("{}_LABELS={}".format(
"STABLE_MARK", ",".join(chrom_labels)))
chrom_labels = sorted(
glob.glob("./results/epigenome/stable/clusters/by_state/bed/*bed.gz"))
chrom_labels = [os.path.abspath(filename) for filename in chrom_labels]
print len(chrom_labels)
label_strings.append("{}_LABELS={}".format(
"STABLE_STATE", ",".join(chrom_labels)))
signal_strings = []
# atac signals
atac_signals = sorted(
glob.glob("{}/{}".format(
args.inputs["atac"][args.cluster]["data_dir"],
args.inputs["atac"][args.cluster]["bigwig_pooled_glob"])))
atac_signals = [os.path.abspath(filename) for filename in atac_signals]
print len(atac_signals)
signal_strings.append("{}_SIGNALS={}".format(
"ATAC", ",".join(atac_signals)))
# histone signals
histones = args.inputs["chipseq"][args.cluster]["histones"]["ordered_names"]
for histone in histones:
histone_signals = sorted(
glob.glob("{}/{}".format(
args.inputs["chipseq"][args.cluster]["data_dir"],
args.inputs["chipseq"][args.cluster]["histones"][histone]["pooled_bigwig_glob"])))
histone_signals = [os.path.abspath(filename) for filename in histone_signals]
signal_strings.append("{}_SIGNALS={}::window=1000".format(
histone, ",".join(histone_signals)))
# tf signals
tfs = args.inputs["chipseq"][args.cluster]["tfs"]["ordered_names"]
for tf in tfs:
tf_signals = sorted(
glob.glob("{}/{}".format(
args.inputs["chipseq"][args.cluster]["data_dir"],
args.inputs["chipseq"][args.cluster]["tfs"][tf]["pooled_bigwig_glob"])))
tf_signals = [os.path.abspath(filename) for filename in tf_signals]
signal_strings.append("{}_SIGNALS={}".format(
tf, ",".join(tf_signals)))
# run tronn preprocess
preprocess = (
"tronn preprocess \\\n"
"--annotations {0} \\\n"
"--labels \\\n \t{1} \\\n"
"--signals \\\n \t{2} \\\n"
"-o ./datasets/{3} \\\n"
"--prefix {3} \\\n"
"--master_label_keys ATAC_LABELS \\\n"
"--genomewide \\\n"
"--parallel {4}").format(
args.inputs["annot"][args.cluster]["tronn_preprocess_annot"],
" \\\n\t".join(label_strings),
" \\\n\t".join(signal_strings),
prefix,
args.threads)
run_shell_cmd("echo '{}' > {}/preprocess.tmp".format(preprocess, results_dir))
#run_shell_cmd("source preprocess.tmp")
# run tronn preprocess for just dynamic regions - faster interpretation runs
preprocess = (
"tronn preprocess \\\n"
"--annotations {0} \\\n"
"--labels \\\n \t{1} \\\n"
"--signals \\\n \t{2} \\\n"
"-o ./datasets/{3}.traj \\\n"
"--prefix {3}.traj \\\n"
"--master_label_keys TRAJ_LABELS \\\n"
"--parallel {4}").format(
args.inputs["annot"][args.cluster]["tronn_preprocess_annot"],
" \\\n\t".join(label_strings),
" \\\n\t".join(signal_strings),
prefix,
args.threads)
run_shell_cmd("echo '{}' > {}/preprocess_traj.tmp".format(preprocess, results_dir))
#run_shell_cmd("source preprocess_traj.tmp")
# Also generate a TSS oriented dataset to run through scanmotifs
# note - since TSS are point files, need to extend flanks to properly label
l_len = 200 # should give 5 examples per TSS
r_len = 100
tss_prep_dir = "{}/tss_prep".format(results_dir)
if not os.path.isdir(tss_prep_dir):
os.system("mkdir -p {}".format(tss_prep_dir))
tss_prep_dir = os.path.abspath(tss_prep_dir)
tss_all = os.path.abspath(args.outputs["annotations"]["tss.pc.bed"])
tss_all_extend = "{}/tss.all.extend.bed.gz".format(tss_prep_dir)
if not os.path.isfile(tss_all_extend):
extend_cmd = "bedtools slop -s -i {} -g {} -l {} -r {} | gzip -c > {}".format(
tss_all, args.inputs["annot"][args.cluster]["chromsizes"], l_len, r_len, tss_all_extend)
print extend_cmd
os.system(extend_cmd)
tss_dynamic = os.path.abspath(args.outputs["data"]["tss.dynamic"])
tss_dynamic_extend = "{}/tss.dynamic.extend.bed.gz".format(tss_prep_dir)
if not os.path.isfile(tss_dynamic_extend):
extend_cmd = "bedtools slop -s -i {} -g {} -l {} -r {} | gzip -c > {}".format(
tss_dynamic, args.inputs["annot"][args.cluster]["chromsizes"], l_len, r_len, tss_dynamic_extend)
print extend_cmd
os.system(extend_cmd)
tss_stable = os.path.abspath(args.outputs["data"]["tss.stable"])
tss_stable_extend = "{}/tss.stable.extend.bed.gz".format(tss_prep_dir)
if not os.path.isfile(tss_stable_extend):
extend_cmd = "bedtools slop -s -i {} -g {} -l {} -r {} | gzip -c > {}".format(
tss_stable, args.inputs["annot"][args.cluster]["chromsizes"], l_len, r_len, tss_stable_extend)
print extend_cmd
os.system(extend_cmd)
tss_nonexpr = os.path.abspath(args.outputs["data"]["tss.non_expr"])
tss_nonexpr_extend = "{}/tss.nonexpr.extend.bed.gz".format(tss_prep_dir)
if not os.path.isfile(tss_nonexpr_extend):
extend_cmd = "bedtools slop -s -i {} -g {} -l {} -r {} | gzip -c > {}".format(
tss_nonexpr, args.inputs["annot"][args.cluster]["chromsizes"], l_len, r_len, tss_nonexpr_extend)
print extend_cmd
os.system(extend_cmd)
tss_labels = [tss_all_extend, tss_dynamic_extend, tss_stable_extend, tss_nonexpr_extend]
label_strings.append("TSS_LABELS={}".format(",".join(tss_labels)))
# run tronn preprocess
preprocess = (
"tronn preprocess \\\n"
"--annotations {0} \\\n"
"--labels \\\n \t{1} \\\n"
"--signals \\\n \t{2} \\\n"
"-o ./datasets/{3}.tss \\\n"
"--prefix {3}.tss \\\n"
"--master_label_keys TSS_LABELS \\\n"
"--parallel {4}").format(
args.inputs["annot"][args.cluster]["tronn_preprocess_annot"],
" \\\n\t".join(label_strings),
" \\\n\t".join(signal_strings),
prefix,
args.threads)
run_shell_cmd("echo '{}' > {}/preprocess_tss.tmp".format(preprocess, results_dir))
#run_shell_cmd("source preprocess_tss.tmp")
# NOTE: in a linear run, the tronn processes (train, eval, interpret, etc) would all
# be here in the code
# results dir
NN_DIR = "/mnt/lab_data3/dskim89/ggr/nn/2019-03-12.freeze"
# -------------------------------------------
# ANALYSIS - motifs to TF occupancy - low affinity sites
# -------------------------------------------
affinity_dir = "{}/low_affinity".format(results_dir)
if not os.path.isdir(affinity_dir):
os.system("mkdir -p {}".format(affinity_dir))
prefix = "nn.low_affinity_hits"
scanmotifs_dir = "/mnt/lab_data3/dskim89/ggr/nn/2020-01-13/scanmotifs/motifs.background.lite"
#scanmotifs_dir = "/mnt/lab_data3/dskim89/ggr/nn/2019-03-12.freeze/motifs.input_x_grad.lite"
scanmotifs_file = "{}/ggr.scanmotifs.h5".format(scanmotifs_dir)
tfs = [
#("TP53", 6, "TP63_LABELS", 1),
("TP53", 0, "TP63_LABELS", 0),
("CTCF", 6, "CTCF_LABELS", 1),
("KLF12", 6, "KLF4_LABELS", 1),
("TFAP2A", 6, "ZNF750_LABELS", 0)
]
for motif_name, timepoint_idx, chipseq_key, chipseq_idx in tfs:
plot_file = "{}/{}.{}.chipseq_pr_curves.pdf".format(
affinity_dir, prefix, motif_name)
if not os.path.isfile(plot_file):
run_low_affinity_workflow(
scanmotifs_file,
motif_name,
timepoint_idx,
chipseq_key,
chipseq_idx,
affinity_dir,
"{}.{}".format(prefix, motif_name))
# -------------------------------------------
# ANALYSIS - baselines: compare results to non-NN framework
# -------------------------------------------
baselines_dir = "{}/baselines".format(results_dir)
if not os.path.isdir(baselines_dir):
os.system("mkdir -p {}".format(baselines_dir))
prefix = "nn_vs_homerATAC"
# NN results - pvals file
nn_sig_motifs_dir = "{}/motifs.sig".format(NN_DIR)
nn_pvals_file = "{}/motifs.adjust.diff/pvals.h5".format(
nn_sig_motifs_dir)
# for first venn diagram, compare full homer results (not expr filt) with NN results
plot_file = "{}/{}.homer_unfilt.compare_sig.plot.pdf".format(baselines_dir, prefix)
if not os.path.isfile(plot_file):
compare_sig_motifs(
nn_pvals_file,
args.outputs["results"]["atac"]["homer.sig_motifs.pvals"],
["NN", "Homer+ATAC"],
baselines_dir, "{}.homer_unfilt".format(prefix))
# switch to expression filt pvals file
nn_pvals_file = "{}/motifs.adjust.diff.rna_filt.dmim/pvals.rna_filt.corr_filt.h5".format(
nn_sig_motifs_dir)
# 1) for venn diagram overlap, use the intersect file that matches correlation
out_dir = "{}/corr_75".format(baselines_dir)
if not os.path.isdir(out_dir):
os.system("mkdir -p {}".format(out_dir))
homer_pvals_file = "{}/motifs.rna_filt/pvals.rna_filt.corr_filt.h5".format(
out_dir)
if not os.path.isfile(homer_pvals_file):
tronn_intersect_with_expression_for_baseline_cmd(
args.outputs["results"]["atac"]["homer.sig_motifs.pvals"],
args.outputs["annotations"]["pwms.renamed.nonredundant"],
args.outputs["annotations"]["pwms.metadata.nonredundant.expressed"],
"{}/matrices/ggr.rna.counts.pc.expressed.timeseries_adj.pooled.rlog.mat.txt.gz".format(
args.outputs["results"]["rna"]["timeseries"]["dir"]),
out_dir)
# plot
plot_file = "{}/{}.compare_sig.plot.pdf".format(baselines_dir, prefix)
if not os.path.isfile(plot_file):
compare_sig_motifs(
nn_pvals_file,
homer_pvals_file,
["NN", "Homer+ATAC"],
baselines_dir, prefix)
# 2) for the correlation plot, use the intersect file that shows as many correlation values for homer+ATAC
# TODO also for nn
out_dir = "{}/corr_00.nn".format(baselines_dir)
if not os.path.isdir(out_dir):
os.system("mkdir -p {}".format(out_dir))
nn_pvals_file = "{}/motifs.rna_filt/pvals.rna_filt.corr_filt.h5".format(
out_dir)
if not os.path.isfile(nn_pvals_file):
tronn_intersect_with_expression_for_baseline_cmd(
"{}/motifs.adjust.diff/pvals.h5".format(nn_sig_motifs_dir),
args.outputs["annotations"]["pwms.renamed.nonredundant"],
args.outputs["annotations"]["pwms.metadata.nonredundant.expressed"],
"{}/matrices/ggr.rna.counts.pc.expressed.timeseries_adj.pooled.rlog.mat.txt.gz".format(
args.outputs["results"]["rna"]["timeseries"]["dir"]),
out_dir,
min_cor=0)
out_dir = "{}/corr_00".format(baselines_dir)
if not os.path.isdir(out_dir):
os.system("mkdir -p {}".format(out_dir))
homer_pvals_file = "{}/motifs.rna_filt/pvals.rna_filt.corr_filt.h5".format(
out_dir)
if not os.path.isfile(homer_pvals_file):
tronn_intersect_with_expression_for_baseline_cmd(
args.outputs["results"]["atac"]["homer.sig_motifs.pvals"],
args.outputs["annotations"]["pwms.renamed.nonredundant"],
args.outputs["annotations"]["pwms.metadata.nonredundant.expressed"],
"{}/matrices/ggr.rna.counts.pc.expressed.timeseries_adj.pooled.rlog.mat.txt.gz".format(
args.outputs["results"]["rna"]["timeseries"]["dir"]),
out_dir,
min_cor=0)
plot_file = "{}/{}.compare_corr.plot.pdf".format(baselines_dir, prefix)
if not os.path.isfile(plot_file):
compare_tf_motif_corrs(
nn_pvals_file,
homer_pvals_file,
["NN", "Homer+ATAC"],
baselines_dir, prefix)
# -------------------------------------------
# ANALYSIS - predicting enhancers as defined by chromatin state
# -------------------------------------------
enhancer_dir = "{}/enh_state".format(results_dir)
if not os.path.isdir(enhancer_dir):
os.system("mkdir -p {}".format(enhancer_dir))
evals_dir = "/mnt/lab_data/kundaje/users/dskim89/ggr/nn/evals.2018-12-03"
model_prefix = "ggr.basset.clf.pretrained.folds.testfold-"
eval_files = sorted(glob.glob(
"{}/{}*/ggr.eval.h5".format(evals_dir, model_prefix)))
plot_prefix = "{}/enh_state".format(enhancer_dir)
if not os.path.isfile("{}.pdf".format(plot_prefix)):
evaluate_enhancer_prediction(eval_files, plot_prefix)
# -------------------------------------------
# ANALYSIS - homotypic grammars
# -------------------------------------------
work_dir = "{}/homotypic".format(results_dir)
if not os.path.isdir(work_dir):
os.system("mkdir -p {}".format(work_dir))
# sig motifs
nn_pvals_file = (
"{}/motifs.sig/motifs.adjust.diff.rna_filt.dmim/pvals.rna_filt.corr_filt.h5").format(
NN_DIR)
# scanmotif files
NN_DIR = "/mnt/lab_data3/dskim89/ggr/nn/2019-03-12.freeze"
nn_motif_files = [
"{}/motifs.input_x_grad.early/ggr.scanmotifs.h5".format(NN_DIR),
"{}/motifs.input_x_grad.mid/ggr.scanmotifs.h5".format(NN_DIR),
"{}/motifs.input_x_grad.late/ggr.scanmotifs.h5".format(NN_DIR)]
if False: # debug
NN_DIR = "/mnt/lab_data3/dskim89/ggr/nn/2020-01-13/scanmotifs"
nn_motif_files = [
"{}/motifs.dynamic.early/ggr.scanmotifs.h5".format(NN_DIR),
"{}/motifs.dynamic.mid/ggr.scanmotifs.h5".format(NN_DIR),
"{}/motifs.dynamic.late/ggr.scanmotifs.h5".format(NN_DIR)
]
if False:
# run a functional enrichment mapping just based on presence of NN-active motifs
_run_motifs_functional_enrichment_workflow(args, nn_pvals_file, nn_motif_files, work_dir)
# do ATAC summit-centric view on motifs
# also functional enrichment mapping here too
if False:
work_dir = "{}/homotypic.atac_summits.FIGS".format(results_dir)
if not os.path.isdir(work_dir):
os.system("mkdir -p {}".format(work_dir))
_run_atac_summit_centric_workflow(args, nn_pvals_file, nn_motif_files, work_dir)
# -------------------------------------------
# ANALYSIS - compare homotypic v heterotypic
# -------------------------------------------
# numbers comparison of homotypic vs heterotypic
work_dir = "{}/homotypic_v_heterotypic".format(results_dir)
if not os.path.isdir(work_dir):
os.system("mkdir -p {}".format(work_dir))
# linking file
links_dir = "{}/linking/proximity".format(args.outputs["results"]["dir"])
interactions_file = "{}/ggr.linking.ALL.overlap.interactions.txt.gz".format(links_dir)
# heterotypic
heterotypic_bed_dir = "/mnt/lab_data3/dskim89/ggr/review/pre-review"
heterotypic_bed_all = "{}/heterotypic.all_regions.bed.gz".format(work_dir)
if not os.path.isfile(heterotypic_bed_all):
copy_cmd = "cp {}/rules_all_regions.bed.gz {}".format(heterotypic_bed_dir, heterotypic_bed_all)
print copy_cmd
os.system(copy_cmd)
heterotypic_genes_file = "{}/{}.genes.all.txt.gz".format(
work_dir, os.path.basename(heterotypic_bed_all).split(".bed")[0])
if not os.path.isfile(heterotypic_genes_file):
regions_to_genes(
heterotypic_bed_all,
interactions_file,
#args.outputs["annotations"]["tss.pc.bed"],
#args.outputs["results"]["rna"]["expression_filtering"]["tss.pc.expressed"],
args.outputs["data"]["tss.dynamic"],
            heterotypic_genes_file,
filter_by_score=0.5)
homotypic_bed_dir = "/mnt/lab_data/kundaje/users/dskim89/ggr/integrative/v1.0.0a/results/learning/homotypic/enrichments.hits.homotypic_filt.genes_all"
homotypic_bed_all = "{}/homotypic.all_regions.bed.gz".format(work_dir)
if not os.path.isfile(homotypic_bed_all):
bedtools_cmd = "zcat {}/*.homotypic_filt.bed.gz | sort -k1,1 -k2,2n | bedtools merge -i stdin | gzip -c > {}".format(
homotypic_bed_dir, homotypic_bed_all)
print bedtools_cmd
os.system(bedtools_cmd)
homotypic_genes_file = "{}/{}.genes.all.txt.gz".format(
work_dir, os.path.basename(homotypic_bed_all).split(".bed")[0])
if not os.path.isfile(homotypic_genes_file):
regions_to_genes(
homotypic_bed_all,
interactions_file,
#args.outputs["annotations"]["tss.pc.bed"],
#args.outputs["results"]["rna"]["expression_filtering"]["tss.pc.expressed"],
args.outputs["data"]["tss.dynamic"],
            homotypic_genes_file,
filter_by_score=0.5)
total_genes_file = "{}/genes_all.txt.gz".format(work_dir)
if not os.path.isfile(total_genes_file):
heterotypic = pd.read_csv(heterotypic_genes_file, sep="\t")
homotypic = pd.read_csv(homotypic_genes_file, sep="\t")
total_genes = heterotypic.merge(homotypic, how="outer", on="gene_id")
total_genes.to_csv(total_genes_file, sep="\t", compression="gzip")
# -------------------------------------------
# ANALYSIS - look at repression
# -------------------------------------------
work_dir = "{}/repr".format(results_dir)
os.system("mkdir -p {}".format(work_dir))
NN_DIR = "/mnt/lab_data3/dskim89/ggr/nn/2021-05-22.repr_analyses"
BACKGROUND_DIR = "/mnt/lab_data3/dskim89/ggr/nn/2020-01-13/scanmotifs/motifs.background.lite"
background_file = "{}/ggr.scanmotifs.h5".format(BACKGROUND_DIR)
background_files = [background_file]
# motifs in early dynamic - for negative scores, any sig?
# correlated - these repressor motifs INCREASE across time
out_dir = "{}/motifs.dynamic.early.REPR".format(work_dir)
os.system("mkdir -p {}".format(out_dir))
scanmotifs_dir = "motifs.dynamic.early.REPR"
h5_file = "{}/{}/ggr.scanmotifs.h5".format(NN_DIR, scanmotifs_dir)
infer_json = "{}/{}/infer.scanmotifs.json".format(NN_DIR, scanmotifs_dir)
# foregrounds and background
foregrounds = [
"TRAJ_LABELS=0",
"TRAJ_LABELS=7",
"TRAJ_LABELS=8,10,11",
"TRAJ_LABELS=9"]
background = "ATAC_LABELS=0,1,2,3,4,5,6,9,10,12"
# call sig motifs
if False:
tronn_calldiffmotifs_cmd(
[h5_file],
background_files,
foregrounds,
background,
infer_json,
out_dir,
qval=0.01) # TODO adjust this
pvals_file = "{}/pvals.h5".format(out_dir)
new_pvals_file = "{}/motifs.rna_filt/pvals.rna_filt.corr_filt.h5".format(out_dir)
if not os.path.isfile(new_pvals_file):
tronn_intersect_with_expression_cmd(
[h5_file],
pvals_file,
args.outputs["annotations"]["pwms.renamed.nonredundant"],
args.outputs["annotations"]["pwms.metadata.nonredundant.expressed"],
"{}/matrices/ggr.rna.counts.pc.expressed.timeseries_adj.pooled.rlog.mat.txt.gz".format(
args.outputs["results"]["rna"]["timeseries"]["dir"]),
out_dir)
# plot the reduced set
summary_dir = "{}/motifs.rna_filt/summary".format(out_dir)
pwm_traj_presence_file = "{}/ggr.pwms_present_summary.txt".format(summary_dir)
pwm_patterns_file = "{}/ggr.pwms_patterns_summary.txt".format(summary_dir)
tf_traj_presence_file = "{}/ggr.tfs_corr_summary.txt".format(summary_dir)
tf_patterns_file = "{}/ggr.tfs_patterns_summary.txt".format(summary_dir)
# run once for ordering
ordering_file = "{}/tf_ordering.txt".format(summary_dir)
if not os.path.isfile(ordering_file):
plot_script = "/users/dskim89/git/ggr-project/figs/fig_2.modelling/fig_3-d.0.plot.motifs_and_tfs.R"
plot_cmd = "{} {} {} {} {}".format(
plot_script, pwm_traj_presence_file, pwm_patterns_file,
tf_traj_presence_file, tf_patterns_file)
os.system(plot_cmd)
# python script: get a mapping from motif to TF
reordering_script = "/users/dskim89/git/ggr-project/scripts/map_motifs_to_tfs.py"
reordering_cmd = "python {} {} {} {} {}".format(
reordering_script,
"motif_ordering.txt",
args.outputs["annotations"]["pwms.metadata.nonredundant.expressed"],
tf_patterns_file,
ordering_file)
os.system(reordering_cmd)
# and run with extra file for ordering
plot_cmd += " {}".format(ordering_file)
os.system(plot_cmd)
# clean up
os.system("mv fig_3*pdf {}/".format(summary_dir))
os.system("mv motif_ordering.txt {}/".format(summary_dir))
# -------------------------------------------
# ANALYSIS - look at more closely at KLF and CEBP
# -------------------------------------------
work_dir = "{}/repr/CEBPA_x_KLF".format(results_dir)
os.system("mkdir -p {}".format(work_dir))
# inputs
prefix = "ggr.nn.repr"
NN_DIR = "/mnt/lab_data3/dskim89/ggr/nn/2021-05-22.repr_analyses"
scanmotifs_dir = "motifs.dynamic.early.REPR"
h5_file = "{}/{}/ggr.scanmotifs.h5".format(NN_DIR, scanmotifs_dir)
motif_indices = [174, 184] # CEBPA, KLF4 respectively
motif_names = ["CEBPA", "KLF4"]
# linking file
links_dir = "{}/linking/proximity".format(args.outputs["results"]["dir"])
interactions_file = "{}/ggr.linking.ALL.overlap.interactions.txt.gz".format(links_dir)
# first look at them separately
for i in range(len(motif_indices)):
motif_idx = motif_indices[i]
motif_name = motif_names[i]
print motif_name
# get region set (and vignettes table)
rule_file = "{}/{}.{}.bed.gz".format(work_dir, prefix, motif_name)
if not os.path.isfile(rule_file):
get_motifs_region_set([h5_file], [motif_idx], rule_file)
# get functional enrichments
gprofiler_file = "{}/{}.{}.genes.all.max_1k.go_gprofiler.txt".format(
work_dir, prefix, motif_name)
if not os.path.isfile(gprofiler_file):
_run_motif_functional_enrichment_workflow(
args, motif_name, rule_file, work_dir, filter_by_score=0.5)
# plot enrichments
plot_file = "{}.enrichments.pdf".format(gprofiler_file.split(".txt")[0])
if not os.path.isfile(plot_file):
clean_and_plot_enrichments(gprofiler_file, motif_name, plot_file)
# vignettes: use tronn helper script?
vignette_file = "{}.for_vignettes.mat.txt.gz".format(rule_file.split(".bed")[0])
vignette_prefix = "{}/{}.vignette".format(work_dir, motif_name)
get_vignettes_cmd = "/users/dskim89/git/tronn/scripts/ggr/ggr_plot_repr_vignettes.py {} {}".format(vignette_file, vignette_prefix)
print get_vignettes_cmd
#os.system(get_vignettes_cmd)
quit()
# and then look at them together
# notes: so far looks like they really don't like to co-occur
rule_file = "{}/{}.CEBPA_x_KLF4.bed.gz".format(work_dir, prefix)
if not os.path.isfile(rule_file):
get_motifs_region_set([h5_file], motif_indices, rule_file)
# get functional enrichments
gprofiler_file = "{}/{}.CEBPA_x_KLF4.genes.all.max_1k.go_gprofiler.txt".format(
work_dir, prefix, motif_name)
if not os.path.isfile(gprofiler_file):
_run_motif_functional_enrichment_workflow(
args, motif_name, rule_file, work_dir, filter_by_score=0.5)
# plot enrichments
plot_file = "{}.enrichments.pdf".format(gprofiler_file.split(".txt")[0])
if not os.path.isfile(plot_file):
clean_and_plot_enrichments(gprofiler_file, "CEBPA_x_KLF", plot_file)
# plot out a venn summary of CEBP and KLF
# vignettes - plot out following regions
quit()
# motifs in early dynamic - for normal scores, any sig?
# anti-correlated - these activating motifs INCREASE across time
# ^ this is hard to explain, why would closing regions have motifs getting LARGER across time
# ^ hold off on this
# TSS - motifs - for H3K2me3 set, any sig?
# correlated - these motifs INCREASE across time
print "here"
quit()
# -------------------------------------------
# ANALYSIS - predicting TF KD/KO gene sets from learning results
# -------------------------------------------
tf_kd_dir = "{}/tfs".format(results_dir)
print tf_kd_dir
if not os.path.isdir(tf_kd_dir):
os.system("mkdir -p {}".format(tf_kd_dir))
# motif inputs
motifs = [
("TP53", 6, "/mnt/lab_data3/dskim89/labmembers/margaret/TP63_GSE33495_dex_results.csv"),
("TP53", 6, "/mnt/lab_data3/dskim89/labmembers/margaret/TP63_GSE67382_dex_results.csv"),
("KLF12", 6, "/mnt/lab_data3/dskim89/labmembers/margaret/KLF4_GSE32685_dex_results.csv"),
#("KLF12", 6, "/mnt/lab_data3/dskim89/labmembers/margaret/KLF4_GSE111786_dex_results.csv"),
#("KLF12", 6, "/mnt/lab_data3/dskim89/labmembers/margaret/KLF4_GSE140992_dex_results.csv"),
("TFAP2A", 6, "/mnt/lab_data3/dskim89/labmembers/margaret/ZNF750_GSE32685_dex_results.csv"),
("MAFB", 9, "/mnt/lab_data3/dskim89/labmembers/margaret/MAF_GSE52651_dex_results.csv"),
#("FOXA1", 0, "/mnt/lab_data3/dskim89/labmembers/margaret/FOXC1_D0_GSE76647_dex_results.csv"),
("FOXA1", 9, "/mnt/lab_data3/dskim89/labmembers/margaret/FOXC1_D5_GSE76647_dex_results.csv")
]
if False:
# ATAC traj files
atac_clusters_file = args.outputs["results"]["atac"]["timeseries"]["dp_gp"][
"clusters.reproducible.hard.reordered.list"]
atac_mat_file = args.outputs["data"]["atac.counts.pooled.rlog.dynamic.traj.mat"]
atac_mat = pd.read_csv(atac_mat_file, sep="\t", index_col=0)
atac_mat = atac_mat.drop("d05", axis=1)
# RNA traj files
rna_clusters_file = args.outputs["results"]["rna"]["timeseries"]["dp_gp"][
"clusters.reproducible.hard.reordered.list"]
rna_mat_file = args.outputs["data"][
"rna.counts.pc.expressed.timeseries_adj.pooled.rlog.dynamic.traj.mat"]
rna_mat = pd.read_csv(rna_mat_file, sep="\t", index_col=0)
# linking file
links_dir = "{}/linking/proximity".format(args.outputs["results"]["dir"])
#links_dir = "{}/linking/hichip".format(args.outputs["results"]["dir"])
interactions_file = "{}/ggr.linking.ALL.overlap.interactions.txt.gz".format(links_dir)
# background atac file
atac_background_file = args.outputs["data"]["atac.master.bed"]
# nn files
#nn_motif_files = glob.glob(
# "{}/motifs.input_x_grad.lite/ggr.scanmotifs.h5".format(NN_DIR))
nn_motif_files = [
"/mnt/lab_data3/dskim89/ggr/nn/2020-01-13/scanmotifs/motifs.background.lite/ggr.scanmotifs.h5"
]
#if True:
# nn_motif_files = glob.glob(
# "{}/motifs.input_x_grad.mid/ggr.scanmotifs.h5".format(NN_DIR))
if False:
# run for each motif-timepoint
for motif_name, timepoint_idx, diff_expr_file in motifs:
print ">>", motif_name
# first run with motif hits
# get bed file of regions
bed_file = "{}/{}-{}.bed.gz".format(tf_kd_dir, motif_name, timepoint_idx)
#if not os.path.isfile(bed_file):
if True:
get_motif_region_set(
nn_motif_files,
motif_name,
bed_file,
timepoint_idx=timepoint_idx)
# link to nearby genes
# TODO how are we up/downweighting things?
genes_file = "{}.genes.txt.gz".format(bed_file.split(".bed")[0])
#if not os.path.isfile(genes_file):
if True:
regions_to_genes(
bed_file,
interactions_file,
args.outputs["annotations"]["tss.pc.bed"],
genes_file,
filter_by_score=0.00)
#filter_by_score=0.5) # proximity corr thresh
# compare to differential dataset
if True:
compare_nn_gene_set_to_diff_expr(
genes_file,
diff_expr_file,
convert_table=args.outputs["annotations"]["geneids.mappings.mat"])
# then run with NN-active motifs
bed_file = "{}/{}-{}.bed.gz".format(tf_kd_dir, motif_name, timepoint_idx)
#if not os.path.isfile(bed_file):
if True:
get_motif_region_set(
nn_motif_files,
motif_name,
bed_file,
timepoint_idx=timepoint_idx,
motif_key="sequence.active.pwm-scores.thresh.sum")
# link to nearby genes
# TODO how are we up/downweighting things?
genes_file = "{}.genes.txt.gz".format(bed_file.split(".bed")[0])
#if not os.path.isfile(genes_file):
if True:
regions_to_genes(
bed_file,
interactions_file,
args.outputs["annotations"]["tss.pc.bed"],
genes_file,
filter_by_score=0.00)
#filter_by_score=0.5) # proximity corr thresh
# compare to differential dataset
if True:
compare_nn_gene_set_to_diff_expr(
genes_file,
diff_expr_file,
convert_table=args.outputs["annotations"]["geneids.mappings.mat"])
return args
| vervacity/ggr-project | ggr/workflows/learning.py | Python | mit | 45,053 |
"""
Validation of model outputs against in situ data stored and extracted from a database.
This also includes the code to build the databases of time series data sets.
"""
import datetime as dt
import glob as gb
import os
import sqlite3 as sq
import subprocess as sp
import matplotlib.path as mplPath
import matplotlib.pyplot as plt
import numpy as np
from pandas import read_hdf
from PyFVCOM.grid import get_boundary_polygons, vincenty_distance
from PyFVCOM.plot import Time, Plotter
from PyFVCOM.read import FileReader
from PyFVCOM.stats import calculate_coefficient, rmse
SQL_UNIX_EPOCH = dt.datetime(1970, 1, 1, 0, 0, 0)
class ValidationDB(object):
""" Work with an SQLite database. """
def __init__(self, db_name):
""" Create a new database `db_name'.
Parameters
----------
db_name : str
The path and name for the new database.
"""
if db_name[-3:] != '.db':
db_name += '.db'
self.conn = sq.connect(db_name)
self.create_table_sql = {}
self.retrieve_data_sql = {}
self.c = self.conn.cursor()
def execute_sql(self, sql_str):
"""
Execute the given SQL statement.
Parameters
----------
sql_str : str
SQL statement to execute.
Returns
-------
results : np.ndarray
Data from the database which matches the SQL statement.
"""
self.c.execute(sql_str)
return self.c.fetchall()
def create_table(self, table_name, col_list):
"""
Create a database table if no such table exists.
Parameters
----------
table_name : str
Table name to create.
col_list : list
List of column names to add to the table.
"""
create_str = 'CREATE TABLE IF NOT EXISTS {table} ({cols});'.format(table=table_name, cols=', '.join(col_list))
self.execute_sql(create_str)
def insert_into_table(self, table_name, data, column=None):
"""
Insert data into a table.
Parameters
----------
table_name : str
Table name into which to insert the given data.
data : np.ndarray
Data to insert into the database.
column : list, optional
Insert the supplied `data' into this `column' within the given `table_name'.
"""
data = np.asarray(data)
if np.ndim(data) == 1:
no_cols = len(data)
no_rows = 1
data = data[np.newaxis, :]
else:
no_rows, no_cols = data.shape
qs_string = '({})'.format(','.join('?' * no_cols))
# Format our optional column.
if column is not None:
column = '({})'.format(','.join(column))
else:
column = ''
if no_rows > 1:
self.c.executemany('insert or ignore into {tab} {col} values {val}'.format(tab=table_name, col=column, val=qs_string), data)
elif no_rows == 1:
self.c.execute('insert into {tab} {col} values {val}'.format(tab=table_name, col=column, val=qs_string), data[0])
self.conn.commit()
def select_qry(self, table_name, where_str, select_str='*', order_by_str=None, inner_join_str=None, group_by_str=None):
"""
Extract data from the database which matches the given SQL query.
Parameters
----------
table_name : str
Table name to query.
where_str : str
Where statement.
select_str : str, optional
Optionally give a set of columns to select.
order_by_str : str, optional
Optionally give a set of columns by which to order the results.
inner_join_str : str, optional
Optionally give an inner join string.
group_by_str : str, optional
Optionally give a string by which to group the results.
Returns
-------
results : np.ndarray
The data which matches the given query.
"""
qry_string = 'select {} from {}'.format(select_str, table_name)
if inner_join_str:
qry_string += ' inner join {}'.format(inner_join_str)
if where_str:
qry_string += ' where {}'.format(where_str)
if order_by_str:
qry_string += ' order by {}'.format(order_by_str)
if group_by_str:
qry_string += ' group by {}'.format(group_by_str)
return self.execute_sql(qry_string)
def table_exists(self, variable):
pass
#return table_exist
def close_conn(self):
""" Close the connection to the database. """
self.conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
""" Tidy up the connection to the SQLite database. """
self.conn.close()
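# Minimal usage sketch for ValidationDB (illustrative only; the table and
# column names are placeholders rather than part of any real schema):
#   with ValidationDB('example.db') as db:
#       db.create_table('gauges', ['site_id integer', 'height real'])
#       db.insert_into_table('gauges', [(1, 6.2), (2, 2.1)])
#       rows = db.select_qry('gauges', 'site_id = 2', select_str='height')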
def dt_to_epochsec(time_to_convert):
"""
Convert a datetime to our SQL database epoch.
Parameters
----------
time_to_convert : datetime.datetime
Datetime to convert.
Returns
-------
epoched : int
Converted datetime (in seconds).
"""
return (time_to_convert - SQL_UNIX_EPOCH).total_seconds()
def epochsec_to_dt(time_to_convert):
"""
Parameters
----------
time_to_convert : int
Seconds in the SQL database epoch.
Return
------
unepoched : datetime.datetime.
Converted time.
"""
return SQL_UNIX_EPOCH + dt.timedelta(seconds=time_to_convert)
def plot_map(fvcom, tide_db_path, threshold=np.inf, legend=False, **kwargs):
"""
Plot the tide gauges which fall within the model domain (in space and time) defined by the given FileReader object.
Parameters
----------
fvcom : PyFVCOM.read.FileReader
FVCOM model data as a FileReader object.
tide_db_path : str
Path to the tidal database.
threshold : float, optional
Give a threshold distance (in spherical units) beyond which a gauge is considered too far away.
legend : bool, optional
Set to True to add a legend to the plot. Defaults to False.
Any remaining keyword arguments are passed to PyFVCOM.plot.Plotter.
Returns
-------
plot : PyFVCOM.plot.Plotter
The Plotter object instance for the map
"""
tide_db = TideDB(tide_db_path)
gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)
gauges_in_domain = []
fvcom_nodes = []
for gi, gauge in enumerate(gauge_locations):
river_index = fvcom.closest_node(gauge, threshold=threshold)
if river_index:
gauge_id, gauge_dist = tide_db.get_nearest_gauge_id(*gauge)
times, data = tide_db.get_tidal_series(gauge_id, np.min(fvcom.time.datetime), np.max(fvcom.time.datetime))
if not np.any(data):
continue
gauges_in_domain.append(gi)
fvcom_nodes.append(river_index)
plot = Plotter(fvcom, **kwargs)
fx, fy = plot.m(fvcom.grid.lon, fvcom.grid.lat)
plot.plot_field(-fvcom.grid.h)
plot.axes.plot(fx[fvcom_nodes], fy[fvcom_nodes], 'ro', markersize=3, zorder=202, label='Model')
# Add the gauge locations.
rx, ry = plot.m(gauge_locations[:, 0], gauge_locations[:, 1])
plot.axes.plot(rx, ry, 'wo', label='Gauges')
for xx, yy, name in zip(rx, ry, gauge_names[gauges_in_domain]):
plot.axes.text(xx, yy, name, fontsize=10, rotation=45, rotation_mode='anchor', zorder=203)
if legend:
plot.axes.legend(numpoints=1, scatterpoints=1, ncol=2, loc='upper center', fontsize=10)
return plot
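# Example call (a sketch only; the file names below are placeholders):
#   fvcom = FileReader('casename_0001.nc', variables=['zeta'])
#   plot = plot_map(fvcom, 'tides.db', threshold=0.5)
#   plot.axes.figure.savefig('gauge_map.png')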
def plot_tides(fvcom, db_name, threshold=500, figsize=(10, 10), **kwargs):
"""
Plot model and tide gauge data.
Parameters
----------
fvcom : PyFVCOM.read.FileReader
FVCOM model data as a FileReader object.
db_name : str
Database name to interrogate.
threshold : float, optional
Give a threshold distance (in spherical units) to exclude gauges too far from a model node.
figsize : tuple
Give a figure size (units are inches).
Remaining keyword arguments are passed to PyFVCOM.plot.Time.
Returns
-------
time : PyFVCOM.plot.Time
Time series plot object.
gauge_obs : dict
Dictionary with the gauge and model data.
"""
tide_db = TideDB(db_name)
# Get all the gauges in the database and find the corresponding model nodes.
gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)
gauge_obs = {}
gauges_in_domain = []
fvcom_nodes = []
for gi, gauge in enumerate(gauge_locations):
river_index = fvcom.closest_node(gauge, threshold=threshold)
if river_index:
current_gauge = {}
current_gauge['gauge_id'], current_gauge['gauge_dist'] = tide_db.get_nearest_gauge_id(*gauge)
current_gauge['times'], current_gauge['data'] = tide_db.get_tidal_series(current_gauge['gauge_id'],
np.min(fvcom.time.datetime),
np.max(fvcom.time.datetime))
if not np.any(current_gauge['data']):
continue
current_gauge['lon'], current_gauge['lat'] = gauge_locations[gi, :]
current_gauge['gauge_clean'] = current_gauge['data'][:, 1] == 0
current_gauge['gauge_obs_clean'] = {'times': np.copy(current_gauge['times'])[current_gauge['gauge_clean']],
'data': np.copy(current_gauge['data'])[current_gauge['gauge_clean'], 0]}
current_gauge['rescale_zeta'] = fvcom.data.zeta[:, river_index] - np.mean(fvcom.data.zeta[:, river_index])
current_gauge['rescale_gauge_obs'] = current_gauge['gauge_obs_clean']['data'] - np.mean(current_gauge['gauge_obs_clean']['data'])
current_gauge['dates_mod'] = np.isin(fvcom.time.datetime, current_gauge['gauge_obs_clean']['times'])
current_gauge['dates_obs'] = np.isin(current_gauge['gauge_obs_clean']['times'], fvcom.time.datetime)
# Skip out if we don't have any coincident data (might simply be a sampling issue) within the model
# period. We should interpolate here.
if not np.any(current_gauge['dates_mod']) or not np.any(current_gauge['dates_obs']):
continue
current_gauge['r'], current_gauge['p'] = calculate_coefficient(current_gauge['rescale_zeta'][current_gauge['dates_mod']], current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
current_gauge['rms'] = rmse(current_gauge['rescale_zeta'][current_gauge['dates_mod']], current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
current_gauge['std'] = np.std(current_gauge['rescale_zeta'][current_gauge['dates_mod']] - current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
gauges_in_domain.append(gi)
fvcom_nodes.append(river_index)
name = gauge_names[gi]
gauge_obs[name] = current_gauge
del current_gauge
tide_db.close_conn() # tidy up after ourselves
# Now make a figure of all that data.
if len(gauge_obs) > 5:
cols = np.ceil(len(gauge_obs) ** (1.0 / 3)).astype(int) + 1
else:
cols = 1
rows = np.ceil(len(gauge_obs) / cols).astype(int)
fig = plt.figure(figsize=figsize)
for count, site in enumerate(sorted(gauge_obs)):
ax = fig.add_subplot(rows, cols, count + 1)
time = Time(fvcom, figure=fig, axes=ax, hold=True, **kwargs)
time.plot_line(gauge_obs[site]['rescale_zeta'], label='Model', color='k')
# We have to use the raw plot function for the gauge data as the plot_line function assumes we're using model
# data.
time.axes.plot(gauge_obs[site]['gauge_obs_clean']['times'], gauge_obs[site]['rescale_gauge_obs'], label='Gauge', color='m')
# Should add the times of the flagged data here.
time.axes.set_xlim(fvcom.time.datetime.min(), fvcom.time.datetime.max())
time.axes.set_ylim(np.min((gauge_obs[site]['rescale_gauge_obs'].min(), gauge_obs[site]['rescale_zeta'].min())),
np.max((gauge_obs[site]['rescale_gauge_obs'].max(), gauge_obs[site]['rescale_zeta'].max())))
time.axes.set_title(site)
return time, gauge_obs
def _make_normal_tide_series(h_series):
height_series = h_series - np.mean(h_series)
return height_series
class TideDB(ValidationDB):
""" Create a time series database and query it. """
def make_bodc_tables(self):
""" Make the complete set of empty tables for data to be inserted into (as defined in _add_sql_strings) """
# Insert information into the error flags table
self._add_sql_strings()
for this_key, this_val in self.bodc_tables.items():
self.create_table(this_key, this_val)
error_data = [(0, '', 'No error'), (1, 'M', 'Improbable value flagged by QC'),
(2, 'N', 'Null Value'), (3, 'T', 'Value interpolated from adjacent values')]
self.insert_into_table('error_flags', error_data)
def insert_tide_file(self, file_list):
"""
Add data from a set of files to the database.
Parameters
----------
file_list : list
List of file names.
"""
for this_file in file_list:
print('Inserting data from file: ' + this_file)
this_file_obj = BODCAnnualTideFile(this_file)
try:
site_id = self.select_qry('sites', "site_tla == '" + this_file_obj.site_tla + "'", 'site_id')[0][0]
except:
try:
current_id_max = np.max(self.select_qry('sites', None, 'site_id'))
site_id = int(current_id_max + 1)
except:
site_id = 1
site_data = [(site_id, this_file_obj.site_tla, this_file_obj.site_name, this_file_obj.lon, this_file_obj.lat, '')]
self.debug_data = site_data
self.insert_into_table('sites', site_data)
site_id_list = [site_id] * len(this_file_obj.seconds_from_ref)
table_data = list(zip(site_id_list, this_file_obj.seconds_from_ref, this_file_obj.elevation_data,
this_file_obj.elevation_flag, this_file_obj.residual_data, this_file_obj.residual_flag))
self.insert_into_table('gauge_obs', table_data)
def get_tidal_series(self, station_identifier, start_date_dt=None, end_date_dt=None):
"""
Extract a time series of tidal elevations for a given station.
Parameters
----------
station_identifier : str
Database station identifier.
start_date_dt, end_date_dt : datetime.datetime, optional
Give start and/or end times to extract from the database. If omitted, all data are returned.
Returns
-------
dates : np.ndarray
Array of datetime objects.
data : np.ndarray
Surface elevation and residuals from the database for the given station.
"""
select_str = "time_int, elevation, elevation_flag"
table_name = "gauge_obs as go"
inner_join_str = "sites as st on st.site_id = go.site_id"
if isinstance(station_identifier, str):
where_str = "st.site_tla = '" + station_identifier + "'"
else:
where_str = "st.site_id = " + str(int(station_identifier))
if start_date_dt is not None:
start_sec = dt_to_epochsec(start_date_dt)
where_str += " and go.time_int >= " + str(start_sec)
if end_date_dt is not None:
end_sec = dt_to_epochsec(end_date_dt)
where_str += " and go.time_int <= " + str(end_sec)
order_by_str = 'go.time_int'
return_data = self.select_qry(table_name, where_str, select_str, order_by_str, inner_join_str)
if not return_data:
print('No data available')
dates, data = None, None
else:
return_data = np.asarray(return_data)
date_list = [epochsec_to_dt(this_time) for this_time in return_data[:, 0]]
dates, data = np.asarray(date_list), return_data[:, 1:]
return dates, data
def get_gauge_locations(self, long_names=False):
"""
Extract locations and names of the tide gauges from the database.
Parameters
----------
long_names : bool, optional
If True, return the 'nice' long names rather than the station identifiers.
Returns
-------
tla_name : np.ndarray
List of tide gauge names.
lon_lat : np.ndarray
Positions of the gauges.
"""
gauge_site_data = np.asarray(self.select_qry('sites', None))
if long_names:
tla_name = gauge_site_data[:, 2]
else:
tla_name = gauge_site_data[:, 1]
lon_lat = np.asarray(gauge_site_data[:, 3:5], dtype=float)
return tla_name, lon_lat
def get_nearest_gauge_id(self, lon, lat, threshold=np.inf):
"""
Get the ID of the gauge closest to the position given by `lon' and `lat'.
lon, lat : float
Position for which to search for the nearest tide gauge.
Returns
-------
closest_gauge_id : int
Database ID for the gauge closest to `lon' and `lat'.
min_dist : float
Distance in metres between `lon' and `lat' and the gauge.
threshold : float
Threshold distance in metres (inclusive) within which gauges must be from the given position. If no
gauges are found within this distance, the gauge ID is None.
"""
sites_lat_lon = np.asarray(self.select_qry('sites', None, 'site_id, lat, lon'))
min_dist = np.inf
closest_gauge_id = None # we should make this False or None or something
for this_row in sites_lat_lon:
this_dist = vincenty_distance([lat, lon], [this_row[1], this_row[2]])
if this_dist < min_dist:
min_dist = this_dist
closest_gauge_id = this_row[0]
if min_dist >= threshold:
closest_gauge_id = None
else:
closest_gauge_id = int(closest_gauge_id)
return closest_gauge_id, min_dist
def _add_sql_strings(self):
""" Function to define the database structure. """
self.bodc_tables = {'gauge_obs': ['site_id integer NOT NULL', 'time_int integer NOT NULL',
'elevation real NOT NULL', 'elevation_flag integer', 'residual real', 'residual_flag integer',
'PRIMARY KEY (site_id, time_int)', 'FOREIGN KEY (site_id) REFERENCES sites(site_id)',
'FOREIGN KEY (elevation_flag) REFERENCES error_flags(flag_id)',
'FOREIGN KEY (residual_flag) REFERENCES error_flags(flag_id)'],
'sites': ['site_id integer NOT NULL', 'site_tla text NOT NULL', 'site_name text', 'lon real', 'lat real',
'other_stuff text', 'PRIMARY KEY (site_id)'],
'error_flags': ['flag_id integer NOT NULL', 'flag_code text', 'flag_description text']}
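# Added illustration (not part of the original module): a minimal sketch of building the BODC
# tide database and pulling a series back out. The database path, file name and the 'AVO'
# station code are hypothetical placeholders.
def _example_build_and_query_tide_db(db_path='bodc_tides.db', tide_files=('2006AVO.txt',)):
    """Illustrative only: create the tables, load one annual file and query one gauge."""
    tide_db = TideDB(db_path)
    tide_db.make_bodc_tables()
    tide_db.insert_tide_file(list(tide_files))
    # get_tidal_series accepts either the three-letter station code or the integer site ID.
    dates, data = tide_db.get_tidal_series('AVO', dt.datetime(2006, 1, 1), dt.datetime(2006, 12, 31))
    tide_db.close_conn()
    return dates, data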
class BODCAnnualTideFile(object):
"""
TODO: Add docstring
"""
def __init__(self, file_name, header_length=11):
"""
Assumptions: file name of the form yearTLA.txt
"""
self._clean_tide_file(file_name, header_length)
with open(file_name) as f:
header_lines = [next(f) for _ in range(header_length)]
for this_line in header_lines:
if 'ongitude' in this_line:
self.lon = [float(s) for s in this_line.split() if self._is_number(s)][0]
if 'atitude' in this_line:
self.lat = [float(s) for s in this_line.split() if self._is_number(s)][0]
if 'Site' in this_line:
site_str_raw = this_line.split()[1:]
if len(site_str_raw) == 1:
site_str = site_str_raw[0]
else:
site_str = ''
for this_str in site_str_raw:
site_str += this_str
self.site_name = site_str
self.site_tla = file_name.split('/')[-1][4:7]
raw_data = np.loadtxt(file_name, skiprows=header_length, dtype=bytes).astype(str)
seconds_from_ref = []
for this_row in raw_data:
this_dt_str = this_row[1] + ' ' + this_row[2]
this_seconds_from_ref = dt_to_epochsec(dt.datetime.strptime(this_dt_str, '%Y/%m/%d %H:%M:%S'))
seconds_from_ref.append(int(this_seconds_from_ref))
self.seconds_from_ref = seconds_from_ref
elevation_data = []
elevation_flag = []
residual_data = []
residual_flag = []
for this_row in raw_data:
meas, error_code = self._parse_tide_obs(this_row[3])
elevation_data.append(meas)
elevation_flag.append(error_code)
meas, error_code = self._parse_tide_obs(this_row[4])
residual_data.append(meas)
residual_flag.append(error_code)
self.elevation_data = elevation_data
self.elevation_flag = elevation_flag
self.residual_data = residual_data
self.residual_flag = residual_flag
@staticmethod
def _parse_tide_obs(in_str):
"""
TODO: Add docstring
"""
error_code_dict = {'M':1, 'N':2, 'T':3}
try:
int(in_str[-1])
error_code = 0
meas = float(in_str)
except:
error_code_str = in_str[-1]
meas = float(in_str[0:-1])
try:
error_code = error_code_dict[error_code_str]
except:
print('Unrecognised error code')
return
return meas, error_code
@staticmethod
def _is_number(s):
"""
TODO: Add docstring
"""
try:
float(s)
return True
except ValueError:
return False
@staticmethod
def _clean_tide_file(file_name, header_length):
"""
TODO: Add docstring
"""
sed_str = "sed -i '"+ str(header_length + 1) + ",$ {/^ *[0-9]/!d}' " + file_name
sp.call([sed_str], shell=True)
"""
Validation against L4 and E1 CTD and buoy data
observations_meta_data = {'buoy_name':'E1', 'year':'2006', 'ctd_new_file_type': False,
'ctd_datadir':'/data/euryale4/backup/mbe/Data/WCO_data/E1/CTD_data/2006',
'buoy_filepath':None, 'lon':-4.368, 'lat':50.035}
observations_meta_data = {'buoy_name':'L4', 'year':'2015', 'ctd_new_file_type': True, 'ctd_filepath':'./data/e1_data_2015.txt',
                          'buoy_filepath': '/data/euryale4/backup/mbe/Data/WCO_data/L4/Buoy_data/l4_cont_data_2015.txt', 'lon':-4.217, 'lat':50.250}
model_filestr_lambda = lambda m: '/data/euryale4/backup/mbe/Models/FVCOM/tamar_v2/run/output/depth_tweak2/2006/{:02d}/tamar_v2_0001.nc'.format(m)
available_months = np.arange(1, 13)
model_file_list = [model_filestr_lambda(this_month) for this_month in available_months]
"""
class WCODB(ValidationDB):
""" Work with an SQL database of data from PML's Western Channel Observatory. """
def make_wco_tables(self):
"""
Make the complete set of empty tables for data to be inserted into (as defined in _add_sql_strings).
"""
# Insert information into the error flags table
self._add_sql_strings()
for this_table, this_str in self.wco_tables.items():
self.create_table(this_table, this_str)
sites_data = [(0, 'L4', -4.217, 50.250, ' '), (1, 'E1', -4.368, 50.035, ' ')]
self.insert_into_table('sites', sites_data)
measurement_type_data = [(0, 'CTD measurements'), (1, 'Surface buoy measurements')]
self.insert_into_table('measurement_types', measurement_type_data)
self.execute_sql('create index date_index on obs (time_int);')
def _add_sql_strings(self):
"""
TODO: Add docstring
"""
        self.wco_tables = {'sites': ['buoy_id integer NOT NULL', 'buoy_name text', 'lon real', 'lat real',
                                     'other_stuff text', 'PRIMARY KEY (buoy_id)'],
                           'measurement_types': ['measurement_flag integer NOT NULL', 'measurement_description text',
                                                 'PRIMARY KEY (measurement_flag)'],
                           # The obs table holds the rows written by _insert_obs and read by get_observations.
                           'obs': ['buoy_id integer NOT NULL', 'time_int integer NOT NULL', 'depth real NOT NULL',
                                   'temp real', 'salinity real', 'measurement_flag integer NOT NULL',
                                   'PRIMARY KEY (buoy_id, time_int, depth, measurement_flag)',
                                   'FOREIGN KEY (buoy_id) REFERENCES sites(buoy_id)',
                                   'FOREIGN KEY (measurement_flag) REFERENCES measurement_types(measurement_flag)']}
def _add_new_variable_table(self, variable):
"""
TODO: Add docstring
"""
this_table_sql = ['buoy_id integer NOT NULL', 'time_int integer NOT NULL',
'depth real NOT NULL', variable + ' real', 'measurement_flag integer NOT NULL',
'PRIMARY KEY (buoy_id, depth, measurement_flag, time_int)', 'FOREIGN KEY (buoy_id) REFERENCES sites(buoy_id)',
'FOREIGN KEY (measurement_flag) REFERENCES measurement_types(measurement_flag)']
        self.create_table(variable, this_table_sql)
def insert_CTD_file(self, filestr, buoy_id):
"""
TODO: Add docstring
"""
file_obj = WCOObsFile(filestr)
self._insert_obs(file_obj, buoy_id, 0.0)
def insert_buoy_file(self, filestr, buoy_id):
"""
TODO: Add docstring
"""
file_obj = WCOObsFile(filestr, depth=0)
self._insert_obs(file_obj, buoy_id, 1.0)
def insert_CTD_dir(self, dirstr, buoy_id):
"""
TODO: Add docstring
"""
file_obj = WCOParseFile(dirstr)
self._insert_obs(file_obj, buoy_id, 0.0)
def insert_csv_file(self, filestr, buoy_id):
"""
TODO: Add docstring
"""
        file_obj = CSVFormatter(filestr)
self._insert_obs(file_obj, buoy_id)
def _insert_obs(self, file_obj, buoy_id, measurement_id):
"""
TODO: Add docstring
"""
epoch_sec_timelist = []
for this_time in file_obj.observation_dict['dt_time']:
epoch_sec_timelist.append(dt_to_epochsec(this_time))
buoy_id_list = np.tile(buoy_id, len(epoch_sec_timelist))
measurement_id_list = np.tile(measurement_id, len(epoch_sec_timelist))
table_data = list(zip(buoy_id_list, epoch_sec_timelist, file_obj.observation_dict['depth'], file_obj.observation_dict['temp'],
file_obj.observation_dict['salinity'], measurement_id_list))
self.insert_into_table('obs', table_data)
def get_observations(self, buoy_name, start_date_dt=None, end_date_dt=None, measurement_id=None):
"""
TODO: Add docstring
"""
select_str = "time_int, depth, temp, salinity"
table_name = "obs as go"
inner_join_str = "sites as st on st.buoy_id = go.buoy_id"
where_str = "st.buoy_name = '" + buoy_name + "'"
if start_date_dt is not None:
start_sec = dt_to_epochsec(start_date_dt)
where_str += " and go.time_int >= " + str(start_sec)
if end_date_dt is not None:
end_sec = dt_to_epochsec(end_date_dt)
where_str += " and go.time_int <= " + str(end_sec)
order_by_str = 'go.time_int, go.depth'
return_data = self.select_qry(table_name, where_str, select_str, order_by_str, inner_join_str)
if not return_data:
dates, data = None, None
print('No data available')
else:
return_data = np.asarray(return_data)
date_list = [epochsec_to_dt(this_time) for this_time in return_data[:, 0]]
dates, data = np.asarray(date_list), return_data[:, 1:]
return dates, data
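# Added illustration (not part of the original module): a minimal sketch of querying the WCO
# database for one buoy. The database path and date range are hypothetical placeholders; it
# assumes the tables have already been created and populated.
def _example_wco_observations(db_path='wco.db', buoy_name='L4'):
    """Illustrative only: fetch a year of observations for one buoy."""
    wco_db = WCODB(db_path)
    dates, data = wco_db.get_observations(buoy_name, dt.datetime(2015, 1, 1), dt.datetime(2016, 1, 1))
    wco_db.close_conn()
    return dates, data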
class WCOObsFile(object):
def __init__(self, filename, depth=None):
"""
TODO: Add docstring
"""
self._setup_possible_vars()
self.observation_dict = self._add_file(filename)
if depth is not None:
self.observation_dict['depth'] = np.tile(depth, len(self.observation_dict['dt_time']))
def _add_file(self, filename, remove_undated=True):
"""
TODO: Add docstring
"""
# remove duff lines
sed_str = "sed '/^-9.990e/d' " + filename + " > temp_file.txt"
sp.call(sed_str, shell=True)
# some files have multiple records of differing types...helpful
temp_str = 'YKA123ASD'
file_split_str = '''awk '/^[^0-9]/{g++} { print $0 > "''' + temp_str + '''"g".txt"}' temp_file.txt'''
sp.call(file_split_str, shell=True)
temp_file_list = gb.glob(temp_str + '*')
obs_dict_list = []
for this_file in temp_file_list:
this_obs = self._add_file_part(this_file)
if not remove_undated or 'dt_time' in this_obs:
obs_dict_list.append(this_obs)
rm_file = [os.remove(this_file) for this_file in temp_file_list]
return {this_key: np.hstack([this_dict[this_key] for this_dict in obs_dict_list]) for this_key in obs_dict_list[0]}
def _add_file_part(self, filename):
"""
TODO: Add docstring
"""
        # separate the header and clean out non-numeric lines
        head_str = "head -1 " + filename + " > temp_header_file.txt"
sed_str = "sed '/^[!0-9]/!d' " + filename + " > temp_file.txt"
sp.call(head_str, shell=True)
sp.call(sed_str, shell=True)
# Load the files, some use semi-colon delimiters, some whitespace...
if ';' in str(np.loadtxt('temp_header_file.txt', delimiter='no_delimination_needed', dtype=str)):
observations_raw = np.loadtxt('temp_file.txt', delimiter=';', dtype=str)
observations_header = np.loadtxt('temp_header_file.txt', delimiter=';', dtype=str)
elif ',' in str(np.loadtxt('temp_header_file.txt', delimiter='no_delimination_needed', dtype=str)):
observations_raw = np.loadtxt('temp_file.txt', delimiter=',', dtype=str)
observations_header = np.loadtxt('temp_header_file.txt', delimiter=',', dtype=str)
else:
observations_raw = np.loadtxt('temp_file.txt', dtype=str)
observations_header = np.loadtxt('temp_header_file.txt', dtype=str)
# Clean up temp files
os.remove('temp_file.txt')
os.remove('temp_header_file.txt')
# Find the relevant columns and pull out temp, salinity, date, etc if available
observation_dict = {}
time_vars = []
for this_var, this_possible in self.possible_vars.items():
if np.any(np.isin(this_possible, observations_header)):
this_col = np.where(np.isin(observations_header, this_possible))[0]
                if this_var == 'time' or this_var == 'date' or this_var == 'julian_day':
observation_dict[this_var] = np.squeeze(np.asarray(observations_raw[:, this_col], dtype=str))
time_vars.append(this_possible[np.isin(this_possible, observations_header)])
else:
observation_dict[this_var] = np.squeeze(np.asarray(observations_raw[:, this_col], dtype=float))
if 'date' in observation_dict:
observation_dict['dt_time'] = self._parse_dates_to_dt(observation_dict, time_vars)
return observation_dict
def _setup_possible_vars(self):
"""
TODO: Add docstring
"""
self.possible_vars = {'temp': np.asarray(['Tv290C', 'SST', ' Mean SST (degC)']),
'salinity': np.asarray(['Sal00', 'Sal', ' Mean SST (degC)']),
'depth': np.asarray(['DepSM']),
'date': np.asarray(['mm/dd/yyyy', 'Year', ' Date (YYMMDD)']),
'julian_day': np.asarray(['Jd']),
'time': np.asarray(['hh:mm:ss', 'Time', ' Time (HHMMSS)'])}
@staticmethod
def _parse_dates_to_dt(obs_dict, time_vars):
"""
TODO: Add docstring
"""
dt_list = []
if np.any(np.isin('mm/dd/yyyy', time_vars)):
for this_time, this_date in zip(obs_dict['time'], obs_dict['date']):
dt_list.append(dt.datetime.strptime(this_date + ' ' + this_time, '%m/%d/%Y %H:%M:%S'))
elif np.any(np.isin('Year', time_vars)):
for this_time, (this_jd, this_year) in zip(obs_dict['time'], zip(obs_dict['julian_day'], obs_dict['date'])):
dt_list.append(dt.datetime(int(this_year), 1, 1) + dt.timedelta(days=int(this_jd) - 1) +
dt.timedelta(hours=int(this_time.split('.')[0])) + dt.timedelta(minutes=int(this_time.split('.')[1])))
elif np.any(np.isin(' Date (YYMMDD)', time_vars)):
for this_time, this_date in zip(obs_dict['time'], obs_dict['date']):
dt_list.append(dt.datetime.strptime(this_date + ' ' + this_time, '%y%m%d %H%M%S'))
else:
print('Date parser not up to date with possible vars')
dt_list = None
return np.asarray(dt_list)
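# Added illustration (not part of the original module): a minimal sketch of reading a single
# WCO CTD cast file. The file name is a hypothetical placeholder; the class shells out to
# sed/awk, so this assumes a Unix-like environment.
def _example_read_wco_file(filename='e1_ctd_cast.txt'):
    """Illustrative only: parse one observation file and list the variables found."""
    obs = WCOObsFile(filename)
    return sorted(obs.observation_dict.keys())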
class WCOParseFile(WCOObsFile):
"""
TODO: Add docstring
"""
def __init__(self, dirname):
"""
TODO: Add docstring
"""
all_files = os.listdir(dirname)
dt_list = []
observation_dict_list = []
self._setup_possible_vars()
for this_file in all_files:
print('Processing file {}'.format(this_file))
try:
observation_dict_list.append(self._add_file(dirname + this_file, remove_undated=False))
date_str = '20' + this_file[0:2] + '-' + this_file[2:4] + '-' + this_file[4:6]
this_dt = dt.datetime.strptime(date_str, '%Y-%m-%d') + dt.timedelta(hours=12)
dt_list.append(np.tile(this_dt, len(observation_dict_list[-1]['temp'])))
except ValueError:
print('Error in file {}'.format(this_file))
# Flatten the list of dictionaries to one dictionary
self.observation_dict = {this_key: np.hstack([this_dict[this_key] for this_dict in observation_dict_list]) for this_key in observation_dict_list[0]}
self.observation_dict['dt_time'] = np.hstack(dt_list)
class CSVFormatter(object):
"""
TODO: Add docstring and code!
"""
def __init__(self, filename):
"""
TODO: Add docstring and code!
"""
pass
class CompareData(object):
"""
Compare the WCO data.
"""
def __init__(self, buoy_list, model_ident, wco_database, max_time_threshold=dt.timedelta(hours=1), max_depth_threshold = 100, probe_depths=None):
self.model_ident = model_ident
self.database_obj = wco_database
self.buoy_list = buoy_list
self.model_data = {}
if probe_depths is not None:
for this_ind, this_buoy in enumerate(buoy_list):
                self.model_data.setdefault(this_buoy, {})['depth'] = probe_depths[this_ind]
self.observations = {}
for this_buoy in self.buoy_list:
self.observations[this_buoy] = {}
def retrieve_file_data(self):
"""
TODO: Add docstring
"""
pass
def retrieve_obs_data(self, buoy_name, var, measurement_type=None):
"""
TODO: Add docstring
"""
        if not hasattr(self, 'model_date_mm'):
            print('Retrieve model data first')
            return
        # get_observations returns columns (depth, temp, salinity); pick the column matching var.
        obs_dt, obs_raw = self.database_obj.get_observations(buoy_name, self.model_date_mm[0], self.model_date_mm[1], measurement_type)
        obs_depth = obs_raw[:, 0]
        obs_var = obs_raw[:, {'temp': 1, 'salinity': 2}[var]]
        self.observations[buoy_name]['dt_time'] = obs_dt
        self.observations[buoy_name]['depth'] = obs_depth
        self.observations[buoy_name][var] = obs_var
    def get_comp_data_interpolated(self, buoy_name, var_list):
        """
        Interpolate the model data onto the observation depths for each day of coincident data and store the
        matched observation/model pairs in self.comp_dict[buoy_name]. TODO: still unfinished and untested;
        relies on a model_closest_time implementation in the subclass.
        """
        if not hasattr(self, 'comp_dict'):
            self.comp_dict = {}
        model_times = self.model_data[buoy_name]['dt_time']
        model_time_mm = (np.min(model_times), np.max(model_times))
        model_depths = self.model_data[buoy_name]['depth']
        obs_dates = np.unique([this_date.date() for this_date in self.observations[buoy_name]['dt_time']])
        obs_comp = {}
        for this_obs_date in obs_dates:
            if model_time_mm[0].date() <= this_obs_date <= model_time_mm[1].date():
                this_obs_choose = [this_time.date() == this_obs_date for this_time in self.observations[buoy_name]['dt_time']]
                this_time_close = self.model_closest_time(this_obs_date)
                this_obs_deps = self.observations[buoy_name]['depth'][this_obs_choose]
                for var in var_list:
                    this_obs = self.observations[buoy_name][var][this_obs_choose]
                    this_model = np.squeeze(self.model_data[buoy_name][var][this_time_close, ...])
                    this_model_interp = np.squeeze(np.interp(this_obs_deps, model_depths, this_model))
                    this_comp = np.vstack([this_obs, this_model_interp])
                    try:
                        obs_comp[var].append(this_comp)
                    except KeyError:
                        obs_comp[var] = [this_comp]
        self.comp_dict[buoy_name] = obs_comp
def comp_data_nearest(self, buoy_name, var_list):
"""
TODO: Add docstring and code!
"""
pass
def model_closest_time(self):
"""
TODO: Add docstring and code!
"""
pass
class CompareDataFileReader(CompareData):
def retrieve_file_data(self):
"""
TODO: Add docstring
"""
        where_str = 'buoy_name in ('
        for this_buoy in self.buoy_list:
            where_str += "'" + this_buoy + "',"
        where_str = where_str[0:-1] + ')'
        buoy_lat_lons = self.database_obj.select_qry('sites', where_str, 'buoy_name, lon, lat')
first_file = True
model_all_dicts = {}
        for this_file in self.model_ident:
if first_file:
fvcom_data_reader = FileReader(this_file, ['temp', 'salinity'])
close_node = []
for this_buoy_ll in buoy_lat_lons:
                    close_node.append(fvcom_data_reader.closest_node([this_buoy_ll[1], this_buoy_ll[2]]))
model_depths = fvcom_data_reader.grid.siglay * fvcom_data_reader.grid.h * -1
for this_buoy in self.buoy_list:
model_all_dicts[this_buoy] = {'dt_time': mod_times, 'temp': mod_t_vals, 'salinity': mod_s_vals}
first_file = False
else:
fvcom_data_reader = FileReader(this_file, ['temp', 'salinity'], dims={'node':[close_node]})
                for this_buoy in self.buoy_list:
                    pass  # TODO: append this file's temperature and salinity to model_all_dicts[this_buoy]
model_depths = fvcom_data_reader.grid.siglay * fvcom_data_reader.grid.h * -1
for this_buoy in self.buoy_list:
            model_all_dicts[this_buoy]['depth'] = model_depths
self.model_data[this_buoy] = model_all_dicts[this_buoy]
    def model_closest_time(self, find_time):
        """
        Return the index of the model time closest to `find_time'. TODO: test this.
        """
        model_times = np.asarray(self.model_data[self.buoy_list[0]]['dt_time'])
        return np.argmin(np.abs(model_times - find_time))
class CompareDataProbe(CompareData):
"""
TODO: Add docstring
"""
def retrieve_file_data(self):
"""
TODO: Add docstring
"""
for this_buoy in self.buoy_list:
t_filelist = []
s_filelist = []
for this_dir in self.model_ident:
t_filelist.append(this_dir + this_buoy + '_t1.dat')
s_filelist.append(this_dir + this_buoy + '_s1.dat')
mod_times, mod_t_vals, mod_pos = pf.read.read_probes(t_filelist, locations=True, datetimes=True)
mod_times, mod_s_vals, mod_pos = pf.read.read_probes(s_filelist, locations=True, datetimes=True)
model_dict = {'dt_time': mod_times, 'temp': mod_t_vals, 'salinity': mod_s_vals}
self.model_data[this_buoy] = model_dict
class CompareICES(object):
"""
A class for comparing FVCOM(-ERSEM) models to ICES bottle data. It is a fvcom-ised and class-ised version of code
written by Momme Butenschon for NEMO output.
    The ICES data used is in a premade h5 file. This is how it was inherited and should be updated to a form we can
    reproduce.
Default ICES variables: 'TEMP', 'PSAL', 'DOXY(umol/l)', 'PHOS(umol/l)', 'SLCA(umol/l)', 'NTRA(umol/l)',
'AMON(umol/l)', 'PHPH', 'ALKY(mmol/l)', 'CPHL(mg/m^3)'
Example
-------
from PyFVCOM.validation import ICES_comp
import matplotlib.pyplot as plt
datafile="/data/euryale4/backup/momm/Data/ICES-data/CTD-bottle/EX187716.averaged.sorted.reindexed.h5"
modelroot="/data/euryale2/scratch/mbe/Models_2/FVCOM/rosa/output"
years=[2005]
months = [2, 3]
modelfile=lambda y, m: "{}/{}/{:02d}/aqua_v16_0001.nc".format(modelroot, y, m)
modelfilelist = [modelfile(years[0], this_month) for this_month in months]
test_comp = ICES_comp(modelfilelist, datafile, noisy=True)
temp_ices, temp_model = test_comp.get_var_comp('TEMP')
plt.scatter(temp_model, temp_ices)
plt.xlabel('Modelled temperature')
plt.ylabel('Observed temperature')
To Do
-----
Make script to generate the ICES datafile
Parallelise
Add plotting
Change null value to non numeric
"""
def __init__(self, modelfilelist, ices_hdf_file, var_list=None, daily_avg=False, noisy=False):
"""
Retrieves the ICES data from the timeperiod of the model run and from within its bounding polygon, then for
each observation retrieves the nearest model output in space and time. These data are held in dicts self.ices_data
and self.model_data, with keys corresponding to the ICES variable names.
Parameters
----------
modelfilelist : list-like
            List of strings of the locations of the model output files. It is assumed the files are in sequential time order.
ices_hdf_file : string
The path to the hdf file of ICES data.
var_list : list-like, optional
The variables for comparison (ICES names). If not specified then defaults to all available (see self._add_default_varlist)
daily_avg : boolean, optional
Set to true if comparing daily averaged FVCOM output
noisy : boolean, optional
Output progress strings
"""
self.model_files = modelfilelist
self.ices_file = ices_hdf_file
self.daily_avg = daily_avg
self._add_ICES_model_varnames()
self._add_data_dicts()
self.noisy = noisy
if var_list:
self.var_keys = var_list
else:
self._add_default_varlist()
model_varkeys = []
for this_key in self.var_keys:
this_model_key = self.ices_model_conversion[this_key]
if "+" in this_model_key:
vlist = this_model_key.split('+')
for v in vlist:
model_varkeys.append(v)
else:
model_varkeys.append(this_model_key)
self.model_varkeys = model_varkeys
self.zeta_filereader = FileReader(self.model_files[0], ['zeta'])
if len(self.model_files) > 1:
for this_file in self.model_files[1:]:
self.zeta_filereader = FileReader(this_file, ['zeta']) >> self.zeta_filereader
self.lon_mm = [np.min(self.zeta_filereader.grid.lon), np.max(self.zeta_filereader.grid.lon)]
self.lat_mm = [np.min(self.zeta_filereader.grid.lat), np.max(self.zeta_filereader.grid.lat)]
bn_list = get_boundary_polygons(self.zeta_filereader.grid.triangles)[0] # Assumes first poly is outer boundary
self.bnd_poly = mplPath.Path(np.asarray([self.zeta_filereader.grid.lon[bn_list], self.zeta_filereader.grid.lat[bn_list]]).T)
self._ICES_dataget()
self._model_dataget()
def get_var_comp(self, var, return_locs_depths_dates = False):
"""
        Retrieve the comparison data for a single variable
Parameters
----------
var : string
The variable to return, this must be the same as something in the self.var_keys (i.e. the ICES name)
return_locs_depths_dates : boolean, optional
As well as data return the locations, depths, and dates of the ICES observations to allow subsetting of results
Returns
-------
ices_data : array or dict
array of observations or (if return_locs_depths_dates) a dict with observations, depths, dates, and locations
model_data : array
array of nearest model data corresponding to observations
"""
if var not in self.var_keys:
print('Variable not in retrieved data')
return None, None
ices_data = np.asarray(self.ices_data[var])
model_data = np.asarray(self.model_data[var])
remove_data = model_data < -100
ices_data = ices_data[~remove_data]
model_data = model_data[~remove_data]
if return_locs_depths_dates:
ices_dates = np.asarray(self.ices_data['time_dt'])[~remove_data]
ices_depths = np.asarray(self.ices_data['z'])[~remove_data]
ices_lon = np.asarray(self.ices_data['lon'])[~remove_data]
ices_lat = np.asarray(self.ices_data['lat'])[~remove_data]
ices_data = {var: ices_data, 'depths': ices_depths, 'dates': ices_dates, 'lon': ices_lon, 'lat': ices_lat}
return ices_data, model_data
def _ICES_dataget(self):
"""
TODO: Add docstring
"""
# Read the ICES datafile
df = read_hdf(self.ices_file, "df")
start_step_len = 1000000
end_step_len = 10
start_index = 0
        # The dataframe is huge so skip through to the appropriate start point
while start_step_len >= end_step_len:
start_index = self._year_start(np.min(self.zeta_filereader.time.datetime).year, start_index, start_step_len, df)
            start_step_len = start_step_len // 10
df = df[int(start_index):]
for n, sample in df.iterrows():
if self.noisy:
print('ICES sample {}'.format(n))
h = int(np.floor(sample['Hr']/100))
sample_dt = dt.datetime(int(sample['Year']), int(sample['Mnth']), int(sample['Dy']), h, int(sample['Hr'] - h * 100))
if sample_dt > np.max(self.zeta_filereader.time.datetime):
break
if self.lon_mm[0]<=sample['Longdeg']<=self.lon_mm[1] and self.lat_mm[0]<=sample['Latdeg']<=self.lat_mm[1] and \
sample_dt >= np.min(self.zeta_filereader.time.datetime):
if self.bnd_poly.contains_point(np.asarray([sample['Longdeg'], sample['Latdeg']])):
node_ind = self.zeta_filereader.closest_node([sample['Longdeg'], sample['Latdeg']], haversine=True)
if self.daily_avg: # For daily averages match by day, otherwise use nearest time
sample_dt = dt.datetime(int(sample['Year']), int(sample['Mnth']), int(sample['Dy']))
model_time_ind = self.zeta_filereader.closest_time(sample_dt)
model_dt = self.zeta_filereader.time.datetime[model_time_ind]
sample_depth=sample['d/p']
this_depth = self.zeta_filereader.grid.h[node_ind] + self.zeta_filereader.data.zeta[model_time_ind, node_ind]
dep_layers = this_depth * -1 * self.zeta_filereader.grid.siglay[:, node_ind]
z_ind = self._checkDepth(sample_depth, dep_layers)
if z_ind>=0 and self._checkSample(sample):
self._addICESsample(sample, sample_dt, model_dt, node_ind, z_ind)
def _addICESsample(self, sample, sample_dt, model_dt, node_ind, z_ind):
"""
TODO: Add docstring
"""
self.ices_data['time_dt'].append(sample_dt)
self.model_data['time_dt'].append(model_dt)
self.ices_data['lat'].append(sample['Latdeg'])
self.ices_data['lon'].append(sample['Longdeg'])
self.ices_data['z'].append(sample["d/p"])
self.model_data['node_ind'].append(node_ind)
self.model_data['z_ind'].append(z_ind)
for key in self.var_keys:
if sample[key]<-8.999999:
mvalue=-1.e15
dvalue=-1.e15
else:
dvalue=sample[key]
mvalue=-1.e10
self.ices_data[key].append(dvalue)
self.model_data[key].append(mvalue)
if 'NTRA(umol/l)' in self.var_keys:
if sample['NTRA(umol/l)']>-8.999999 and sample['NTRI(umol/l)']>-8.999999:
dvalue=sample['NTRI(umol/l)']
mvalue=-1.e10
else:
dvalue=-1.e15
mvalue=-1.e15
self.ices_data['NTRI(umol/l)'].append(dvalue)
self.model_data['NTRI(umol/l)'].append(mvalue)
def _model_dataget(self):
"""
TODO: Add docstring
"""
if len(self.ices_data['time_dt']) == 0:
print('No ICES data loaded for comparison')
return
current_modelfile_ind = 0
current_modelfile_dt = [this_date.date() for this_date in FileReader(self.model_files[current_modelfile_ind]).time.datetime]
unique_obs_days = np.unique([this_date.date() for this_date in self.ices_data['time_dt']])
for counter_ind, this_day in enumerate(unique_obs_days):
if self.noisy:
print('Getting model data from day {} of {}'.format(counter_ind +1, len(unique_obs_days)))
if this_day > current_modelfile_dt[-1]:
current_modelfile_ind += 1
if current_modelfile_ind < len(self.model_files):
current_modelfile_dt = [this_date.date() for this_date in FileReader(self.model_files[current_modelfile_ind]).time.datetime]
else:
return
this_day_index = np.where(np.asarray(current_modelfile_dt) == this_day)[0]
this_day_fr = FileReader(self.model_files[current_modelfile_ind], self.model_varkeys,
dims={'time': np.arange(np.min(this_day_index), np.max(this_day_index) + 1)})
this_day_obs_inds = np.where(np.asarray([this_dt.date() for this_dt in self.ices_data['time_dt']]) == this_day)[0]
for this_record_ind in this_day_obs_inds:
for key in self.var_keys:
if self.ices_data[key][this_record_ind] >-9.99e9:
this_model_key = self.ices_model_conversion[key]
space_ind = self.model_data['node_ind'][this_record_ind]
dep_ind = self.model_data['z_ind'][this_record_ind]
time_ind = this_day_fr.closest_time(self.ices_data['time_dt'][this_record_ind])
if "+" in this_model_key:
vlist = this_model_key.split('+')
mbuffer = 0
for v in vlist:
mbuffer += getattr(this_day_fr.data, v)[time_ind, dep_ind, space_ind]
self.model_data[key][this_record_ind] = mbuffer
else:
self.model_data[key][this_record_ind] = getattr(this_day_fr.data, this_model_key)[time_ind, dep_ind, space_ind]
def _add_ICES_model_varnames(self):
"""
TODO: Add docstring
"""
self.ices_model_conversion = {'TEMP':'temp', 'PSAL':'salinity', 'PHOS(umol/l)':'N1_p', 'SLCA(umol/l)':'N5_s',
'PHPH':'O3_pH', 'ALKY(mmol/l)':'O3_TA', 'NTRA(umol/l)':'N3_n', 'AMON(umol/l)':'N4_n',
'DOXY(umol/l)':'O2_o', 'CPHL(mg/m^3)':'P1_Chl+P2_Chl+P3_Chl+P4_Chl'}
def _add_data_dicts(self):
"""
TODO: Add docstring
"""
self.ices_data = {'lat':[], 'lon':[], 'z':[], 'time_dt':[], 'NTRI(umol/l)':[]}
self.model_data = {'node_ind':[], 'z_ind':[], 'time_dt':[], 'NTRI(umol/l)':[]}
for this_key in self.ices_model_conversion:
self.ices_data[this_key] = []
self.model_data[this_key] = []
def _add_default_varlist(self):
"""
TODO: Add docstring
"""
self.var_keys = ['TEMP', 'PSAL', 'DOXY(umol/l)', 'PHOS(umol/l)', 'SLCA(umol/l)', 'NTRA(umol/l)', 'AMON(umol/l)',
'PHPH', 'ALKY(mmol/l)', 'CPHL(mg/m^3)']
def _checkSample(self, sample):
"""
TODO: Add docstring
"""
hasData=False
for key in self.var_keys:
if sample[key]>-8.999999:
hasData=True
return hasData
@staticmethod
def _checkDepth(z, dep_lays_choose):
"""
TODO: Add docstring
"""
if z>dep_lays_choose[-1]:
return -1
else:
k=((z-dep_lays_choose)**2).argmin()
return k
@staticmethod
def _year_start(year_find, start_index, step, df):
"""
TODO: Add docstring
"""
year_found = 0
this_ind = start_index
while year_found == 0:
this_year = df.loc[this_ind]['Year']
if this_year >=year_find:
year_found = 1
return_step = this_ind - step
this_ind = this_ind + step
return int(return_step)
| pwcazenave/PyFVCOM | PyFVCOM/validation.py | Python | mit | 53,848 |
import os
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class GeoLoc(Base):
'''
Definition of geographical location.
'''
__tablename__ = 'geoloc'
name = Column(String, primary_key=True)
lat = Column(Float, nullable=False)
lon = Column(Float, nullable=False)
def __repr__(self):
return "%s at (%f, %f)"%(self.name, self.lat, self.lon)
def __create_session():
db_engine = create_engine(os.environ["DATABASE_URL"])
Base.metadata.create_all(db_engine)
Session = sessionmaker(bind=db_engine, autocommit=True)
return Session()
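# Added illustration (not part of the original module): a minimal sketch of storing and reading
# back a location with the module-level Session created below. The name and coordinates are
# arbitrary; because the session is created with autocommit=True, flush() writes the row.
def example_add_location(name="Plymouth", lat=50.3755, lon=-4.1427):
    """Illustrative only: add a GeoLoc row and read it back."""
    Session.add(GeoLoc(name=name, lat=lat, lon=lon))
    Session.flush()
    return Session.query(GeoLoc).filter_by(name=name).one()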
Session = __create_session()
| eiva/nadmozg | utils/Db.py | Python | mit | 822 |
"""Classes and methods for working with the physical Mebo robot"""
import logging
import socket
import sys
import time
from abc import ABC
from collections import namedtuple
from functools import partial
from ipaddress import (
AddressValueError,
IPv4Network,
IPv4Address
)
from xml.etree.ElementTree import fromstring as xmlfromstring
from requests import Session
from requests.exceptions import (
ConnectionError,
HTTPError
)
from zeroconf import ServiceBrowser, Zeroconf
from .exceptions import (
MeboCommandError,
MeboDiscoveryError,
MeboRequestError,
MeboConnectionError,
MeboConfigurationError
)
from mebo.stream.session import (
RTSPSession,
)
logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
Broadcast = namedtuple('Broadcast', ['ip', 'port', 'data'])
WirelessNetwork = namedtuple('WirelessNetwork', ['ssid', 'mac', 'a', 'q', 'si', 'nl', 'ch'])
NORTH = 'n'
NORTH_EAST = 'ne'
EAST = 'e'
SOUTH_EAST = 'se'
SOUTH = 's'
SOUTH_WEST = 'sw'
WEST = 'w'
NORTH_WEST = 'nw'
DIRECTIONS = {NORTH, NORTH_EAST, EAST, SOUTH_EAST, SOUTH, SOUTH_WEST, WEST, NORTH_WEST}
class _MeboMDNSListener:
def remove_service(self, zeroconf, type, name):
logging.debug("Service %s removed", name)
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
logging.debug("Service %s added, service info: %s", name, info)
class ComponentFactory:
"""Factory class for generating classes of components"""
@classmethod
def _from_parent(cls, name, **actions):
"""Generates a class of the given type as a subclass of component
:param name: Name of the generated class
:type name: str
:param actions: action names-> `callables`, which are closures using the parents shared request infrastructure
:type actions: dict
"""
cls = type(name, (Component,), actions)
cls.__doc__ = f"""{name.upper()} Component"""
return cls(actions=actions.keys())
class Component(ABC):
"""Abstract base class for all robot components"""
def __init__(self, actions):
self.actions = actions
def __repr__(self):
return '<{} actions={}>'.format(self.__class__, self.actions)
class Mebo:
"""Mebo represents a single physical robot"""
# port used by mebo to broadcast its presence
BROADCAST_PORT = 51110
# port used to establish media (RTSP) sessions
RTSP_PORT = 6667
def __init__(self, ip=None, auto_connect=False):
"""Initializes a Mebo robot object and establishes an http connection
If no ip or network is supplied, then we will autodiscover the mebo using mDNS.
:param ip: IPv4 address of the robot as a string.
:type ip: str
:param auto_connect: if True, will autodiscover and connect at object creation time
:type auto_connect: bool
>>> m = Mebo()
>>> m.connect()
>>> m2 = Mebo(auto_connect=True)
"""
self._session = Session()
self._ip = None
if ip:
self.ip = ip
elif auto_connect:
self.connect()
self._arm = None
self._wrist = None
self._claw = None
self._speaker = None
self._rtsp_session = None
@property
def endpoint(self):
return 'http://{}'.format(self.ip)
@property
def ip(self):
"""The IP of the robot on the LAN
This value is either provided explicitly at creation time or autodiscovered via mDNS
"""
if self._ip is None:
raise MeboConfigurationError('No configured or discovered value for ip address')
return self._ip
@ip.setter
def ip(self, value):
try:
addr = IPv4Address(value)
self._ip = addr
except AddressValueError:
raise MeboConfigurationError(f'Value {addr} set for IP is invalid IPv4 Address')
@property
def media(self):
"""an rtsp session representing the media streams (audio and video) for the robot"""
if self._rtsp_session is None:
url = f'rtsp://{self.ip}/streamhd/'
self._rtsp_session = RTSPSession(
url,
port=self.RTSP_PORT,
username='stream',
realm='realm',
user_agent='python-mebo'
)
return self._rtsp_session
# TODO: rip this out, or change it to a hearbeat
# listener which gets established, this has nothing to do with discovery
# instead, use mDNS
def _get_broadcast(self, address, timeout=10):
""" Attempts to receive the UDP broadcast signal from Mebo
on the supplied address. Raises an exception if no data is received
before 'timeout' seconds.
:param address: the broadcast address to bind
:param timeout: how long the socket should wait without receiving data before raising socket.timeout
:returns: A Broadcast object that containing the source IP, port, and data received.
:raises: `socket.timeout` if no data is received before `timeout` seconds
"""
logging.debug(f"reading from: {address}:{Mebo.BROADCAST_PORT}")
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.bind((address, Mebo.BROADCAST_PORT))
s.settimeout(timeout)
data, source = s.recvfrom(4096)
logging.debug(f"Data received: {data}:{source}")
return Broadcast(source[0], source[1], data)
def _setup_video_stream(self):
self._request(req='feedback_channel_init')
self._request(req='set_video_gop', value=40)
self._request(req='set_date', value=time.time())
self._request(req='set_video_gop', value=40, speed=1)
self._request(req='set_video_gop', value=40, speed=2)
self._request(req='set_video_bitrate', value=600)
self._request(req='set_video_bitrate', value=600, speed=1)
self._request(req='set_video_bitrate', value=600, speed=2)
self._request(req='set_resolution', value='720p')
self._request(req='set_resolution', value='720p', speed=1)
self._request(req='set_resolution', value='720p', speed=2)
self._request(req='set_video_qp', value=42)
self._request(req='set_video_qp', value=42, speed=1)
self._request(req='set_video_qp', value=42, speed=2)
self._request(req='set_video_framerate', value=20)
self._request(req='set_video_framerate', value=20, speed=1)
self._request(req='set_video_framerate', value=20, speed=2)
def _get_stream(self, address, timeout=10):
pass
def _get_mdns(self, key="_camera._tcp.local."):
try:
zeroconf = Zeroconf()
listener = _MeboMDNSListener()
browser = ServiceBrowser(zeroconf, key, listener)
time.sleep(1)
for name, record in browser.services.items():
info = zeroconf.get_service_info(record.key, record.alias)
# note: the library we're using keeps these keys and values as bytes
return info.properties[b'ip'].decode('ascii')
finally:
zeroconf.close()
def _discover(self):
"""
Runs the discovery scan to find Mebo on your LAN
:returns: The IP address of the Mebo found on the LAN
:raises: `MeboDiscoveryError` if discovery times out or the API probe produces a 40X or 50x status code
"""
try:
logging.debug('Looking for Mebo...')
ip = self._get_mdns()
return ip
except socket.timeout:
raise MeboDiscoveryError(('Unable to locate Mebo on the network.\n'
'\tMake sure it is powered on and connected to LAN.\n'
'\tIt may be necessary to power cycle the Mebo.'))
def connect(self):
"""Connect to the mebo control server over HTTP
If no IP exists for the robot already, the IP will be autodiscovered via mDNS. When there is already an IP, that will be used to make a canary request to get the command server software version. If the robot has been previously connected, no request is made at all.
:raises: :class:`mebo.exceptions.MeboDiscoveryError` when a mDNS discovery fails
:raises: :class:`mebo.exceptions.MeboConnectionError` when a TCP ConnectionError or HTTPError occurs
"""
if self._ip is None:
self.ip = self._discover()
logging.debug(f'Mebo found at {self.ip}')
try:
version = self.version
logging.debug(f'Mebo {version} connected')
except (ConnectionError, HTTPError) as e:
raise MeboConnectionError(f'Unable to connect to mebo: {e}')
def _request(self, **params):
""" private function to submit HTTP requests to Mebo's API
:param params: arguments to pass as query params to the Mebo API. Might
also include `need_response`, a kwarg dictating whether the caller
requires the response object from the API.
:returns: The `requests.HTTPResponse` object if `need_response` is True, `None` otherwise.
"""
try:
need_response = params.pop('need_response')
except KeyError:
# by default, don't return a response
need_response = False
try:
response = self._session.get(self.endpoint, params=params)
response.raise_for_status()
if need_response:
return response
except (ConnectionError, HTTPError) as e:
raise MeboRequestError(f'Request to Mebo failed: {e}')
def visible_networks(self):
"""
Retrieves list of wireless networks visible to Mebo.
:returns: A dictionary of name to `WirelessNetwork`
>>> m = Mebo(auto_connect=True)
>>> print(m.visible_networks())
"""
resp = self._request(req='get_rt_list', need_response=True)
et = xmlfromstring(f'{resp.text}')
visible = {}
for nw in et.findall('w'):
visible[nw.find('s').text.strip('"')] = WirelessNetwork(*(i.text.strip('"') for i in nw.getchildren()))
return visible
def add_router(self, auth_type, ssid, password, index=1):
"""
Save a wireless network to the Mebo's list of routers
"""
self._request(req='setup_wireless_save', auth=auth_type, ssid=ssid, key=password, index=index)
def set_scan_timer(self, value=30):
self._request(req='set_scan_timer', value=value)
def restart(self):
self._request(req='restart_system')
def set_timer_state(self, value=0):
self._request(req='set_timer_state', value=value)
def get_wifi_cert(self):
resp = self._request(req='get_wifi_cert', need_response=True)
_, cert_type = resp.text.split(':')
return cert_type.strip()
def get_boundary_position(self):
""" Gets boundary positions for 4 axes:
Arm: s_up, s_down
Claw: c_open, c_close
Wrist Rotation: w_left & w_right
Wrist Elevation: h_up, h_down
:returns: dictionary of functions to boundary positions
"""
resp = self._request(req='get_boundary_position', need_response=True)
_, key_value_string = [s.strip() for s in resp.text.split(':')]
return dict((k, int(v)) for k, v in [ks.strip().split('=') for ks in key_value_string.split('&')])
@property
def version(self):
"""returns the software version of the robot
>>> m = Mebo(auto_connect=True)
>>> m.version == '03.02.37'
"""
if not hasattr(self, '_version') or self._version is None:
resp = self._request(req='get_version', need_response=True)
_, version = resp.text.split(':')
self._version = version.strip()
return self._version
def move(self, direction, speed=255, dur=1000):
"""Move the robot in a given direction at a speed for a given duration
:param direction: map direction to move. 'n', 'ne', 'nw', etc.
:param speed: a value in the range [0, 255]. default: 255
:param dur: number of milliseconds the wheels should spin. default: 1000
"""
directions = {
NORTH: 'move_forward',
NORTH_EAST: 'move_forward_right',
EAST: 'move_right',
SOUTH_EAST: 'move_backward_right',
SOUTH: 'move_backward',
SOUTH_WEST: 'move_backward_left',
WEST: 'move_left',
NORTH_WEST: 'move_forward_left'
}
direction = directions.get(direction.lower())
if direction is None:
            raise MeboCommandError(
                'Direction must be one of the map directions: {}'.format(directions.keys()))
speed = min(speed, 255)
# there is also a ts keyword that could be passed here.
self._request(req=direction, dur=dur, value=speed)
def turn(self, direction):
"""Turns a very small amount in the given direction
:param direction: one of R or L
"""
direction = direction.lower()[0]
if direction not in {"r", "l"}:
raise MeboCommandError('Direction for turn must be either "right", "left", "l", or "r"')
call = 'inch_right' if direction == 'r' else 'inch_left'
self._request(req=call)
def stop(self):
self._request(req='fb_stop')
@property
def claw(self):
""" The claw component at the end of Mebo's arm
>>> m = Mebo(auto_connect=True)
>>> m.claw.open(dur=1000, **params)
>>> m.claw.close(dur=400, **params)
>>> m.claw.stop(**params)
"""
if self._claw is None:
claw = ComponentFactory._from_parent(
'Claw',
open=partial(self._request, req='c_open'),
close=partial(self._request, req='c_close'),
stop=partial(self._request, req='c_stop')
)
self._claw = claw
return self._claw
@property
def wrist(self):
"""The wrist component of the robot
The wrist component has the following actions:
* rotate clockwise (to the right from the robot's perspective) OR counter-clockwise (to the left from the robot's perspective)
* raise or lower
>>> m = Mebo()
>>> m.wrist.rotate_right(**params)
>>> m.wrist.rotate_left(**params)
>>> m.wrist.inch_right(**params)
>>> m.wrist.inch_left(**params)
>>> m.wrist.rotate_stop()
>>> m.wrist.up()
>>> m.wrist.down()
>>> m.wrist.lift_stop()
"""
if self._wrist is None:
wrist = ComponentFactory._from_parent(
'Wrist',
rotate_right=partial(self._request, req='w_right'),
inch_right=partial(self._request, req='inch_w_right'),
rotate_left=partial(self._request, req='w_left'),
inch_left=partial(self._request, req='inch_w_left'),
rotate_stop=partial(self._request, req='w_stop'),
up=partial(self._request, req='h_up'),
down=partial(self._request, req='h_down'),
lift_stop=partial(self._request, req='h_stop'),
)
self._wrist = wrist
return self._wrist
@property
def arm(self):
"""The arm component of mebo
>>> m = Mebo(auto_connect=True)
>>> m.arm.up(dur=1000, **params)
>>> m.arm.down(dur=1000, **params)
>>> m.arm.stop(**params)
"""
up = partial(self._request, req='s_up')
up.__doc__ = """Move the arm up
:param dur: The duration of the arm movement
:type dur: int
"""
down = partial(self._request, req='s_down')
down.__doc__ = """Move the arm down
:param dur: The duration of the arm movement
:type dur: int
"""
stop = partial(self._request, req='s_stop')
stop.__doc__ = """Stop the arm"""
if self._arm is None:
arm = ComponentFactory._from_parent(
'Arm',
up=up,
down=down,
stop=stop
)
self._arm = arm
return self._arm
@property
def speaker(self):
"""
>>> m = Mebo(auto_connect=True)
>>> m.speaker.set_volume(value=6)
>>> m.speaker.get_volume()
>>> m.speaker.play_sound(**params)
"""
if self._speaker is None:
speaker = ComponentFactory._from_parent(
'Speaker',
set_volume=partial(self._request, req='set_spk_volume'),
play_sound=partial(self._request, req='audio_out0'))
self._speaker = speaker
return self._speaker
| crlane/python-mebo | mebo/robot.py | Python | mit | 17,041 |
"""
Provides the :class:`Model` plugin for two-way data binding between
context and html input elements.
"""
from browser import timer
try:
from ..expression import parse
from ..tpl import _compile, register_plugin
except:
from circular.template.expression import parse
from circular.template.tpl import _compile, register_plugin
from .tag import TagPlugin
class Model(TagPlugin):
"""
The model plugin binds an input element value to a context variable (or a simple expression).
For example the template:
```
Hello {{ name }}
<input model='name' type='textinput' />
```
when bound to a context would update the value of the variable `name` in the context
whenever a user typed (or pasted in) a new value on the input field. The plugin takes
two optional parameters: `update_event` and `update_interval`. When `update_event` is
set, the context is updated whenever the given event is fired on the input element.
The default is to update whenever the event `input` is fired. When `update_interval`
is set to number, the context is updated at most once per the given number of milliseconds.
WARNING: The Model plugin must come before any plugin which creates more than one
element (e.g. the `For` plugin).
"""
def __init__(self, tpl_element, model=None, update_interval=None, update_event='input'):
super().__init__(tpl_element)
self._model_change_timer = None
self._input_change_timer = None
self.element = None
self._ctx = None
if isinstance(tpl_element, Model):
# pylint: disable=protected-access; we are cloning Model, we can access protected variables
self._update_event = tpl_element._update_event
self._update_interval = tpl_element._update_interval
self._model = tpl_element.model.clone()
self.child = tpl_element.child.clone()
else:
self._update_event = update_event
if update_interval is not None:
self._update_interval = int(update_interval)
else:
self._update_interval = None
self._model, _ = parse(model)
self.child = _compile(tpl_element)
assert self._model.is_assignable(), "The expression "+model+" does not support assignment"
if self._update_interval:
self._model.bind('change', self._defer_model_change)
else:
self._model.bind('change', self._model_change)
self.child.bind('change', self._subtree_change_handler)
def bind_ctx(self, ctx):
self.element = self.child.bind_ctx(ctx)
self._model.bind_ctx(ctx)
self.element.value = self._model.value
if self._update_interval:
self.element.bind(self._update_event, self._defer_input_change)
else:
self.element.bind(self._update_event, self._input_change)
super().bind_ctx(ctx)
return self.element
def _defer_model_change(self, _event):
if self._model_change_timer is None:
self._model_change_timer = timer.set_interval(
self._model_change, self._update_interval)
def _defer_input_change(self, _event):
if self._input_change_timer is None:
self._input_change_timer = timer.set_interval(
self._input_change, self._update_interval)
def _model_change(self, event=None):
if self._model_change_timer:
timer.clear_interval(self._model_change_timer)
self._model_change_timer = None
if self.element:
            if event is not None and 'value' in event.data:
new_val = event.data['value']
else:
new_val = self._model.value
if not self.element.value == new_val:
self.element.value = new_val
    def _input_change(self, _event=None):
        # Always clear any pending deferred-update timer, mirroring _model_change.
        if self._input_change_timer:
            timer.clear_interval(self._input_change_timer)
            self._input_change_timer = None
        if self.element and self._model.value != self.element.value:
            self._model.value = self.element.value
def __repr__(self):
return "<Model " + repr(self._model) + \
" (" + str(self._model.value) + ")>"
register_plugin(Model)
| jonathanverner/circular | src/circular/template/tags/model.py | Python | mit | 4,421 |
#!/usr/bin/env python
import sys
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def Varscan_somatic(sample, tumor, normal, Varscan_somatic_flag):
'''Runs Varscan2 on paired tumor/normal samples to detect somatic point mutations in cancer genomes.
input:
.pileup
output:
_varscan_somatic.vcf.gz
citation:
parameters from parameters file:
ALIGNMENT_DIR:
TEMP_DIR:
VARSCAN_VERSION:
GENOME:
CAPTURE_KIT_BED:
'''
spawn_job(jobname = 'Varscan_somatic', SAMPLE = sample + tumor + normal, LOG_PATH = p.OMICSPIPE["LOG_PATH"], RESULTS_EMAIL = p.OMICSPIPE["EMAIL"], SCHEDULER = p.OMICSPIPE["SCHEDULER"], walltime = p.VARSCAN["WALLTIME"], queue = p.OMICSPIPE["QUEUE"], nodes = p.VARSCAN["NODES"], ppn = p.VARSCAN["CPU"], memory = p.VARSCAN["MEMORY"], script = "/Varscan_somatic.sh", args_list = [sample, p.VARSCAN["VERSION"], p.VARSCAN["R_VERSION"], p.VARSCAN["VCFTOOLS_VERSION"], p.VARSCAN["SOMATIC_RESULTS"], p.VARSCAN["ALIGNMENT_DIR"], normal, tumor, p.VARSCAN["VCFLIB_VERSION"]])
job_status(jobname = 'Varscan_somatic', resultspath = p.VARSCAN["RESULTS"] + "/" + sample, SAMPLE = sample, outputfilename = sample + tumor + normal + "_varscan_somatic.vcf.gz", FLAG_PATH = p.OMICSPIPE["FLAG_PATH"])
return
if __name__ == '__main__':
Varscan_somatic(sample, tumor, normal, Varscan_somatic_flag)
sys.exit(0)
| adammaikai/PipeMaster | modules/Varscan_somatic.py | Python | mit | 1,547 |