# repo: hall1467/wikidata_usage_tracking | path: wbc_usage/utilities/determine_wikis.py | license: mit
"""
Prints all wikis to stdout.
Usage:
determine_wikis (-h|--help)
determine_wikis [--debug]
[--verbose]
Options:
-h, --help This help message is printed
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import logging
import mwapi
import sys
import json
import docopt
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.WARNING if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
verbose = args['--verbose']
run(verbose)
# Contacts API to return list of wikis
# Code credit: https://github.com/WikiEducationFoundation/academic_classification/blob/master/pageclassifier/revgather.py
def run(verbose):
session = mwapi.Session(
'https://en.wikipedia.org',
user_agent='hall1467'
)
results = session.get(
action='sitematrix'
)
for database_dictionary in extract_query_results(results):
if verbose:
sys.stderr.write("Printing json for the database: " +
database_dictionary['dbname'] + "\n")
sys.stderr.flush()
sys.stdout.write(json.dumps(database_dictionary) + "\n")
# Code credit: https://github.com/WikiEducationFoundation/academic_classification/blob/master/pageclassifier/revgather.py
def extract_query_results(results):
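    # The 'sitematrix' payload maps numeric keys to language groups that each
    # carry a 'site' list; the special keys 'count' (total number of wikis)
    # and 'specials' (wikis listed directly, each with 'dbname' and 'url')
    # are handled separately below.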
results = results['sitematrix']
for entry in results:
if entry == 'count':
continue
if entry == 'specials':
for special_entry in results[entry]:
yield ({
"dbname" : special_entry['dbname'],
"wikiurl" : special_entry['url']
})
continue
for wiki in results[entry]['site']:
yield {
"dbname" : wiki['dbname'],
"wikiurl" : wiki['url']
}

# repo: nboley/grit | path: grit/simulator/reads_simulator.py | license: gpl-3.0
"""
Copyright (c) 2011-2015 Nathan Boley
This file is part of GRIT.
GRIT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GRIT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GRIT. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import os.path
import numpy
import pickle
import pysam
import math
from random import random
from collections import defaultdict
import tempfile
DEFAULT_QUALITY_SCORE = 'r'
DEFAULT_BASE = 'A'
DEFAULT_FRAG_LENGTH = 150
DEFAULT_READ_LENGTH = 100
DEFAULT_NUM_FRAGS = 100
NUM_NORM_SDS = 4
FREQ_GTF_STRINGS = [ 'freq', 'frac' ]
# add slide dir to sys.path and import frag_len mod
#sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), ".." ))
sys.path.insert(0, "/home/nboley/grit/grit/")
import grit.frag_len as frag_len
from grit.files.gtf import load_gtf
from grit.files.reads import clean_chr_name
def fix_chr_name(x):
return "chr" + clean_chr_name(x)
def get_transcript_sequence(transcript, fasta):
""" get the mRNA sequence of the transcript from the gene seq
"""
trans_seq = []
for start, stop in transcript.exons:
seq = fasta.fetch(fix_chr_name(transcript.chrm), start, stop+1)
trans_seq.append( seq.upper() )
trans_seq = "".join(trans_seq)
return trans_seq
def get_cigar( transcript, start, stop ):
"""loop through introns within the read and add #N to the cigar for each
intron add #M for portions of read which map to exons
"""
def calc_len(interval):
return interval[1]-interval[0]+1
cigar = []
# find the exon index of the start
genome_start = transcript.genome_pos(start)
start_exon = next(i for i, (e_start, e_stop) in enumerate(transcript.exons)
if genome_start >= e_start and genome_start <= e_stop)
genome_stop = transcript.genome_pos(stop-1)
stop_exon = next(i for i, (e_start, e_stop) in enumerate(transcript.exons)
if genome_stop >= e_start and genome_stop <= e_stop)
if start_exon == stop_exon:
return "%iM" % (stop-start)
tl = 0
# add the first overlap match
skipped_bases = sum(calc_len(e) for e in transcript.exons[:start_exon+1])
cigar.append("%iM" % (skipped_bases-start))
tl += skipped_bases-start
# add the first overlap intron
cigar.append("%iN" % calc_len(transcript.introns[start_exon]))
# add the internal exon and intron matches
for i in xrange(start_exon+1, stop_exon):
cigar.append("%iM" % calc_len(transcript.exons[i]))
cigar.append("%iN" % calc_len(transcript.introns[i]))
tl += calc_len(transcript.exons[i])
# add the last overlap match
skipped_bases = sum(e[1]-e[0]+1 for e in transcript.exons[:stop_exon])
cigar.append("%iM" % (stop-skipped_bases))
tl += stop - skipped_bases
assert tl == (stop-start)
return "".join(cigar)
def build_sam_line( transcript, read_len, offset, read_identifier, quality_string ):
"""build a single ended SAM formatted line with given inforamtion
"""
    # set flag to indicate strandedness of read matching that of the transcript
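    # (in the SAM spec, flag bit 0x10 (16) marks a read aligned to the
    # reverse strand)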
flag = 0
if transcript.strand == '+': flag += 16
# adjust start position to correct genomic position
start = transcript.genome_pos(offset)
# set cigar string corresponding to transcript and read offset
cigar = get_cigar( transcript, offset, (offset + read_len) )
# calculate insert size by difference of genomic offset and genomic offset+read_len
insert_size = transcript.genome_pos(offset+read_len) - transcript.genome_pos(offset)
# get slice of seq from transcript
seq = ( transcript.seq[ offset : (offset + read_len) ]
if transcript.seq != None else '*' )
# initialize sam lines with read identifiers and then add appropriate fields
sam_line = '\t'.join( (
read_identifier, str( flag ), fix_chr_name(transcript.chrm),
str(start+1),
'255', cigar, "*", '0', str( insert_size ), seq, quality_string,
"NM:i:0", "NH:i:1" ) ) + "\n"
return sam_line
def build_sam_lines( transcript, read_len, frag_len, offset,
read_identifier, read_quals ):
"""build paired end SAM formatted lines with given information
"""
# set ordered quals and reverse the qualities for the read on the negative strand
ordered_quals = read_quals
    # determine whether read1 should be the 5' read or vice versa
# and initialize attributes that are specific to a read number
# instead of 5' or 3' attribute
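    # (standard SAM flags for a proper FR pair: 99/147 = read 1 forward with
    # mate reversed, 83/163 = read 1 reversed with mate forward)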
if transcript.strand == '+':
up_strm_read, dn_strm_read = (0, 1)
flag = [ 99, 147 ]
ordered_quals[1] = ordered_quals[1][::-1]
else:
up_strm_read, dn_strm_read = (1, 0)
flag = [ 83, 163 ]
ordered_quals[0] = ordered_quals[0][::-1]
# get slice of seq from transcript
seq = ['*', '*']
if transcript.seq != None:
seq[ up_strm_read ] = transcript.seq[offset:(offset + read_len)]
seq[ dn_strm_read ] = transcript.seq[
(offset + frag_len - read_len):(offset + frag_len)]
# adjust five and three prime read start positions to correct genomic positions
start = [ transcript.start, transcript.start ]
start[ up_strm_read ] = transcript.genome_pos(offset)
start[ dn_strm_read ] = transcript.genome_pos(offset + frag_len - read_len)
# set cigar string for five and three prime reads
cigar = [ None, None ]
cigar[ up_strm_read ] = get_cigar( transcript, offset, (offset+read_len) )
cigar[ dn_strm_read ] = get_cigar(
transcript, (offset+frag_len-read_len), (offset + frag_len))
# calculate insert size by difference of the mapped start and end
insert_size = (
transcript.genome_pos(offset+read_len) - transcript.genome_pos(offset))
insert_size = [ insert_size, insert_size ]
insert_size[ dn_strm_read ] *= -1
# initialize sam lines with read identifiers and then add appropriate fields
sam_lines = [ read_identifier + '\t', read_identifier + '\t' ]
for i in (0,1):
other_i = 0 if i else 1
sam_lines[i] += '\t'.join( (
str( flag[i] ), fix_chr_name(transcript.chrm),
str( start[i]+1 ),"255",
cigar[i], "=", str( start[other_i]+1 ), str( insert_size[i] ),
seq[i], ordered_quals[i], "NM:i:0", "NH:i:1" ) ) + "\n"
return sam_lines
def write_fastq_lines( fp1, fp2, transcript, read_len, frag_len, offset,
read_identifier ):
"""STUB for writing fastq lines to running through alignment pipeline
"""
pass
def simulate_reads( genes, fl_dist, fasta, quals, num_frags, single_end,
full_fragment, read_len, assay='RNAseq'):
"""write a SAM format file with the specified options
"""
# global variable that stores the current read number, we use this to
# generate a unique id for each read.
global curr_read_index
curr_read_index = 1
def sample_fragment_length( fl_dist, transcript ):
"""Choose a random fragment length from fl_dist
"""
if assay == 'CAGE':
return read_len
# if the fl_dist is constant
if isinstance( fl_dist, int ):
assert fl_dist <= transcript.calc_length(), 'Transcript which ' + \
'cannot contain a valid fragment was included in transcripts.'
return fl_dist
# Choose a valid fragment length from the distribution
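        # (inverse-transform sampling: draw u ~ U(0,1) and locate it in the
        # cumulative density; lengths longer than the transcript are rejected
        # and redrawn)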
while True:
fl_index = fl_dist.fl_density_cumsum.searchsorted( random() ) - 1
fl = fl_index + fl_dist.fl_min
# if fragment_length is valid return it
if fl <= transcript.calc_length():
return fl
assert False
def sample_read_offset( transcript, fl ):
# calculate maximum offset
max_offset = transcript.calc_length() - fl
if assay in ('CAGE', 'RAMPAGE'):
if transcript.strand == '+': return 0
else: return max_offset
elif assay == 'RNAseq':
return int( random() * max_offset )
elif assay == 'PASseq':
if transcript.strand == '-': return 0
else: return max_offset
def get_random_qual_score( read_len ):
# if no quality score were provided
if not quals:
return DEFAULT_QUALITY_SCORE * read_len
# else return quality string from input quality file
# scores are concatenated to match read_len if necessary
else:
qual_string = ''
while len( qual_string ) < read_len:
qual_string += str( quals[ int(random() * len(quals) ) ] )
return qual_string[0:read_len]
def get_random_read_pos( transcript ):
while True:
# find a valid fragment length
fl = sample_fragment_length( fl_dist, transcript )
if (fl >= read_len) or full_fragment: break
# find a valid random read start position
offset = sample_read_offset( transcript, fl )
# get a unique string for this fragment
global curr_read_index
read_identifier = 'SIM:%015d:%s' % (curr_read_index, transcript.id)
curr_read_index += 1
return fl, offset, read_identifier
def build_random_sam_line( transcript, read_len ):
"""build a random single ended sam line
"""
fl, offset, read_identifier = get_random_read_pos( transcript )
if full_fragment:
read_len = fl
# get a random quality scores
if transcript.seq == None:
read_qual = '*'
else:
read_qual = get_random_qual_score( read_len )
# build the sam lines
return build_sam_line(
transcript, read_len, offset, read_identifier, read_qual )
def build_random_sam_lines( transcript, read_len ):
"""build random paired end sam lines
"""
fl, offset, read_identifier = get_random_read_pos( transcript )
# adjust read length so that paired end read covers the entire fragment
if full_fragment:
read_len = int( math.ceil( fl / float(2) ) )
# get two random quality scores
if transcript.seq == None:
read_quals = ['*', '*']
else:
read_quals = [ get_random_qual_score( read_len ),
get_random_qual_score( read_len ) ]
sam_lines = build_sam_lines(
transcript, read_len, fl, offset, read_identifier, read_quals )
return sam_lines
def get_fl_min():
if isinstance( fl_dist, int ):
return fl_dist
else:
return fl_dist.fl_min
def calc_scale_factor(t):
if assay in ('RNAseq',):
length = t.calc_length()
if length < fl_dist.fl_min: return 0
fl_min, fl_max = fl_dist.fl_min, min(length, fl_dist.fl_max)
allowed_fl_lens = numpy.arange(fl_min, fl_max+1)
weights = fl_dist.fl_density[
fl_min-fl_dist.fl_min:fl_max-fl_dist.fl_min+1]
mean_fl_len = float((allowed_fl_lens*weights).sum())
return length - mean_fl_len
elif assay in ('CAGE', 'RAMPAGE', 'PASseq'):
return 1.0
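    # For RNAseq, scaling FPKM by the effective length (transcript length
    # minus the mean fragment length) makes a transcript's weight proportional
    # to its expected number of fragments rather than its molar abundance,
    # which is what the sampling loop below requires.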
# initialize the transcript objects, and calculate their relative weights
transcript_weights = []
transcripts = []
contig_lens = defaultdict(int)
min_transcript_length = get_fl_min()
for gene in genes:
contig_lens[fix_chr_name(gene.chrm)] = max(
gene.stop+1000, contig_lens[fix_chr_name(gene.chrm)])
for transcript in gene.transcripts:
if fasta != None:
transcript.seq = get_transcript_sequence(transcript, fasta)
else:
transcript.seq = None
if transcript.fpkm != None:
weight = transcript.fpkm*calc_scale_factor(transcript)
elif transcript.frac != None:
assert len(genes) == 1
weight = transcript.frac
else:
weight = 1./len(gene.transcripts)
#assert False, "Transcript has neither an FPKM nor a frac"
transcripts.append( transcript )
transcript_weights.append( weight )
#assert False
    assert len( transcripts ) > 0, "No valid transcripts."
# normalize the transcript weights to be on 0,1
transcript_weights = numpy.array(transcript_weights, dtype=float)
transcript_weights = transcript_weights/transcript_weights.sum()
transcript_weights_cumsum = transcript_weights.cumsum()
# update the contig lens from the fasta file, if available
if fasta != None:
for name, length in zip(fasta.references, fasta.lengths):
if fix_chr_name(name) in contig_lens:
                contig_lens[fix_chr_name(name)] = max(
                    length, contig_lens[fix_chr_name(name)])
# create the output directory
bam_prefix = assay + ".sorted"
with tempfile.NamedTemporaryFile( mode='w+' ) as sam_fp:
# write out the header
for contig, contig_len in contig_lens.iteritems():
data = ["@SQ", "SN:%s" % contig, "LN:%i" % contig_len]
sam_fp.write("\t".join(data) + "\n")
while curr_read_index <= num_frags:
# pick a transcript to randomly take a read from. Note that they
# should be chosen in proportion to the *expected number of reads*,
# not their relative frequencies.
transcript_index = \
transcript_weights_cumsum.searchsorted( random(), side='left' )
transcript = transcripts[ transcript_index ]
if single_end:
sam_line_s = build_random_sam_line( transcript, read_len )
else:
sam_line_s = build_random_sam_lines( transcript, read_len )
sam_fp.writelines( sam_line_s )
# create sorted bam file and index it
sam_fp.flush()
#sam_fp.seek(0)
#print sam_fp.read()
call = 'samtools view -bS {} | samtools sort - {}'
os.system( call.format( sam_fp.name, bam_prefix ) )
os.system( 'samtools index {}.bam'.format( bam_prefix ) )
return
def build_objs( gtf_fp, fl_dist_const,
fl_dist_norm, full_fragment,
read_len, fasta_fn, qual_fn ):
genes = load_gtf( gtf_fp )
gtf_fp.close()
def build_normal_fl_dist( fl_mean, fl_sd ):
fl_min = max( 0, fl_mean - (fl_sd * NUM_NORM_SDS) )
fl_max = fl_mean + (fl_sd * NUM_NORM_SDS)
fl_dist = frag_len.build_normal_density( fl_min, fl_max, fl_mean, fl_sd )
return fl_dist
if fl_dist_norm:
fl_dist = build_normal_fl_dist( fl_dist_norm[0], fl_dist_norm[1] )
assert fl_dist.fl_max > read_len or full_fragment, \
'Invalid fragment length distribution and read length!!!'
else:
assert read_len < fl_dist_const or full_fragment, \
'Invalid read length and constant fragment length!!!'
fl_dist = fl_dist_const
if fasta_fn:
# create indexed fasta file handle object with pysam
fasta = pysam.Fastafile( fasta_fn )
else:
fasta = None
    # if qual_fn is None, quals remains empty and reads will default to
    # all base qualities of DEFAULT_QUALITY_SCORE
quals = []
if qual_fn:
        with open( qual_fn ) as quals_fp:
for line in quals_fp:
quals.append( line.strip() )
quals = numpy.array( quals )
return genes, fl_dist, fasta, quals
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(\
        description='Produce simulated reads in a perfectly aligned BAM file.' )
# gtf is the only required argument
parser.add_argument( 'gtf', type=file, \
help='GTF file from which to produce simulated reads ' + \
'(Note: Only the first trascript from this file will ' + \
'be simulated)' )
parser.add_argument(
'--assay', choices=['RNAseq', 'RAMPAGE', 'CAGE', 'PASseq'],
default='RNAseq', help='Which assay type to simulate from' )
# fragment length distribution options
parser.add_argument( '--fl-dist-const', type=int, default=DEFAULT_FRAG_LENGTH, \
help='Constant length fragments. (default: ' + \
'%(default)s)' )
parser.add_argument( '--fl-dist-norm', \
help='Mean and standard deviation (format "mn:sd") ' + \
'used to create normally distributed fragment lengths.' )
    # files providing quality and sequence information
parser.add_argument( '--fasta', '-f', \
help='Fasta file from which to create reads ' + \
'(default: all sequences are "' + DEFAULT_BASE + \
'" * length of sequence)' )
parser.add_argument( '--quality', '-q', \
help='Flat file containing one FASTQ quality score ' + \
'per line, created with get_quals.sh. (default: ' + \
'quality strings are "' + str(DEFAULT_QUALITY_SCORE) + \
'" * length of sequence.)' )
# type and number of fragments requested
parser.add_argument(
'--num-frags', '-n', type=int, default=1000,
        help='Total number of fragments to create across all transcripts')
parser.add_argument('--single-end', action='store_true', default=False,
help='Produce single-end reads.' )
parser.add_argument('--paired-end', dest='single_end', action='store_false',
help='Produce paired-end reads. (default)' )
# XXX not sure if this works
#parser.add_argument(
# '--full-fragment', action='store_true', default=False,
# help='Produce reads spanning the entire fragment.')
parser.add_argument( '--read-len', '-r', type=int, default=DEFAULT_READ_LENGTH, \
help='Length of reads to produce in base pairs ' + \
'(default: %(default)s)' )
# output options
parser.add_argument( '--out_prefix', '-o', default='simulated_reads', \
help='Prefix for output FASTQ/BAM file ' + \
'(default: %(default)s)' )
parser.add_argument( '--verbose', '-v', default=False, action='store_true', \
help='Print status information.' )
args = parser.parse_args()
# set to false, but we may want to bring this option back
args.full_fragment = False
global VERBOSE
VERBOSE = args.verbose
if args.assay == 'CAGE':
args.read_len = 28
args.single_end = True
# parse normal distribution argument
if args.fl_dist_norm:
try:
mean, sd = args.fl_dist_norm.split( ':' )
args.fl_dist_norm = [ int( mean ), int( sd ) ]
except ValueError:
args.fl_dist_norm = None
print >> sys.stderr, \
"WARNING: User input mean and sd are not formatted correctly.\n"+\
"\tUsing default values.\n"
return ( args.gtf, args.fl_dist_const, args.fl_dist_norm,
args.fasta, args.quality, args.num_frags,
args.single_end, args.full_fragment,
args.read_len, args.out_prefix, args.assay )
def main():
( gtf_fp, fl_dist_const, fl_dist_norm, fasta_fn, qual_fn,
num_frags, single_end, full_fragment, read_len, out_prefix, assay )\
= parse_arguments()
try: os.mkdir(out_prefix)
except OSError:
ofname = os.path.join(out_prefix, assay + '.sorted.bam')
if os.path.isfile(ofname):
raise OSError, "File '%s' already exists" % ofname
os.chdir(out_prefix)
genes, fl_dist, fasta, quals = build_objs(
gtf_fp, fl_dist_const,
fl_dist_norm, full_fragment, read_len,
fasta_fn, qual_fn )
"""
for gene in genes:
for t in gene.transcripts:
t.chrm = "chr" + t.chrm
print t.build_gtf_lines(gene.id, {})
assert False
"""
simulate_reads( genes, fl_dist, fasta, quals, num_frags, single_end,
full_fragment, read_len, assay=assay )
if __name__ == "__main__":
main()

# repo: Netflix-Skunkworks/iep-apps | path: atlas-slotting/src/scripts/lift-data.py | license: apache-2.0
#!/usr/bin/env python3
# Copyright 2014-2019 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gzip
import json
import pprint
from argparse import Namespace
from datetime import datetime
from typing import Dict, List
import boto3
import requests
import sys
from boto3.dynamodb.types import Binary
from botocore.exceptions import ClientError, ProfileNotFound
def parse_args() -> Namespace:
parser = argparse.ArgumentParser(description='Lift slotting data from Edda into DynamoDB')
parser.add_argument('--profile', type=str, required=True,
help='AWS credentials profile used to write to the Atlas Slotting DynamoDB table')
parser.add_argument('--region', type=str, nargs='+', required=True,
choices=['eu-west-1', 'us-east-1', 'us-west-1', 'us-west-2'],
help='List of AWS regions where data will be lifted from Edda into DynamoDB')
parser.add_argument('--edda_name', type=str, required=True,
help='Edda DNS name, with a region placeholder, where data will be read')
parser.add_argument('--slotting_table', type=str, required=True,
help='Atlas Slotting DynamoDB table name, where data will be written')
parser.add_argument('--app_name', type=str, nargs='+', required=True,
help='List of application names that will be lifted')
parser.add_argument('--dryrun', action='store_true', required=False, default=False,
help='Enable dryrun mode, to preview changes')
return parser.parse_args()
def get_edda_data(args: Namespace, region: str) -> List[Dict]:
url = f'http://{args.edda_name.format(region)}/api/v2/group/autoScalingGroups;_expand'
r = requests.get(url)
if not r.ok:
print(f'ERROR: Failed to load Edda data from {url}')
sys.exit(1)
else:
return [asg for asg in r.json() if asg['name'].split('-')[0] in args.app_name]
def get_ddb_table(args: Namespace, region: str):
try:
session = boto3.session.Session(profile_name=args.profile)
except ProfileNotFound:
print(f'ERROR: AWS profile {args.profile} does not exist')
sys.exit(1)
dynamodb = session.resource('dynamodb', region_name=region)
table = dynamodb.Table(args.slotting_table)
try:
table.table_status
except ClientError as e:
code = e.response['Error']['Code']
if code == 'ExpiredTokenException':
print(f'ERROR: Security token in AWS profile {args.profile} has expired')
elif code == 'ResourceNotFoundException':
print(f'ERROR: Table {args.slotting_table} does not exist in {region}')
else:
pprint.pprint(e.response)
sys.exit(1)
return table
def lift_data(args: Namespace, region: str):
asgs = get_edda_data(args, region)
table = get_ddb_table(args, region)
for asg in asgs:
item = {
'name': asg['name'],
'active': True,
'data': Binary(gzip.compress(bytes(json.dumps(asg), encoding='utf-8'))),
'timestamp': int(datetime.utcnow().timestamp() * 1000)
}
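        # The ASG JSON is stored gzip-compressed as a DynamoDB Binary value,
        # which keeps large groups comfortably under DynamoDB's 400 KB item
        # size limit.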
if args.dryrun:
print(f'DRYRUN: PUT {asg["name"]}')
else:
print(f'PUT {asg["name"]}')
table.put_item(Item=item)
def main():
args = parse_args()
print('==== config ====')
print(f'AWS Profile: {args.profile}')
print(f'Source Edda: {args.edda_name}')
print(f'Destination Table: {args.slotting_table}')
for region in args.region:
print(f'==== {region} ====')
lift_data(args, region)
if __name__ == "__main__":
main()

# repo: nemonik/CoCreateLite | path: ccl-cookbook/files/default/cocreatelite/cocreate/views/playgrounds.py | license: bsd-3-clause
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from ..models import VMPlayground
from ..forms import VMPlaygroundForm, VMPlaygroundDescriptionForm, VMPlaygroundUserAccessForm, VMPlaygroundGroupAccessForm
from . import util
from ..util import single_user_mode
"""
View controllers for playground data
"""
@single_user_mode
def index(request):
"""
Show the list of playgrounds for this user.
"""
# determine all of the playgrounds this user has access to
groupids = [group.id for group in request.user.groups.all()]
print ("Group ids: " + str(groupids))
playgrounds = VMPlayground.objects.filter(creator = request.user) | VMPlayground.objects.filter(access_users__id = request.user.id) | VMPlayground.objects.filter(access_groups__id__in = groupids)
# determine all of the demo boxes from a set of playgrounds
demos = []
for playground in playgrounds:
demos = demos + playground.getDemos()
context = {
"playgrounds": playgrounds,
"demos": demos
}
return render(request, "playgrounds.html", util.fillContext(context, request))
@single_user_mode
def add(request):
"""
Add a new playground.
"""
if request.method == 'GET':
form = VMPlaygroundForm()
elif request.method == 'POST':
form = VMPlaygroundForm(request.POST)
if form.is_valid():
# hooray, let's create the playground
playground = VMPlayground.objects.create(
name = form.data['name'],
creator = request.user,
description = form.data['description'],
description_is_markdown = form.data.get('description_is_markdown', False),
environment = form.data['environment'],
)
playground.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form}
return render(request, "addPlayground.html", util.fillContext(opts, request))
@single_user_mode
def remove(request, playground_id):
"""
Remove a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
    for sandbox in playground.sandboxes.all():
        sandbox.delete()
playground.delete()
return HttpResponseRedirect(reverse("playgrounds"))
@single_user_mode
def playground(request, playground_id):
"""
Show the details for this playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
opts = {"playground": playground}
return render(request, "newPlaygroundDetails.html", util.fillContext(opts, request))
@single_user_mode
def alterUserAccess(request, playground_id):
"""
Alter the access control list for a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundUserAccessForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundUserAccessForm(request.POST, instance=playground)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "alterPlaygroundUserAccess.html", util.fillContext(opts, request))
@single_user_mode
def alterGroupAccess(request, playground_id):
"""
Alter the access control list for a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundGroupAccessForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundGroupAccessForm(request.POST, instance=playground)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "alterPlaygroundGroupAccess.html", util.fillContext(opts, request))
@single_user_mode
def editDesc(request, playground_id):
"""
Alter or edit the description of the playground
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundDescriptionForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundDescriptionForm(request.POST)
if form.is_valid():
playground.description_is_markdown = form.data['description_is_markdown']
playground.description = form.data['description']
playground.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "editPlaygroundDesc.html", util.fillContext(opts, request))

# repo: roscopecoltran/scraper | path: .staging/meta-engines/xlinkBook/update/spider.py | license: mit
#!/usr/bin/env python
#author: wowdd1
#mail: [email protected]
#data: 2014.12.09
import requests
import json
from bs4 import BeautifulSoup;
import os,sys
import time
import re
from all_subject import subject_dict, need_update_subject_list
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append("..")
from record import Category
class Spider:
google = None
baidu = None
bing = None
yahoo = None
db_dir = None
zh_re = None
shcool = None
subject = None
url = None
count = None
deep_mind = None
category = ''
category_obj = None
proxies = {
"http": "http://127.0.0.1:8087",
"https": "http://127.0.0.1:8087",
}
proxies2 = {
"http": "http://127.0.0.1:8787",
"https": "http://127.0.0.1:8787",
}
def __init__(self):
self.google = "https://www.google.com.hk/?gws_rd=cr,ssl#safe=strict&q="
self.baidu = "http://www.baidu.com/s?word="
self.bing = "http://cn.bing.com/search?q=a+b&go=Submit&qs=n&form=QBLH&pq="
self.yahoo = "https://search.yahoo.com/search;_ylt=Atkyc2y9pQQo09zbTUWM4CWbvZx4?p="
self.db_dir = os.path.abspath('.') + "/../" + "db/"
self.zh_re=re.compile(u"[\u4e00-\u9fa5]+")
self.school = None
self.subject = None
self.url = None
self.count = 0
self.deep_mind = False
self.category_obj = Category()
def doWork(self):
return
def requestWithProxy(self, url):
return requests.get(url, proxies=self.proxies, verify=False)
def requestWithProxy2(self, url):
return requests.get(url, proxies=self.proxies2, verify=False)
def format_subject(self, subject):
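        """Map a subject string onto its canonical name from subject_dict,
        preserving any sub-path after the first '/' and ensuring the result
        ends with '/'.
        """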
match_list = []
for (k, v) in subject_dict.items():
if subject.find('/') != -1 and subject.lower()[0:subject.find('/')].strip().find(k.lower()) != -1:
match_list.append(k)
elif subject.find('/') == -1 and subject.lower().strip().find(k.lower()) != -1:
match_list.append(k)
result = subject
if len(match_list) > 1:
max_len = 0
for key in match_list:
if key.lower() == subject[0: subject.find(' ')].lower().strip():
result = subject_dict[key]
break
if len(key) > max_len:
max_len = len(key)
result = subject_dict[key]
elif len(match_list) == 1:
#print subject_dict[match_list[0]]
result = subject_dict[match_list[0]]
#print subject
if result != subject and subject.find('/') != -1:
last_index = 0
while subject.find('/', last_index + 1) != -1:
last_index = subject.find('/', last_index + 1)
return result + subject[subject.find('/') : last_index + 1]
elif result != subject:
return result + "/"
else:
if subject.strip()[len(subject) - 1 : ] != '/':
return subject + "/"
else:
return subject
def need_update_subject(self, subject):
subject_converted = self.format_subject(subject)
if subject_converted[len(subject_converted) - 1 : ] == '/':
subject_converted = subject_converted[0 : len(subject_converted) - 1]
for item in need_update_subject_list:
if subject_converted.find(item) != -1:
return True
        print subject + " is not configured in all_subject.py, ignoring it"
return False
def replace_sp_char(self, text):
while text.find('/') != -1:
text = text[text.find('/') + 1 : ]
return text.replace(",","").replace("&","").replace(":","").replace("-"," ").replace(" "," ").replace(" ","-").lower()
def get_file_name(self, subject, school):
dir_name = self.format_subject(subject)
return self.db_dir + dir_name + self.replace_sp_char(subject) + "-" + school + time.strftime("%Y")
def create_dir_by_file_name(self, file_name):
if os.path.exists(file_name) == False:
index = 0
for i in range(0, len(file_name)):
if file_name[i] == "/":
index = i
if index > 0:
if os.path.exists(file_name[0:index]) == False:
print "creating " + file_name[0:index] + " dir"
os.makedirs(file_name[0:index])
def open_db(self, file_name, append=False):
self.create_dir_by_file_name(file_name)
flag = 'w'
if append:
flag = 'a'
try:
f = open(file_name, flag)
except IOError, err:
print str(err)
return f
def do_upgrade_db(self, file_name):
tmp_file = file_name + ".tmp"
if os.path.exists(file_name) and os.path.exists(tmp_file):
print "upgrading..."
#os.system("diff -y --suppress-common-lines -EbwBi " + file_name + " " + file_name + ".tmp " + "| colordiff")
#print "remove " + file_name[file_name.find("db"):]
os.remove(file_name)
#print "rename " + file_name[file_name.find("db"):] + ".tmp"
os.rename(tmp_file, file_name)
print "upgrade done"
elif os.path.exists(tmp_file):
print "upgrading..."
#print "rename " + file_name[file_name.find("db"):] + ".tmp"
os.rename(tmp_file, file_name)
print "upgrade done"
else:
print "upgrade error"
def cancel_upgrade(self, file_name):
if os.path.exists(file_name + ".tmp"):
os.remove(file_name + ".tmp")
def close_db(self, f):
f.close()
def write_db(self, f, course_num, course_name, url, describe=""):
#if url == "":
# url = self.google + course_num + " " + course_name
if self.category != '' and describe.find('category:') == -1:
describe += ' category:' + self.category
f.write(course_num.strip() + " | " + course_name.replace("|","") + " | " + url + " | " + describe + "\n")
def get_storage_format(self,course_num, course_name, url, describe=""):
if url == "":
url = self.google + course_num + " " + course_name
return course_num.strip() + " | " + course_name.replace("|","") + " | " + url + " | " + describe
def countFileLineNum(self, file_name):
if os.path.exists(file_name):
line_count = len(open(file_name,'rU').readlines())
return line_count
return 0
def truncateUrlData(self, dir_name):
print "truncateUrlData ...."
self.create_dir_by_file_name(get_url_file_name(dir_name))
f = open(get_url_file_name(dir_name), "w+")
f.truncate()
f.close
def delZh(self, text):
if isinstance(text, unicode):
list_u = self.zh_re.findall(text)
if len(list_u) > 0 :
last_ele = list_u[len(list_u) - 1]
last_pos = text.find(last_ele)
first_pos = text.find(list_u[0])
title = ""
if first_pos == 0:
title = text[last_pos + len(last_ele):]
else:
title = text[0:first_pos] + text[last_pos + len(last_ele):].strip()
if title.find("|") != -1:
title = title.replace("|", "").strip()
return title
return text
def getKeyValue(self, option):
value_pos = option.find("value=") + 7
return option[value_pos : option.find('"', value_pos)], option[option.find(">") + 1 : option.find("</", 2)].replace("&", "").replace("\n", "").strip()

# repo: codeforamerica/westsac-urban-land-locator | path: farmsList/public/views.py | license: bsd-3-clause
# -*- coding: utf-8 -*-
'''Public section, including homepage and signup.'''
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask_mail import Message
from flask.ext.login import login_user, login_required, logout_user
from farmsList.extensions import mail, login_manager
from farmsList.user.models import User
from farmsList.public.forms import LoginForm, ContactLandOwnerForm
from farmsList.public.models import Farmland
from farmsList.user.forms import RegisterForm
from farmsList.user.models import Email
from farmsList.utils import flash_errors
from farmsList.database import db
blueprint = Blueprint('public', __name__, static_folder="../static")
@login_manager.user_loader
def load_user(id):
return User.get_by_id(int(id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route('/logout/')
@login_required
def logout():
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route("/register/", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
password=form.password.data,
active=True)
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route("/contact-land-owner/<int:farmlandId>", methods=["GET", "POST"])
def contactLandOwner(farmlandId):
form = ContactLandOwnerForm(request.form)
farmland = Farmland.query.filter(Farmland.id == farmlandId).all()[0]
if form.validate_on_submit():
address = "Unknown" if farmland.address is None else farmland.address
mainBodyContent = ("<p style=\"margin-left: 50px;\">"
"<b>Name:</b> " + form.name.data + "<br>"
"<b>Email:</b> " + form.email.data + "<br>"
"<b>Phone:</b> " + form.phone.data + "<br>"
"</p>"
"<p style=\"margin-left: 50px;\">"
"<b>What is your past experience farming?</b><br>"
"" + form.experience.data + "</p>"
"<p><br>Thanks,<br>"
"Acres"
"</p>")
# msg = Message("Inquiry: " + address + " Property", recipients=["[email protected]")
msg = Message("Inquiry: " + address + " Property", recipients=[farmland.email])
msg.html = ("<html>"
"<body>"
"<p>Someone has contacted you about your " + address + " property:</p>"
"" + mainBodyContent + ""
"</body>"
"</html>")
mail.send(msg)
Email.create(sender=msg.sender,
recipients=",".join(msg.recipients),
body=msg.html)
msg = Message("Inquiry: " + address + " Property", recipients=[form.email.data])
msg.html = ("<html>"
"<body>"
"<p>Just a note that we sent your request for more information about the " + address + " property to " + farmland.ownerName + ":</p>"
"" + mainBodyContent + ""
"</body>"
"</html>")
mail.send(msg)
Email.create(sender=msg.sender,
recipients=",".join(msg.recipients),
body=msg.html)
flash("Thanks for your inquiry! We sent your email for more information about the property. " + farmland.ownerName + " will follow up with you shortly.", 'info')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template("public/contact-land-owner.html", form=form, farmland=farmland)
@blueprint.route("/farmland-details/<int:farmlandId>")
def farmlandDetails(farmlandId):
return render_template("public/farmland-details.html")
@blueprint.route("/farmland-approval/<int:farmlandId>")
def farmlandApproval(farmlandId):
return render_template("public/farmland-approval.html")
@blueprint.route("/find-land/")
def find_land():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/find_land.html", form=form)

# repo: basicthinker/Sexain-MemController | path: gem5-stable/src/mem/SimpleMemory.py | license: apache-2.0
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Andreas Hansson
from m5.params import *
from AbstractMemory import *
class SimpleMemory(AbstractMemory):
type = 'SimpleMemory'
cxx_header = "mem/simple_mem.hh"
port = SlavePort("Slave ports")
latency = Param.Latency('40ns', "Latency on row buffer hit")
latency_miss = Param.Latency('80ns', "Latency on row buffer miss")
latency_var = Param.Latency('0ns', "Request to response latency variance")
# The memory bandwidth limit default is set to 12.8GB/s which is
# representative of a x64 DDR3-1600 channel.
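    # (1600 MT/s x 8 bytes per transfer on a 64-bit channel = 12.8 GB/s)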
bandwidth = Param.MemoryBandwidth('12.8GB/s',
"Combined read and write bandwidth")
lat_att_operate = Param.Latency('3ns', "ATT operation latency")
lat_buffer_operate = Param.Latency('3ns',
"Version buffer operation latency")
lat_nvm_read = Param.Latency('128ns', "NVM read latency")
lat_nvm_write = Param.Latency('368ns', "NVM write latency")
disable_timing = Param.Bool(True, "If THNVM is not timed")

# repo: ubc/compair | path: alembic/versions/316f3b73962c_modified_criteria_tables.py | license: gpl-3.0
"""modified criteria tables
Revision ID: 316f3b73962c
Revises: 2fe3d8183c34
Create Date: 2014-09-10 15:42:55.963855
"""
# revision identifiers, used by Alembic.
revision = '316f3b73962c'
down_revision = '2fe3d8183c34'
import logging
from alembic import op
import sqlalchemy as sa
from sqlalchemy import UniqueConstraint, exc
from sqlalchemy.sql import text
from compair.models import convention
def upgrade():
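    # The unique constraint may exist under its convention-based name
    # ('uq_Criteria_name') or under the column-derived name MySQL assigns
    # ('name'); SQLite cannot drop it at all, so each case is tried in turn.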
try:
with op.batch_alter_table('Criteria', naming_convention=convention,
table_args=(UniqueConstraint('name'))) as batch_op:
batch_op.drop_constraint('uq_Criteria_name', type_='unique')
except exc.InternalError:
with op.batch_alter_table('Criteria', naming_convention=convention,
table_args=(UniqueConstraint('name'))) as batch_op:
batch_op.drop_constraint('name', type_='unique')
except ValueError:
        logging.warning('Drop unique constraint is not supported for SQLite, dropping uq_Criteria_name ignored!')
# set existing criteria's active attribute to True using server_default
with op.batch_alter_table('CriteriaAndCourses', naming_convention=convention) as batch_op:
batch_op.add_column(sa.Column('active', sa.Boolean(), default=True, server_default='1', nullable=False))
with op.batch_alter_table('Criteria', naming_convention=convention) as batch_op:
batch_op.add_column(sa.Column('public', sa.Boolean(), default=False, server_default='0', nullable=False))
# set the first criteria as public
t = {"name": "Which is better?", "public": True}
op.get_bind().execute(text("Update Criteria set public=:public where name=:name"), **t)
def downgrade():
with op.batch_alter_table('Criteria', naming_convention=convention,
table_args=(UniqueConstraint('name'))) as batch_op:
batch_op.create_unique_constraint('uq_Criteria_name', ['name'])
batch_op.drop_column('public')
with op.batch_alter_table('CriteriaAndCourses', naming_convention=convention) as batch_op:
batch_op.drop_column('active')

# repo: BT-jmichaud/l10n-switzerland | path: l10n_ch_payment_slip/tests/test_payment_slip.py | license: agpl-3.0
# -*- coding: utf-8 -*-
# © 2014-2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import time
import re
import odoo.tests.common as test_common
from odoo.report import render_report
class TestPaymentSlip(test_common.TransactionCase):
_compile_get_ref = re.compile(r'[^0-9]')
def make_bank(self):
company = self.env.ref('base.main_company')
self.assertTrue(company)
partner = self.env.ref('base.main_partner')
self.assertTrue(partner)
bank = self.env['res.bank'].create(
{
'name': 'BCV',
'ccp': '01-1234-1',
'bic': '23452345',
'clearing': '234234',
}
)
bank_account = self.env['res.partner.bank'].create(
{
'partner_id': partner.id,
'bank_id': bank.id,
'bank_bic': bank.bic,
'acc_number': '01-1234-1',
'bvr_adherent_num': '1234567',
'print_bank': True,
'print_account': True,
'print_partner': True,
}
)
bank_account.onchange_acc_number_set_swiss_bank()
self.assertEqual(bank_account.ccp, '01-1234-1')
return bank_account
def make_invoice(self):
if not hasattr(self, 'bank_account'):
self.bank_account = self.make_bank()
account_model = self.env['account.account']
account_debtor = account_model.search([('code', '=', '1100')])
if not account_debtor:
account_debtor = account_model.create({
'code': 1100,
'name': 'Debitors',
'user_type_id':
self.env.ref('account.data_account_type_receivable').id,
'reconcile': True,
})
account_sale = account_model.search([('code', '=', '3200')])
if not account_sale:
account_sale = account_model.create({
'code': 3200,
'name': 'Goods sales',
'user_type_id':
self.env.ref('account.data_account_type_revenue').id,
'reconcile': False,
})
invoice = self.env['account.invoice'].create({
'partner_id': self.env.ref('base.res_partner_12').id,
'reference_type': 'none',
'name': 'A customer invoice',
'account_id': account_debtor.id,
'type': 'out_invoice',
'partner_bank_id': self.bank_account.id
})
self.env['account.invoice.line'].create({
'account_id': account_sale.id,
'product_id': False,
'quantity': 1,
'price_unit': 862.50,
'invoice_id': invoice.id,
'name': 'product that cost 862.50 all tax included',
})
invoice.action_invoice_open()
# waiting for the cache to refresh
attempt = 0
while not invoice.move_id:
invoice.refresh()
time.sleep(0.1)
attempt += 1
if attempt > 20:
break
return invoice
def test_invoice_confirmation(self):
"""Test that confirming an invoice generate slips correctly"""
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
for line in invoice.move_id.line_ids:
if line.account_id.user_type_id.type in ('payable', 'receivable'):
self.assertTrue(line.transaction_ref)
else:
self.assertFalse(line.transaction_ref)
for line in invoice.move_id.line_ids:
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
if line.account_id.user_type_id.type in ('payable', 'receivable'):
self.assertTrue(slip)
self.assertEqual(slip.amount_total, 862.50)
self.assertEqual(slip.invoice_id.id, invoice.id)
else:
self.assertFalse(slip)
def test_slip_validity(self):
"""Test that confirming slip are valid"""
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
for line in invoice.move_id.line_ids:
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
if line.account_id.user_type_id.type in ('payable', 'receivable'):
self.assertTrue(slip.reference)
self.assertTrue(slip.scan_line)
self.assertTrue(slip.slip_image)
self.assertTrue(slip.a4_pdf)
inv_num = line.invoice_id.number
line_ident = self._compile_get_ref.sub(
'', "%s%s" % (inv_num, line.id)
)
self.assertIn(line_ident, slip.reference.replace(' ', ''))
def test_print_report(self):
invoice = self.make_invoice()
data, format = render_report(
self.env.cr,
self.env.uid,
[invoice.id],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice',
{},
context={'force_pdf': True},
)
self.assertTrue(data)
self.assertEqual(format, 'pdf')
def test_print_multi_report_merge_in_memory(self):
# default value as in memory
self.assertEqual(self.env.user.company_id.merge_mode, 'in_memory')
invoice1 = self.make_invoice()
invoice2 = self.make_invoice()
data, format = render_report(
self.env.cr,
self.env.uid,
[invoice1.id, invoice2.id],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice',
{},
context={'force_pdf': True},
)
self.assertTrue(data)
self.assertEqual(format, 'pdf')
def test_print_multi_report_merge_on_disk(self):
self.env.user.company_id.merge_mode = 'on_disk'
invoice1 = self.make_invoice()
invoice2 = self.make_invoice()
data, format = render_report(
self.env.cr,
self.env.uid,
[invoice1.id, invoice2.id],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice',
{},
context={'force_pdf': True},
)
self.assertTrue(data)
self.assertEqual(format, 'pdf')
def test_address_format(self):
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
address_lines = slip._get_address_lines(com_partner)
self.assertEqual(
address_lines,
[u'93, Press Avenue', u'', u'73377 Le Bourget du Lac']
)
def test_address_format_no_country(self):
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
com_partner.country_id = False
address_lines = slip._get_address_lines(com_partner)
self.assertEqual(
address_lines,
[u'93, Press Avenue', u'', u'73377 Le Bourget du Lac']
)
def test_address_format_special_format(self):
""" Test special formating without street2 """
ICP = self.env['ir.config_parameter']
ICP.set_param(
'bvr.address.format',
"%(street)s\n%(zip)s %(city)s"
)
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
com_partner.country_id = False
address_lines = slip._get_address_lines(com_partner)
self.assertEqual(
address_lines,
[u'93, Press Avenue', u'73377 Le Bourget du Lac']
)
def test_address_length(self):
invoice = self.make_invoice()
self.assertTrue(invoice.move_id)
line = invoice.move_id.line_ids[0]
slip = self.env['l10n_ch.payment_slip'].search(
[('move_line_id', '=', line.id)]
)
com_partner = slip.get_comm_partner()
address_lines = slip._get_address_lines(com_partner)
f_size = 11
len_tests = [
(15, (11, None)),
(23, (11, None)),
(26, (10, None)),
(27, (10, None)),
(30, (9, None)),
(32, (8, 34)),
(34, (8, 34)),
(40, (8, 34))]
for text_len, result in len_tests:
com_partner.name = 'x' * text_len
res = slip._get_address_font_size(
f_size, address_lines, com_partner)
self.assertEqual(res, result, "Wrong result for len %s" % text_len)
def test_print_bvr(self):
invoice = self.make_invoice()
bvr = invoice.print_bvr()
self.assertEqual(bvr['report_name'],
'l10n_ch_payment_slip.one_slip_per_page_from_invoice')
self.assertEqual(bvr['report_file'],
'l10n_ch_payment_slip.one_slip_per_page')

# repo: ncphillips/django_rpg | path: rpg_base/models/encounter.py | license: mit
from django.db import models
class EncounterManager(models.Manager):
def enemy_npcs(self):
pass
def friendly_npcs(self):
pass
def players(self):
return super(EncounterManager, self).get_queryset().filter(character__player_owned=True)
class Encounter(models.Model):
name = models.CharField(max_length=75)
campaign = models.ForeignKey("Campaign")
is_running = models.BooleanField(default=False)
round = models.PositiveIntegerField(default=0)
objects = EncounterManager()
class Meta:
app_label = "rpg_base"
def __unicode__(self):
return self.name
def start(self):
"""
        Sets `is_running` to True and initializes NPCs and their initiative.
"""
for row in self.charactertemplateinencounter_set.all():
num = row.num
template = row.character_template
encounter = row.encounter
characters = template.create_characters(encounter.campaign, num=num)
for character in characters:
CharacterInEncounter.objects.create(character=character,
encounter=encounter,
hp_current=character.hp,
initiative=0)
# TODO Roll everyone's initiative.
self.is_running = True
self.save()
def end(self):
# Sum experience from enemy NPCs
# Split experience amongst players
self.is_running = False
self.save()
class CharacterInEncounter(models.Model):
"""
Characters have a rolled Initiative specific to an encounter, as well as
Hit Points.
"""
character = models.ForeignKey("Character")
encounter = models.ForeignKey(Encounter)
hp_current = models.IntegerField()
    initiative = models.PositiveIntegerField()

# repo: SymbiFlow/edalize | path: edalize/trellis.py | license: bsd-2-clause
# Copyright edalize contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os.path
from edalize.edatool import Edatool
from edalize.yosys import Yosys
from importlib import import_module
class Trellis(Edatool):
argtypes = ['vlogdefine', 'vlogparam']
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
yosys_help = Yosys.get_doc(api_ver)
trellis_help = {
'lists' : [
{'name' : 'nextpnr_options',
'type' : 'String',
'desc' : 'Additional options for nextpnr'},
{'name' : 'yosys_synth_options',
'type' : 'String',
'desc' : 'Additional options for the synth_ecp5 command'},
]}
combined_members = []
combined_lists = trellis_help['lists']
yosys_members = yosys_help['members']
yosys_lists = yosys_help['lists']
combined_members.extend(m for m in yosys_members if m['name'] not in [i['name'] for i in combined_members])
combined_lists.extend(l for l in yosys_lists if l['name'] not in [i['name'] for i in combined_lists])
return {'description' : "Project Trellis enables a fully open-source flow for ECP5 FPGAs using Yosys for Verilog synthesis and nextpnr for place and route",
'members' : combined_members,
'lists' : combined_lists}
def configure_main(self):
# Write yosys script file
(src_files, incdirs) = self._get_fileset_files()
yosys_synth_options = self.tool_options.get('yosys_synth_options', [])
yosys_synth_options = ["-nomux"] + yosys_synth_options
yosys_edam = {
'files' : self.files,
'name' : self.name,
'toplevel' : self.toplevel,
'parameters' : self.parameters,
'tool_options' : {'yosys' : {
'arch' : 'ecp5',
'yosys_synth_options' : yosys_synth_options,
'yosys_as_subtool' : True,
}
}
}
yosys = getattr(import_module("edalize.yosys"), 'Yosys')(yosys_edam, self.work_root)
yosys.configure()
lpf_files = []
for f in src_files:
if f.file_type == 'LPF':
lpf_files.append(f.name)
elif f.file_type == 'user':
pass
if not lpf_files:
lpf_files = ['empty.lpf']
with open(os.path.join(self.work_root, lpf_files[0]), 'a'):
os.utime(os.path.join(self.work_root, lpf_files[0]), None)
elif len(lpf_files) > 1:
raise RuntimeError("trellis backend supports only one LPF file. Found {}".format(', '.join(lpf_files)))
# Write Makefile
nextpnr_options = self.tool_options.get('nextpnr_options', [])
template_vars = {
'name' : self.name,
'lpf_file' : lpf_files[0],
'nextpnr_options' : nextpnr_options,
}
self.render_template('trellis-makefile.j2',
'Makefile',
template_vars)

# repo: dietrichc/streamline-ppc-reports | path: examples/dfp/v201405/creative_service/get_creatives_by_statement.py | license: apache-2.0
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all image creatives.
To create an image creative, run create_creatives.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeService.getCreativesByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201405')
# Create statement object to only select image creatives.
values = [{
'key': 'creativeType',
'value': {
'xsi_type': 'TextValue',
'value': 'ImageCreative'
}
}]
query = 'WHERE creativeType = :creativeType'
statement = dfp.FilterStatement(query, values)
# Get creatives by statement.
while True:
response = creative_service.getCreativesByStatement(
statement.ToStatement())
creatives = response['results']
if creatives:
# Display results.
for creative in creatives:
print ('Creative with id \'%s\', name \'%s\', and type \'%s\' was '
'found.' % (creative['id'], creative['name'],
creative['Creative.Type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | 8,719,420,815,159,008,000 | 31.041667 | 77 | 0.691374 | false | 3.903553 | false | false | false |
sssundar/Drone | rotation/viz.py | 1 | 5332 | # Python script to visualize rotation about a non-body axis.
# Let the lab frame be the inertial frame S.
# Let the origin of the rigid body be O, in the inertial frame S'.
# Let r_ss' be the vector from S to S'.
# Let the body frame relative to O be S''.
# Consider a fixed point on the body, r_s' in S', and r_s'' in S''.
# Assume the body is subject to zero external torques.
# It must be rotating about a fixed axis, n, by Euler's rotation theorem.
# It must have a constant angular velocity about that axis by d/dt L = sum(T_external) = 0 and L = Jw about the rotation axis.
# Let R be the rotation matrix mapping a vector in S'' to S', with inverse R^T
# We know r_s' = R r_s''
# We know d/dt r_s' = (dR/dt R^T) * (R r_s'') = (dR/dt R^T) r_s'
# Therefore we expect (dR/dt R^T) to be the operator (w x) in the S' frame.
# The goal of this script is to visualize this.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy as np
from numpy import pi as pi
from numpy import cos as c
from numpy import sin as s
from numpy import dot as dot
from numpy import transpose as transpose
# The axis phi is a rotation about the z axis in the body frame (yaw)
# The axis theta is a rotation about the y axis in the phi-rotated body frame (pitch)
# The axis psi is a rotation about the x axis in the phi, theta-rotated body frame (roll)
def R(phi, theta, psi):
R = np.zeros((3,3))
R[0,0] = c(phi)*c(theta)
R[1,0] = s(phi)*c(theta)
R[2,0] = -s(theta)
R[0,1] = -s(phi)*c(psi) + c(phi)*s(theta)*s(psi)
R[1,1] = c(phi)*c(psi) + s(phi)*s(theta)*s(psi)
R[2,1] = c(theta)*s(psi)
R[0,2] = s(phi)*s(psi) + c(phi)*s(theta)*c(psi)
R[1,2] = -c(phi)*s(psi) + s(phi)*s(theta)*c(psi)
R[2,2] = c(theta)*c(psi)
return R
# Rotate z-axis (0,0,1) by pi radians about x-axis. Should end up at (0,0,-1) cutting across y.
# Rotate (0,0,-1) by pi radians about y-axis. Should end up at (0,0,1) again, cutting across x.
# Try both at the same time. Should still end up at (0,0,1).
def test_R():
e3_spp = np.array((0,0,1))
vectors = []
for k in np.linspace(0,pi,100):
vectors.append(dot(R(0,0,k), e3_spp))
e3_spp = vectors[-1]
for k in np.linspace(0,pi,100):
vectors.append(dot(R(0,k,0), e3_spp))
e3_spp = vectors[-1]
for k in np.linspace(0,pi,100):
vectors.append(dot(R(0,k,k), e3_spp))
xs = [k[0] for k in vectors]
ys = [k[1] for k in vectors]
zs = [k[2] for k in vectors]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(xs=xs,ys=ys,zs=zs)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
# Sets values lower than epsilon to zero.
# Prints the result with precision 0.3f.
def sanitize_matrix(A):
print ""
epsilon = 0.001
for r in xrange(3):
text = ""
for c in xrange(3):
if abs(A[r, c]) < epsilon:
A[r,c] = 0
text += "%6.2f,\t" % A[r,c]
print text[:-2]
print ""
def sanitize_vector(a):
print ""
epsilon = 0.001
text = ""
for r in xrange(3):
if abs(a[r]) < epsilon:
a[r] = 0
text += "%6.2f,\t" % a[r]
print text[:-2]
print ""
def vectorize(W):
v = np.zeros(3)
v[0] = W[1,0]
v[1] = W[0,2]
v[2] = W[2,1]
return v
# This is the (w x) operator, W, with respect to changing body yaw, pitch, and roll.
# It is dR/dt R^T. The arguments are the current Euler angles and their time derivatives.
def W(phi, theta, psi, dphi, dtheta, dpsi):
Rp = np.zeros((3,3))
Rp[0,0] = (-s(phi)*dphi)*c(theta)
Rp[0,0] += c(phi)*(-s(theta)*dtheta)
Rp[1,0] = (c(phi)*dphi)*c(theta)
Rp[1,0] += s(phi)*(-s(theta)*dtheta)
Rp[2,0] = -c(theta)*dtheta
Rp[0,1] = (-c(phi)*dphi)*c(psi)
Rp[0,1] += -s(phi)*(-s(psi)*dpsi)
Rp[0,1] += (-s(phi)*dphi)*s(theta)*s(psi)
Rp[0,1] += c(phi)*(c(theta)*dtheta)*s(psi)
Rp[0,1] += c(phi)*s(theta)*(c(psi)*dpsi)
Rp[1,1] = (-s(phi)*dphi)*c(psi)
Rp[1,1] += c(phi)*(-s(psi)*dpsi)
Rp[1,1] += (c(phi)*dphi)*s(theta)*s(psi)
Rp[1,1] += s(phi)*(c(theta)*dtheta)*s(psi)
Rp[1,1] += s(phi)*s(theta)*(c(psi)*dpsi)
Rp[2,1] = (-s(theta)*dtheta)*s(psi)
Rp[2,1] += c(theta)*(c(psi)*dpsi)
Rp[0,2] = (c(phi)*dphi)*s(psi)
Rp[0,2] += s(phi)*(c(psi)*dpsi)
Rp[0,2] += (-s(phi)*dphi)*s(theta)*c(psi)
Rp[0,2] += c(phi)*(c(theta)*dtheta)*c(psi)
Rp[0,2] += c(phi)*s(theta)*(-s(psi)*dpsi)
Rp[1,2] = (s(phi)*dphi)*s(psi)
Rp[1,2] += -c(phi)*(c(psi)*dpsi)
Rp[1,2] += (c(phi)*dphi)*s(theta)*c(psi)
Rp[1,2] += s(phi)*(c(theta)*dtheta)*c(psi)
Rp[1,2] += s(phi)*s(theta)*(-s(psi)*dpsi)
Rp[2,2] = (-s(theta)*dtheta)*c(psi)
Rp[2,2] += c(theta)*(-s(psi)*dpsi)
w_i = vectorize(dot(Rp, transpose(R(phi,theta,psi))))
w_b = dot(transpose(R(phi,theta,psi)), w_i)
return (w_i, w_b)
def test_W():
# Is the effective w for a rotation of x rad/s about ek just.. ek*x,
# regardless of the angle about axis ek? We expect W = -W^T as well.
# sanitize_matrix(W(3*pi/12,0,0,2*pi,0,0)[0])
# sanitize_matrix(W(0,3*pi/12,0,0,2*pi,0)[0])
# sanitize_matrix(W(0,0,3*pi/12,0,0,2*pi)[0])
# Let's see what it looks like once we've rotated a bit.
# It's still skew antisymmetric with zero trace! This looks like the operation (w x)!!!!
phi, theta, psi = (pi/4, 3*pi/12, -pi)
w_i, w_b = W(phi, theta, psi, pi, 2*pi, 3*pi)
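# Numerical cross-check of W (illustrative addition): approximate dR/dt by a
# finite difference of R and compare the resulting angular-velocity vector
# with the analytic W() above. The angles and rates are arbitrary test values.
def test_W_numerically(h=1e-6):
    phi, theta, psi = 0.3, -0.2, 0.7
    dphi, dtheta, dpsi = 0.5, 1.1, -0.4
    R0 = R(phi, theta, psi)
    R1 = R(phi + dphi*h, theta + dtheta*h, psi + dpsi*h)
    Rp_fd = (R1 - R0) / h                        # finite-difference dR/dt
    w_fd = vectorize(dot(Rp_fd, transpose(R0)))  # extract w from the (w x) operator
    w_i, w_b = W(phi, theta, psi, dphi, dtheta, dpsi)
    sanitize_vector(w_fd - w_i)                  # should print (near) zeros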
def Main():
test_W()
if __name__ == "__main__":
Main()
| gpl-3.0 | 8,605,246,386,922,294,000 | 29.295455 | 126 | 0.597524 | false | 2.332458 | false | false | false |
mozilla/normandy | normandy/recipes/tests/test_checks.py | 1 | 4355 | from datetime import timedelta
from django.db.utils import ProgrammingError
import pytest
import requests.exceptions
from normandy.recipes import checks, signing
from normandy.recipes.tests import ActionFactory, RecipeFactory, SignatureFactory, UserFactory
@pytest.mark.django_db
class TestSignaturesUseGoodCertificates(object):
def test_it_works(self):
assert checks.signatures_use_good_certificates(None) == []
def test_it_fails_if_a_signature_does_not_verify(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = signing.BadCertificate("testing exception")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_BAD_SIGNING_CERTIFICATE
assert recipe.approved_revision.name in errors[0].msg
def test_it_ignores_signatures_without_x5u(self):
recipe = RecipeFactory(approver=UserFactory(), signed=True)
recipe.signature.x5u = None
recipe.signature.save()
actions = ActionFactory(signed=True)
actions.signature.x5u = None
actions.signature.save()
assert checks.signatures_use_good_certificates(None) == []
def test_it_ignores_signatures_not_in_use(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
SignatureFactory(x5u="https://example.com/bad_x5u") # unused signature
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
def side_effect(x5u, *args):
if "bad" in x5u:
raise signing.BadCertificate("testing exception")
return True
mock_verify_x5u.side_effect = side_effect
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert errors == []
def test_it_passes_expire_early_setting(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = 7
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, timedelta(7))
assert errors == []
def test_it_reports_x5u_network_errors(self, mocker):
RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = requests.exceptions.ConnectionError
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once()
assert len(errors) == 1
assert errors[0].id == checks.ERROR_COULD_NOT_VERIFY_CERTIFICATE
@pytest.mark.django_db
class TestRecipeSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
RecipeFactory(approver=UserFactory(), signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Recipe.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.recipe_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
@pytest.mark.django_db
class TestActionSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
ActionFactory(signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Action.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.action_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
| mpl-2.0 | -1,509,006,960,226,237,200 | 44.842105 | 94 | 0.7031 | false | 3.662742 | true | false | false |
Dwii/Master-Thesis | implementation/Palabos/cavity_benchmark/plot_benchmark.py | 1 | 1854 | # Display a list of *.dat files in a bar chart.
# Based on an example from https://chrisalbon.com/python/matplotlib_grouped_bar_plot.html
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
if len(sys.argv) > 3 and (len(sys.argv)-3) % 2 :
print("usage: python3 {0} <benchmark> <image path> (<dat1> <legend1> [<dat2> <legend2>] .. [<datN> <legendN>] ) ".format(os.path.basename(sys.argv[0])))
exit(1)
benchmark = sys.argv[1]
image_path = sys.argv[2]
groups = (len(sys.argv)-3)/2
# Load benchark
domains = ()
nb_setups = 0
for line in open(benchmark,'r'):
n, snx, sny, snz = line.split()
domains += ( r"{0}$^3$".format(snx), ) #+= ( "{0}x{1}x{2}".format(snx, sny, snz), )
nb_setups += 1
# Setting the positions and width for the bars
pos = list(range(nb_setups))
width = 1 / (groups+2)
# Plotting the bars
fig, ax = plt.subplots(figsize=(10,5))
prop_iter = iter(plt.rcParams['axes.prop_cycle'])
legends = ()
maxLups = 0
for i, argi in enumerate(range(3, len(sys.argv), 2)):
mlups = np.array(list(map(float, open(sys.argv[argi])))) / 1E6
legends += ( sys.argv[argi+1], )
maxLups = max(maxLups, max(mlups))
plt.bar([p + width*i for p in pos],
mlups,
width,
alpha=0.5,
color=next(prop_iter)['color'])
# Set the y axis label
ax.set_ylabel('MLUPS')
ax.set_xlabel('Taille du sous-domaine')  # French: "subdomain size"
# Set the chart's title
#ax.set_title(title)
# Set the position of the x ticks
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(domains)
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0, maxLups] )
# Adding the legend and showing the plot
plt.legend(legends, loc='upper center')
ax.yaxis.grid()
plt.savefig(image_path)
plt.tight_layout()
plt.show() | mit | -7,959,568,732,201,215,000 | 26.279412 | 156 | 0.641855 | false | 2.714495 | false | false | false |
AxelTLarsson/robot-localisation | robot_localisation/main.py | 1 | 6009 | """
This module contains the logic to run the simulation.
"""
import sys
import os
import argparse
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from robot_localisation.grid import Grid, build_transition_matrix
from robot_localisation.robot import Robot, Sensor
from robot_localisation.hmm_filter import FilterState
def help_text():
"""
Return a helpful text explaining usage of the program.
"""
return """
------------------------------- HMM Filtering ---------------------------------
Type a command to get started. Type 'quit' or 'q' to quit.
Valid commands (all commands are case insensitive):
ENTER move the robot one step further in the simulation,
will also output current pose and estimated
position of the robot
help show this help text
show T show the transition matrix T
show f show the filter column vector
show O show the observation matrix
quit | q quit the program
-------------------------------------------------------------------------------
"""
def main():
parser = argparse.ArgumentParser(description='Robot localisation with HMM')
parser.add_argument(
'-r', '--rows',
type=int,
help='the number of rows on the grid, default is 4',
default=4)
parser.add_argument(
'-c', '--columns',
type=int,
help='the number of columns on the grid, default is 4',
default=4)
args = parser.parse_args()
# Initialise the program
size = (args.rows, args.columns)
the_T_matrix = build_transition_matrix(*size)
the_filter = FilterState(transition=the_T_matrix)
the_sensor = Sensor()
the_grid = Grid(*size)
the_robot = Robot(the_grid, the_T_matrix)
sensor_value = None
obs = None
print(help_text())
print("Grid size is {} x {}".format(size[0], size[1]))
print(the_robot)
print("The sensor says: {}".format(sensor_value))
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
print("The HMM filter thinks the robot is at {}".format(filter_est))
print("The Manhattan distance is: {}".format(
manhattan(the_robot.get_position(), pos_est)))
np.set_printoptions(linewidth=1000)
# Main loop
while True:
user_command = str(input('> '))
if user_command.upper() == 'QUIT' or user_command.upper() == 'Q':
break
elif user_command.upper() == 'HELP':
print(help_text())
elif user_command.upper() == 'SHOW T':
print(the_T_matrix)
elif user_command.upper() == 'SHOW F':
print(the_filter.belief_matrix)
elif user_command.upper() == 'SHOW O':
print(obs)
elif not user_command:
# take a step then approximate etc.
the_robot.step()
sensor_value = the_sensor.get_position(the_robot)
obs = the_sensor.get_obs_matrix(sensor_value, size)
the_filter.forward(obs)
print(the_robot)
print("The sensor says: {}".format(sensor_value))
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
print("The HMM filter thinks the robot is at {}".format(filter_est))
print("The Manhattan distance is: {}".format(
manhattan(the_robot.get_position(), pos_est)))
else:
print("Unknown command!")
def manhattan(pos1, pos2):
"""
Calculate the Manhattan distance between pos1 and pos2.
"""
x1, y1 = pos1
x2, y2 = pos2
return abs(x1-x2) + abs(y1-y2)
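# Example: manhattan((0, 0), (2, 3)) == 5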
def automated_run():
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 7))
navg = 20
nsteps = 10
for size in (2, 2), (3, 3), (4, 4), (5, 5), (10, 10):
avg_distances = np.zeros(shape=(nsteps+1,))
for n in range(navg):
distances = list()
none_values = list()
the_T_matrix = build_transition_matrix(*size)
the_filter = FilterState(transition=the_T_matrix)
the_sensor = Sensor()
the_grid = Grid(*size)
the_robot = Robot(the_grid, the_T_matrix)
# get the manhattan distance at the start
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
distances.append(manhattan(the_robot.get_position(), pos_est))
for i in range(nsteps):
# take a step then approximate etc.
the_robot.step()
sensor_value = the_sensor.get_position(the_robot)
if sensor_value is None:
none_values.append(i) # keep track of where None was returned
obs = the_sensor.get_obs_matrix(sensor_value, size)
the_filter.forward(obs)
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
distances.append(manhattan(the_robot.get_position(), pos_est))
avg_distances += np.array(distances)
avg_distances /= navg
base_line, = plt.plot(avg_distances, label="Grid size {}".format(size))
# for point in none_values:
# plt.scatter(point, distances[point], marker='o',
# color=base_line.get_color(), s=40)
plt.legend()
plt.xlim(0, nsteps)
plt.ylim(0,)
plt.ylabel("Manhattan distance")
plt.xlabel("Steps")
plt.title("Manhattan distance from true position and inferred position \n"
"from the hidden Markov model (average over %s runs)" % navg)
fig.savefig("automated_run.png")
plt.show()
if __name__ == '__main__':
main()
# automated_run()
| mit | 1,122,709,431,503,210,400 | 33.337143 | 82 | 0.564487 | false | 3.786389 | false | false | false |
confpack/confpacker | libconfpacker/packagers/base/__init__.py | 1 | 4696 | from __future__ import absolute_import
from datetime import datetime
import logging
import os
import os.path
import subprocess
import yaml
from cpcommon import cd
from .task import Task
class Package(object):
def src_path(self, *path):
return os.path.join(self.src_directory, *path)
def __init__(self, name, src_directory, build_version):
self.logger = logging.getLogger("confpacker")
self.name = name
self.src_directory = src_directory
self.build_version = build_version
self.meta = self.load_meta()
self.main_tasks = self.load_tasks()
self.main_handlers = self.load_handlers(ignore_error=True)
self.vars = self.load_vars(ignore_error=True)
self.secrets = self.load_secrets(ignore_error=True)
self.files = self.scan_files()
self.templates = self.scan_templates()
def _load_yml_file(self, filepath, expected_type, ignore_error=False):
if not os.path.exists(filepath):
if ignore_error:
return expected_type()
raise LookupError("cannot find {}".format(filepath))
with open(filepath) as f:
thing = yaml.load(f.read())
if thing is None and ignore_error:
return expected_type()
if not isinstance(thing, expected_type):
raise TypeError("expected a {} but got a {} in {}".format(expected_type, type(thing), filepath))
return thing
def load_meta(self):
meta_path = self.src_path("meta.yml")
return self._load_yml_file(meta_path, dict, ignore_error=True)
def load_tasks(self, filename="main.yml", ignore_error=False):
tasks_path = self.src_path("tasks", filename)
return [Task(rt) for rt in self._load_yml_file(tasks_path, list, ignore_error=ignore_error)]
def load_handlers(self, filename="main.yml", ignore_error=False):
handlers_path = self.src_path("handlers", filename)
return self._load_yml_file(handlers_path, list, ignore_error=ignore_error)
def load_vars(self, filename="main.yml", directory="vars", ignore_error=False):
vars_path = self.src_path(directory, filename)
return self._load_yml_file(vars_path, dict, ignore_error=ignore_error)
def load_secrets(self, filename="main.yml", ignore_error=False):
# TODO: this is not yet implemented
return {}
def scan_directory_for_files(self, directory):
base_path = self.src_path(directory)
if not os.path.isdir(base_path):
return []
files = []
for root, dirs, files_in_dir in os.walk(base_path):
for filename in files_in_dir:
path = os.path.join(root, filename)
if path.startswith(base_path):
target_path = path[len(base_path):]
else:
# TODO: This may happen for a symlink. Needs to be investigated.
raise RuntimeError("file path {} does not start with src directory path {}?".format(path, self.src_directory))
files.append((path, target_path))
return files
def scan_files(self):
return self.scan_directory_for_files("files")
def scan_templates(self):
return self.scan_directory_for_files("templates")
class BasePackager(object):
def __init__(self, build_config, output_dir):
self.logger = logging.getLogger("confpacker")
self.build_config = build_config
self.output_dir = os.path.abspath(output_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
def get_source_git_sha(self):
with cd(self.build_config.src_directory):
if os.path.isdir(".git"):
sha = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
else:
sha = ""
return sha
def get_timestamp(self):
return datetime.now().strftime("%Y%m%d%H%M%S")
def get_build_version(self):
timestamp = self.get_timestamp()
git_sha = self.get_source_git_sha()
build_version = timestamp
if git_sha:
build_version = build_version + "-" + git_sha
return build_version
def build(self):
build_version = self.get_build_version()
this_out_dir = os.path.join(self.output_dir, build_version)
if os.path.exists(this_out_dir):
raise RuntimeError("{} already exists? this should not happen".format(this_out_dir))
os.mkdir(this_out_dir)
for pkg_name, pkg_src_path in self.build_config.package_paths.items():
package = Package(pkg_name, pkg_src_path, build_version)
this_package_out_dir = os.path.join(this_out_dir, pkg_name)
os.mkdir(this_package_out_dir)
self.build_one(package, build_version, this_package_out_dir)
def build_one(self, package, build_version, out_dir):
"""Builds one package
out_dir is for this package. The final should emit a file at <out_dir>/package.<typename>
"""
raise NotImplementedError
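# Minimal concrete packager, for illustration only: the class name, the use of
# tar and the "package.tar.gz" artifact name are assumptions, not part of
# libconfpacker.
class TarballPackager(BasePackager):
    def build_one(self, package, build_version, out_dir):
        # Archive the package sources as <out_dir>/package.tar.gz
        target = os.path.join(out_dir, "package.tar.gz")
        subprocess.check_call(
            ["tar", "czf", target, "-C", package.src_directory, "."])
        return target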
| apache-2.0 | -5,239,246,716,933,589,000 | 30.945578 | 120 | 0.672487 | false | 3.48368 | false | false | false |
mclaughlin6464/pearce | bin/optimization/sloppy_joes_optimization_indiv_bins.py | 1 | 1573 | from pearce.emulator import OriginalRecipe, ExtraCrispy, SpicyBuffalo, LemonPepperWet
from pearce.mocks import cat_dict
import numpy as np
from os import path
from SloppyJoes import lazy_wrapper
training_file = '/scratch/users/swmclau2/xi_zheng07_cosmo_lowmsat/PearceRedMagicXiCosmoFixedNd.hdf5'
em_method = 'gp'
fixed_params = {'z':0.0, 'r': 0.19118072}
#emu = SpicyBuffalo(training_file, method = em_method, fixed_params=fixed_params,
# custom_mean_function = 'linear', downsample_factor = 0.01)
emu = OriginalRecipe(training_file, method = em_method, fixed_params=fixed_params,
custom_mean_function = 'linear', downsample_factor = 0.01)
def resids_bins(p, gps, xs, ys, yerrs):
res = []
p_np = np.array(p).reshape((len(gps), -1))
for gp, x, y,yerr, dy, p in zip(gps, xs, ys,yerrs, emu.downsample_y, p_np):
gp.set_parameter_vector(p)
gp.recompute()
r = (gp.predict(dy, x, return_cov=False)-y)/(yerr+1e-5)
res.append(r)
#print res[0].shape
return np.hstack(res)
def resids(p, gp, x, y, yerr):
p = np.array(p)
gp.set_parameter_vector(p)
gp.recompute()
res = (gp.predict(emu.downsample_y, x, return_cov=False)-y)/(yerr+1e-5)
#print res[0].shape
return res
n_hps = len(emu._emulator.get_parameter_vector())
#vals = np.ones((n_hps*emu.n_bins))
vals = np.ones((n_hps,))
args = (emu._emulator, emu.x, emu.y, emu.yerr)
result = lazy_wrapper(resids, vals, func_args=args, print_level=3)
print result
np.savetxt('sloppy_joes_result_indiv_bins.npy', result)
| mit | 905,506,763,249,726,100 | 33.195652 | 100 | 0.664336 | false | 2.6 | false | false | false |
Alexanderkorn/Automatisation | oude scripts/Self/IFScraper.py | 1 | 2199 | __author__ = 'alexander'
import urllib2
import os
from lib import pyperclip
def PageScrape(pageurl):
hdr= {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36'}
req = urllib2.Request(pageurl, "", hdr)
response = urllib2.urlopen(req)
html = response.read()
search = 'Gallery:'
for i in range(len(html)-len(search)):
if search == html[i:i+len(search)]:
foldername = html[i+len(search)-1:]
foldername = foldername.split('<')[3].split('>')[1]
while foldername[-1]=='.' or foldername[-1]==' ':
foldername = foldername[:-1]
search = 'original=\"'
imgnum = 1
imgcount = 0
for i in range(len(html)-len(search)):
if search == html[i:i+len(search)]:
imgcount += 1
print "\n\nThere are "+str(imgcount)+" pics in the gallery: "+foldername+"."
contnum = raw_input("Would you like to download them all? 1=yes 2=no: ")
foldername = 'Downloads/'+foldername
if contnum == '1':
print '\n'
try:
os.makedirs(foldername)
except:
print "Error, make sure there is no directory with this script"
return 0
for i in range(len(html)-len(search)):
if search == html[i:i+len(search)]:
imgurl = html[i+len(search):]
imgurl = imgurl.split('"')[0]
if imgurl[-4] == '.':
imgname = foldername+'/'+str(imgnum)+imgurl[-4:]
else:
imgname = foldername+'/'+str(imgnum)+imgurl[-5:]
f = open(imgname, 'wb')
f.write(urllib2.urlopen(imgurl).read())
f.close()
print '\t'+str(imgnum)+'/'+str(imgcount)+ ' completed\n'
imgnum += 1
return 0
urltest = pyperclip.paste()
print "URL in clipboard: "+ urltest
use = raw_input("\nWould you like to use the above url? 1=yes 2=input other: ")
if use == '1':
url = urltest
else:
url = raw_input("\nEnter the url: ")
PageScrape(url)
| gpl-3.0 | -8,366,557,063,171,999,000 | 36.271186 | 135 | 0.554343 | false | 3.367534 | false | false | false |
jdumas/autobib | pdftitle.py | 1 | 14035 | #!/usr/bin/env python2.7
# https://gist.github.com/nevesnunes/84b2eb7a2cf63cdecd170c139327f0d6
"""
Extract title from PDF file.
Dependencies:
pip install --user unidecode pyPDF PDFMiner
Usage:
find . -name "*.pdf" | xargs -I{} pdftitle -d tmp --rename {}
Limitations:
- No processing of CID keyed fonts. PDFMiner seems to decode them
in some methods (e.g. PDFTextDevice.render_string()).
- Some `LTTextLine` elements report incorrect height, leading to some
blocks of text being consider bigger than title text.
- Heuristics are used to judge invalid titles, implying the possibility of
false positives.
"""
import getopt
import os
import re
import string
import subprocess
import sys
import unidecode
from pyPdf import PdfFileReader
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTChar, LTFigure, LTTextBox, LTTextLine
__all__ = ['pdf_title']
def make_parsing_state(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('ParsingState', (), enums)
CHAR_PARSING_STATE = make_parsing_state('INIT_X', 'INIT_D', 'INSIDE_WORD')
def log(text):
if IS_LOG_ON:
print('--- ' + text)
IS_LOG_ON = False
MIN_CHARS = 6
MAX_WORDS = 20
MAX_CHARS = MAX_WORDS * 10
TOLERANCE = 1e-06
def sanitize(filename):
"""Turn string into a valid file name.
"""
# If the title was picked up from text, it may be too large.
# Preserve a certain number of words and characters
words = filename.split(' ')
filename = ' '.join(words[0:MAX_WORDS])
if len(filename) > MAX_CHARS:
filename = filename[0:MAX_CHARS]
# Preserve letters with diacritics
try:
filename = unidecode.unidecode(filename.encode('utf-8').decode('utf-8'))
except UnicodeDecodeError:
print("*** Skipping invalid title decoding for file %s! ***" % filename)
# Preserve subtitle and itemization separators
filename = re.sub(r',', ' ', filename)
filename = re.sub(r': ', ' - ', filename)
# Strip repetitions
filename = re.sub(r'\.pdf(\.pdf)*$', '', filename)
filename = re.sub(r'[ \t][ \t]*', ' ', filename)
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
return ''.join([c for c in filename if c in valid_chars])
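# Example (illustrative): sanitize('Deep Learning: A Survey, 2015.pdf')
# returns 'Deep Learning - A Survey 2015'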
def meta_title(filename):
"""Title from pdf metadata.
"""
docinfo = PdfFileReader(file(filename, 'rb')).getDocumentInfo()
if docinfo is None:
return ''
return docinfo.title if docinfo.title else ''
def junk_line(line):
"""Judge if a line is not appropriate for a title.
"""
too_small = len(line.strip()) < MIN_CHARS
is_placeholder_text = bool(re.search(r'^[0-9 \t-]+(abstract|introduction)?\s+$|^(abstract|unknown|title|untitled):?$', line.strip().lower()))
is_copyright_info = bool(re.search(r'paper\s+title|technical\s+report|proceedings|preprint|to\s+appear|submission|(integrated|international).*conference|transactions\s+on|symposium\s+on|downloaded\s+from\s+http', line.lower()))
# NOTE: Titles which only contain a number will be discarded
stripped_to_ascii = ''.join([c for c in line.strip() if c in string.ascii_letters])
ascii_length = len(stripped_to_ascii)
stripped_to_chars = re.sub(r'[ \t\n]', '', line.strip())
chars_length = len(stripped_to_chars)
is_serial_number = ascii_length < chars_length / 2
return too_small or is_placeholder_text or is_copyright_info or is_serial_number
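# Examples (illustrative): junk_line('abstract') -> True (placeholder text),
# junk_line('A-1234-99 7/21') -> True (serial number),
# junk_line('Attention Is All You Need') -> False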
def empty_str(s):
return len(s.strip()) == 0
def is_close(a, b, relative_tolerance=TOLERANCE):
return abs(a-b) <= relative_tolerance * max(abs(a), abs(b))
def update_largest_text(line, y0, size, largest_text):
log('update size: ' + str(size))
log('largest_text size: ' + str(largest_text['size']))
# Sometimes font size is not correctly read, so we
# fallback to text y0 (not even height may be calculated).
# In this case, we consider the first line of text to be a title.
if ((size == largest_text['size'] == 0) and (y0 - largest_text['y0'] < -TOLERANCE)):
return largest_text
# If it is a split line, it may contain a new line at the end
line = re.sub(r'\n$', ' ', line)
if (size - largest_text['size'] > TOLERANCE):
largest_text = {
'contents': line,
'y0': y0,
'size': size
}
# Title spans multiple lines
elif is_close(size, largest_text['size']):
largest_text['contents'] = largest_text['contents'] + line
largest_text['y0'] = y0
return largest_text
def extract_largest_text(obj, largest_text):
# Skip first letter of line when calculating size, as articles
# may enlarge it enough to be bigger then the title size.
# Also skip other elements such as `LTAnno`.
for i, child in enumerate(obj):
if isinstance(child, LTTextLine):
log('lt_obj child line: ' + str(child))
for j, child2 in enumerate(child):
if j > 1 and isinstance(child2, LTChar):
largest_text = update_largest_text(child.get_text(), child2.y0, child2.size, largest_text)
# Only need to parse size of one char
break
elif i > 1 and isinstance(child, LTChar):
log('lt_obj child char: ' + str(child))
largest_text = update_largest_text(obj.get_text(), child.y0, child.size, largest_text)
# Only need to parse size of one char
break
return largest_text
def extract_figure_text(lt_obj, largest_text):
"""
Extract text contained in a `LTFigure`.
Since text is encoded in `LTChar` elements, we detect separate lines
by keeping track of changes in font size.
"""
text = ''
line = ''
y0 = 0
size = 0
char_distance = 0
char_previous_x1 = 0
state = CHAR_PARSING_STATE.INIT_X
for child in lt_obj:
log('child: ' + str(child))
# Ignore other elements
if not isinstance (child, LTChar):
continue
char_y0 = child.y0
char_size = child.size
char_text = child.get_text()
decoded_char_text = unidecode.unidecode(char_text.encode('utf-8').decode('utf-8'))
log('char: ' + str(char_size) + ' ' + str(decoded_char_text))
# A new line was detected
if char_size != size:
log('new line')
largest_text = update_largest_text(line, y0, size, largest_text)
text += line + '\n'
line = char_text
y0 = char_y0
size = char_size
char_previous_x1 = child.x1
state = CHAR_PARSING_STATE.INIT_D
else:
# Spaces may not be present as `LTChar` elements,
# so we manually add them.
# NOTE: A word starting with lowercase can't be
# distinguished from the current word.
char_current_distance = abs(child.x0 - char_previous_x1)
log('char_current_distance: ' + str(char_current_distance))
log('char_distance: ' + str(char_distance))
log('state: ' + str(state))
# Initialization
if state == CHAR_PARSING_STATE.INIT_X:
char_previous_x1 = child.x1
state = CHAR_PARSING_STATE.INIT_D
elif state == CHAR_PARSING_STATE.INIT_D:
# Update distance only if no space is detected
if (char_distance > 0) and (char_current_distance < char_distance * 2.5):
char_distance = char_current_distance
if (char_distance < 0.1):
char_distance = 0.1
state = CHAR_PARSING_STATE.INSIDE_WORD
# If the x-position decreased, then it's a new line
if (state == CHAR_PARSING_STATE.INSIDE_WORD) and (child.x1 < char_previous_x1):
log('x-position decreased')
line += ' '
char_previous_x1 = child.x1
state = CHAR_PARSING_STATE.INIT_D
# Large enough distance: it's a space
elif (state == CHAR_PARSING_STATE.INSIDE_WORD) and (char_current_distance > char_distance * 8.5):
log('space detected')
log('char_current_distance: ' + str(char_current_distance))
log('char_distance: ' + str(char_distance))
line += ' '
char_previous_x1 = child.x1
# When larger distance is detected between chars, use it to
# improve our heuristic
elif (state == CHAR_PARSING_STATE.INSIDE_WORD) and (char_current_distance > char_distance) and (char_current_distance < char_distance * 2.5):
char_distance = char_current_distance
char_previous_x1 = child.x1
# Chars are sequential
else:
char_previous_x1 = child.x1
child_text = child.get_text()
if not empty_str(child_text):
line += child_text
return (largest_text, text)
def pdf_text(filename):
fp = open(filename, 'rb')
parser = PDFParser(fp)
doc = PDFDocument(parser, '')
parser.set_document(doc)
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
text = ''
largest_text = {
'contents': '',
'y0': 0,
'size': 0
}
for page in PDFPage.create_pages(doc):
interpreter.process_page(page)
layout = device.get_result()
for lt_obj in layout:
log('lt_obj: ' + str(lt_obj))
if isinstance(lt_obj, LTFigure):
(largest_text, figure_text) = extract_figure_text(lt_obj, largest_text)
text += figure_text
elif isinstance(lt_obj, (LTTextBox, LTTextLine)):
# Ignore body text blocks
stripped_to_chars = re.sub(r'[ \t\n]', '', lt_obj.get_text().strip())
if (len(stripped_to_chars) > MAX_CHARS * 2):
continue
largest_text = extract_largest_text(lt_obj, largest_text)
text += lt_obj.get_text() + '\n'
# Remove unprocessed CID text
largest_text['contents'] = re.sub(r'(\(cid:[0-9 \t-]*\))*', '', largest_text['contents'])
# Only parse the first page
return (largest_text, text)
def title_start(lines):
for i, line in enumerate(lines):
if not empty_str(line) and not junk_line(line):
return i
return 0
def title_end(lines, start, max_lines=2):
for i, line in enumerate(lines[start+1:start+max_lines+1], start+1):
if empty_str(line):
return i
return start + 1
def text_title(filename):
"""Extract title from PDF's text.
"""
(largest_text, lines_joined) = pdf_text(filename)
if empty_str(largest_text['contents']):
lines = lines_joined.strip().split('\n')
i = title_start(lines)
j = title_end(lines, i)
text = ' '.join(line.strip() for line in lines[i:j])
else:
text = largest_text['contents'].strip()
# Strip dots, which conflict with os.path's splittext()
text = re.sub(r'\.', '', text)
# Strip extra whitespace
text = re.sub(r'[\t\n]', '', text)
return text
def pdftotext_title(filename):
"""Extract title using `pdftotext`
"""
command = 'pdftotext {} -'.format(re.sub(' ', '\\ ', filename))
process = subprocess.Popen([command], \
shell=True, \
stdout=subprocess.PIPE, \
stderr=subprocess.PIPE)
out, err = process.communicate()
lines = out.strip().split('\n')
i = title_start(lines)
j = title_end(lines, i)
text = ' '.join(line.strip() for line in lines[i:j])
# Strip dots, which conflict with os.path's splittext()
text = re.sub(r'\.', '', text)
# Strip extra whitespace
text = re.sub(r'[\t\n]', '', text)
return text
def valid_title(title):
return not empty_str(title) and not junk_line(title) and empty_str(os.path.splitext(title)[1])
def pdf_title(filename):
"""Extract title using one of multiple strategies.
"""
try:
title = meta_title(filename)
if valid_title(title):
return title
except Exception as e:
print("*** Skipping invalid metadata for file %s! ***" % filename)
print(e)
try:
title = text_title(filename)
if valid_title(title):
return title
except Exception as e:
print("*** Skipping invalid parsing for file %s! ***" % filename)
print(e)
title = pdftotext_title(filename)
if valid_title(title):
return title
return os.path.basename(os.path.splitext(filename)[0])
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:], 'nd:', ['dry-run', 'rename'])
dry_run = False
rename = False
target_dir = "."
for opt, arg in opts:
if opt in ['-n', '--dry-run']:
dry_run = True
elif opt in ['--rename']:
rename = True
elif opt in ['-d']:
target_dir = arg
if len(args) == 0:
print("Usage: %s [-d output] [--dry-run] [--rename] filenames" % sys.argv[0])
sys.exit(1)
for filename in args:
title = pdf_title(filename)
title = sanitize(' '.join(title.split()))
if rename:
new_name = os.path.join(target_dir, title + ".pdf")
print("%s => %s" % (filename, new_name))
if not dry_run:
if os.path.exists(new_name):
print("*** Target %s already exists! ***" % new_name)
else:
os.rename(filename, new_name)
else:
print(title)
| gpl-3.0 | 4,369,930,326,702,331,000 | 34.441919 | 231 | 0.591307 | false | 3.603338 | false | false | false |
yaybu/touchdown | touchdown/provisioner/fuselage.py | 1 | 4827 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
from touchdown.core import argument, errors, resource, serializers
from . import provisioner
try:
import fuselage
from fuselage import argument as f_args, builder, bundle, resources
except ImportError:
raise errors.Error(
"You need the fuselage package to use the fuselage_bundle resource"
)
def underscore(title):
return re.sub(r"(?<=[a-z])(?=[A-Z])", u"_", title).lower()
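# e.g. underscore("FuselageResource") -> "fuselage_resource",
#      underscore("Bundle") -> "bundle"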
arguments = {
f_args.Boolean: lambda resource_type, klass, arg: argument.Boolean(field=arg),
f_args.String: lambda resource_type, klass, arg: argument.String(field=arg),
f_args.FullPath: lambda resource_type, klass, arg: argument.String(field=arg),
f_args.File: lambda resource_type, klass, arg: argument.String(field=arg),
f_args.Integer: lambda resource_type, klass, arg: argument.Integer(field=arg),
f_args.Octal: lambda resource_type, klass, arg: argument.Integer(field=arg),
f_args.Dict: lambda resource_type, klass, arg: argument.Dict(field=arg),
f_args.List: lambda resource_type, klass, arg: argument.List(field=arg),
f_args.SubscriptionArgument: lambda resource_type, klass, arg: argument.List(
field=arg
),
f_args.PolicyArgument: lambda resource_type, klass, arg: argument.String(
field=arg, choices=resource_type.policies.keys()
),
}
class FuselageResource(resource.Resource):
@classmethod
def adapt(base_klass, resource_type):
args = {
"resource_name": underscore(resource_type.__resource_name__),
"fuselage_class": resource_type,
"root": argument.Resource(Bundle),
}
for arg, klass in resource_type.__args__.items():
args[arg] = arguments[klass.__class__](resource_type, klass, arg)
cls = type(resource_type.__resource_name__, (base_klass,), args)
def _(self, **kwargs):
arguments = {"parent": self}
arguments.update(kwargs)
resource = cls(**arguments)
if not self.resources:
self.resources = []
self.resources.append(resource)
self.add_dependency(resource)
return resource
setattr(Bundle, "add_%s" % cls.resource_name, _)
return cls
class BundleSerializer(serializers.Serializer):
def render(self, runner, value):
b = bundle.ResourceBundle()
for res in value:
b.add(res.fuselage_class(**serializers.Resource().render(runner, res)))
return builder.build(b)
def pending(self, runner, value):
for res in value:
if serializers.Resource().pending(runner, res):
return True
return False
class Bundle(provisioner.Provisioner):
resource_name = "fuselage_bundle"
always_apply = argument.Boolean()
resources = argument.List(
argument.Resource(FuselageResource),
field="script",
serializer=BundleSerializer(),
)
sudo = argument.Boolean(field="sudo", default=True)
class Describe(provisioner.Describe):
name = "describe"
resource = Bundle
def describe_object(self):
if self.resource.always_apply:
return {"Results": "Pending"}
if not self.resource.target:
# If target is not set we are probably dealing with an AMI... YUCK
# Bail out
return {"Result": "Pending"}
serializer = serializers.Resource()
if serializer.pending(self.runner, self.resource):
return {"Result": "Pending"}
kwargs = serializer.render(self.runner, self.resource)
try:
client = self.runner.get_plan(self.resource.target).get_client()
except errors.ServiceNotReady:
return {"Result": "Pending"}
try:
client.run_script(kwargs["script"], ["-s"])
except errors.CommandFailed as e:
if e.exit_code == 254:
return {"Result": "Success"}
return {"Result": "Pending"}
class Apply(provisioner.Apply):
resource = Bundle
for attr, value in vars(resources).items():
if type(value) == fuselage.resource.ResourceType:
locals()[attr] = FuselageResource.adapt(value)
| apache-2.0 | -1,469,942,013,499,236,900 | 30.966887 | 83 | 0.645743 | false | 3.982673 | false | false | false |
yukisakurai/hhana | mva/plotting/utils.py | 1 | 4190 | import ROOT
from itertools import izip
from matplotlib import cm
from rootpy.plotting.style.atlas.labels import ATLAS_label
from rootpy.memory.keepalive import keepalive
from .. import ATLAS_LABEL
def set_colors(hists, colors='jet'):
if isinstance(colors, basestring):
colors = cm.get_cmap(colors, len(hists))
if hasattr(colors, '__call__'):
for i, h in enumerate(hists):
color = colors((i + 1) / float(len(hists) + 1))
h.SetColor(color)
else:
for h, color in izip(hists, colors):
h.SetColor(color)
def category_lumi_atlas(pad, category_label=None,
data_info=None, atlas_label=None,
textsize=20):
left, right, bottom, top = pad.margin_pixels
height = float(pad.height_pixels)
# draw the category label
if category_label:
label = ROOT.TLatex(
1. - pad.GetRightMargin(),
1. - (textsize - 2) / height,
category_label)
label.SetNDC()
label.SetTextFont(43)
label.SetTextSize(textsize)
label.SetTextAlign(31)
with pad:
label.Draw()
keepalive(pad, label)
# draw the luminosity label
if data_info is not None:
plabel = ROOT.TLatex(
1. - pad.GetLeftMargin() - 0.25,
1. - (top + textsize + 60) / height,
str(data_info))
plabel.SetNDC()
plabel.SetTextFont(43)
plabel.SetTextSize(textsize)
plabel.SetTextAlign(31)
with pad:
plabel.Draw()
keepalive(pad, plabel)
# draw the ATLAS label
if atlas_label is not False:
label = atlas_label or ATLAS_LABEL
ATLAS_label(pad.GetLeftMargin() + 0.03,
1. - (top + textsize + 15) / height,
sep=0.132, pad=pad, sqrts=None,
text=label,
textsize=textsize)
pad.Update()
pad.Modified()
def label_plot(pad, template, xaxis, yaxis,
ylabel='Events', xlabel=None,
units=None, data_info=None,
category_label=None,
atlas_label=None,
extra_label=None,
extra_label_position='left',
textsize=22):
# set the axis labels
binw = list(template.xwidth())
binwidths = list(set(['%.2g' % w for w in binw]))
if units is not None:
if xlabel is not None:
xlabel = '%s [%s]' % (xlabel, units)
if ylabel and len(binwidths) == 1 and binwidths[0] != '1':
# constant width bins
ylabel = '%s / %s %s' % (ylabel, binwidths[0], units)
elif ylabel and len(binwidths) == 1 and binwidths[0] != '1':
ylabel = '%s / %s' % (ylabel, binwidths[0])
if ylabel:
yaxis.SetTitle(ylabel)
if xlabel:
xaxis.SetTitle(xlabel)
left, right, bottom, top = pad.margin_pixels
height = float(pad.height_pixels)
category_lumi_atlas(pad, category_label, data_info, atlas_label)
# draw the extra label
if extra_label is not None:
if extra_label_position == 'left':
label = ROOT.TLatex(pad.GetLeftMargin() + 0.03,
1. - (top + 2 * (textsize + 40)) / height,
extra_label)
else: # right
label = ROOT.TLatex(1. - pad.GetRightMargin() - 0.03,
1. - (top + 2 * (textsize + 40)) / height,
extra_label)
label.SetTextAlign(31)
label.SetNDC()
label.SetTextFont(43)
label.SetTextSize(textsize)
with pad:
label.Draw()
keepalive(pad, label)
pad.Update()
pad.Modified()
# class rootpy.plotting.Legend(
# entries, pad=None,
# leftmargin=0.5, topmargin=0.05, rightmargin=0.05,
# entryheight=0.06, entrysep=0.02, margin=0.3,
# textfont=None, textsize=None, header=None)
def legend_params(position, textsize):
return dict(
leftmargin=0.48, topmargin=0.03, rightmargin=0.05,
entryheight=0.05,
entrysep=0.01,
margin=0.25,
textsize=textsize)
| gpl-3.0 | 7,927,911,205,312,527,000 | 30.742424 | 74 | 0.548449 | false | 3.512154 | false | false | false |
O-T-L/PyOptimization | parameters/optimizer/epsilon_moea.py | 1 | 4120 | """
Copyright (C) 2014, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
def epsilon(config, problem):
if type(problem).__name__ == 'DTLZ1':
table = {
3: 0.033,
4: 0.052,
5: 0.059,
6: 0.0554,
8: 0.0549,
10: 0.0565,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ2':
table = {
2: 0.006,
3: 0.06,
4: 0.1312,
5: 0.1927,
6: 0.234,
8: 0.29,
10: 0.308,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ3':
table = {
3: 0.06,
4: 0.1385,
5: 0.2,
6: 0.227,
8: 0.1567,
10: 0.85,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ4':
table = {
3: 0.06,
4: 0.1312,
5: 0.1927,
6: 0.234,
8: 0.29,
10: 0.308,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ5':
table = {
3: 0.0052,
4: 0.042,
5: 0.0785,
6: 0.11,
8: 0.1272,
10: 0.1288,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ6':
table = {
3: 0.0227,
4: 0.12,
5: 0.3552,
6: 0.75,
8: 1.15,
10: 1.45,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ7':
table = {
2: 0.005,
3: 0.048,
4: 0.105,
5: 0.158,
6: 0.15,
8: 0.225,
10: 0.46,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'ConvexDTLZ2':
table = {
2: 0.0075,
3: 0.035,
4: 0.039,
5: 0.034,
6: 0.0273,
8: 0.0184,
10: 0.0153,
}
_epsilon = table[problem.GetNumberOfObjectives()]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
elif type(problem).__name__ == 'DTLZ5I':
if problem.GetNumberOfObjectives() == 10:
table = {
3: 0.06,
4: 0.12,
5: 0.16,
6: 0.2,
7: 0.24,
8: 0.25,
9: 0.26,
}
_epsilon = table[problem.GetManifold() + 1]
epsilon = [_epsilon] * problem.GetNumberOfObjectives()
return [epsilon]
raise Exception(type(problem).__name__, problem.GetNumberOfObjectives())
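# Illustrative call (the problem construction below is a guess; the actual
# PyOptimization constructor may differ):
# problem = DTLZ2(3)                 # a three-objective DTLZ2 instance
# [eps] = epsilon(config, problem)   # -> eps == [0.06, 0.06, 0.06]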
| lgpl-3.0 | -8,347,849,438,186,841,000 | 29.932331 | 76 | 0.495139 | false | 3.643933 | false | false | false |
tulsawebdevs/django-multi-gtfs | multigtfs/models/trip.py | 1 | 4025 | #
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.contrib.gis.geos import LineString
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from multigtfs.models.base import models, Base
@python_2_unicode_compatible
class Trip(Base):
"""A trip along a route
This implements trips.txt in the GTFS feed
"""
route = models.ForeignKey('Route', on_delete=models.CASCADE)
service = models.ForeignKey(
'Service', null=True, blank=True, on_delete=models.SET_NULL)
trip_id = models.CharField(
max_length=255, db_index=True,
help_text="Unique identifier for a trip.")
headsign = models.CharField(
max_length=255, blank=True,
help_text="Destination identification for passengers.")
short_name = models.CharField(
max_length=63, blank=True,
help_text="Short name used in schedules and signboards.")
direction = models.CharField(
max_length=1, blank=True,
choices=(('0', '0'), ('1', '1')),
help_text="Direction for bi-directional routes.")
block = models.ForeignKey(
'Block', null=True, blank=True, on_delete=models.SET_NULL,
help_text="Block of sequential trips that this trip belongs to.")
shape = models.ForeignKey(
'Shape', null=True, blank=True, on_delete=models.SET_NULL,
help_text="Shape used for this trip")
geometry = models.LineStringField(
null=True, blank=True,
help_text='Geometry cache of Shape or Stops')
wheelchair_accessible = models.CharField(
max_length=1, blank=True,
choices=(
('0', 'No information'),
('1', 'Some wheelchair accommodation'),
('2', 'No wheelchair accommodation')),
help_text='Are there accommodations for riders with wheelchair?')
bikes_allowed = models.CharField(
max_length=1, blank=True,
choices=(
('0', 'No information'),
('1', 'Some bicycle accommodation'),
('2', 'No bicycles allowed')),
help_text='Are bicycles allowed?')
extra_data = JSONField(default={}, blank=True, null=True)
def update_geometry(self, update_parent=True):
"""Update the geometry from the Shape or Stops"""
original = self.geometry
if self.shape:
self.geometry = self.shape.geometry
else:
stoptimes = self.stoptime_set.order_by('stop_sequence')
if stoptimes.count() > 1:
self.geometry = LineString(
[st.stop.point.coords for st in stoptimes])
if self.geometry != original:
self.save()
if update_parent:
self.route.update_geometry()
def __str__(self):
return "%s-%s" % (self.route, self.trip_id)
class Meta:
db_table = 'trip'
app_label = 'multigtfs'
_column_map = (
('route_id', 'route__route_id'),
('service_id', 'service__service_id'),
('trip_id', 'trip_id'),
('trip_headsign', 'headsign'),
('trip_short_name', 'short_name'),
('direction_id', 'direction'),
('block_id', 'block__block_id'),
('shape_id', 'shape__shape_id'),
('wheelchair_accessible', 'wheelchair_accessible'),
('bikes_allowed', 'bikes_allowed'),
)
_filename = 'trips.txt'
_rel_to_feed = 'route__feed'
_unique_fields = ('trip_id',)
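# Illustrative Django shell usage (assumes a GTFS feed has already been
# imported, so the query below finds real records):
#
#     trip = Trip.objects.select_related('shape', 'route').first()
#     trip.update_geometry()  # rebuilds trip.geometry, saves if changed,
#                             # and refreshes the parent route geometry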
| apache-2.0 | -85,679,634,461,076,020 | 36.268519 | 74 | 0.623354 | false | 3.790019 | false | false | false |
taotaocoule/stock | spider/data/bond.py | 1 | 1159 | # 国债指数:id=0000121;http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=0000121&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518154947301=fsData1518154947301
# 沪市企业: id=0000131;http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=0000131&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518156740923=fsData1518156740923
# 深圳企业:id=3994812;http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=3994812&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518156947700=fsData1518156947700
import urllib.request
import pandas as pd
import json
class Bond(object):
"""docstring for Bond"""
def __init__(self):
self.index = {
'国债指数':'0000121',
'沪市企业债':'0000131',
'深圳企业债':'3994812'
}
def bond_index(self,id):
url = r'http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id={}&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518154947301=fsData1518154947301'.format(id)
raw = json.loads(urllib.request.urlopen(url).read())
head = ['日期','开盘','收盘','最高','最低','成交量','成交金额','振幅']  # date, open, close, high, low, volume, turnover, amplitude
return pd.DataFrame(list(map(lambda x:x.split(','),raw['data'])),columns=head) | mit | -7,352,327,051,511,593,000 | 46.5 | 155 | 0.707981 | false | 2.020873 | false | false | false |
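if __name__ == '__main__':
    # Example: fetch daily k-line data for the Treasury bond index and show
    # the first rows. Requires network access to eastmoney.com.
    b = Bond()
    df = b.bond_index(b.index['国债指数'])
    print(df.head())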
skatsuta/aerospike-training | book/exercise/Key-valueOperations/Python/Program.py | 1 | 8944 | #!/usr/bin/env python
#
# * Copyright 2012-2014 by Aerospike.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a copy
# * of this software and associated documentation files (the "Software"), to
# * deal in the Software without restriction, including without limitation the
# * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# * sell copies of the Software, and to permit persons to whom the Software is
# * furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included in
# * all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# * IN THE SOFTWARE.
#
from __future__ import print_function
import aerospike
import sys
from optparse import OptionParser
from UserService import UserService
from TweetService import TweetService
#
# * @author Raghavendra Kumar
#
class Program(object):
client=None
seedHost = str()
port = int()
namespace = str()
set = str()
writePolicy = {}
policy = {}
def __init__(self, host, port, namespace, set):
# TODO: Establish a connection to Aerospike cluster
# Exercise 1
print("\nTODO: Establish a connection to Aerospike cluster");
self.client = aerospike.client({ 'hosts': [ (host, port) ] }).connect()
self.seedHost = host
self.port = port
self.namespace = namespace
self.set = set
self.writePolicy = {}
self.policy = {}
@classmethod
def main(cls, args):
usage = "usage: %prog [options] "
optparser = OptionParser(usage=usage, add_help_option=False)
optparser.add_option( "--help", dest="help", action="store_true", help="Displays this message.")
optparser.add_option( "-h", "--host", dest="host", type="string", default="127.0.0.1", metavar="<ADDRESS>", help="Address of Aerospike server (default: 127.0.0.1)")
optparser.add_option( "-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>", help="Port of the Aerospike server (default: 3000)")
optparser.add_option( "-n", "--namespace", dest="namespace", type="string", metavar="<NAMESPACE>", help="Namespace (default: test)")
optparser.add_option( "-s", "--set", dest="set", type="string",metavar="<SET>", help="Set (default: demo)")
(options, args) = optparser.parse_args()
if options.help:
optparser.print_help()
print()
sys.exit(1)
aero=Program(options.host,options.port,options.namespace,options.set)
aero.work()
def work(self):
print("***** Welcome to Aerospike Developer Training *****\n")
print("INFO: Connecting to Aerospike cluster...")
# Establish connection to Aerospike server
# TODO: Check to see if the cluster connection succeeded
# Exercise 1
if not True:
print("\nERROR: Connection to Aerospike cluster failed! Please check the server settings and try again!")
else:
print("\nINFO: Connection to Aerospike cluster succeeded!\n")
# Create instance of UserService
us = UserService(self.client)
# Create instance of TweetService
ts = TweetService(self.client)
# Present options
print("\nWhat would you like to do:\n")
print("1> Create A User And A Tweet\n")
print("2> Read A User Record\n")
print("3> Batch Read Tweets For A User\n")
print("4> Scan All Tweets For All Users\n")
print("5> Record UDF -- Update User Password\n")
print("6> Query Tweets By Username And Users By Tweet Count Range\n")
print("7> Stream UDF -- Aggregation Based on Tweet Count By Region\n")
print("0> Exit\n")
print("\nSelect 0-7 and hit enter:\n")
try:
feature=int(raw_input('Input:'))
except ValueError:
print("Input a valid feature number")
sys.exit(0)
if feature != 0:
if feature==1:
print("\n********** Your Selection: Create User And A Tweet **********\n")
us.createUser()
ts.createTweet()
elif feature==2:
print("\n********** Your Selection: Read A User Record **********\n")
us.getUser()
elif feature==3:
print("\n********** Your Selection: Batch Read Tweets For A User **********\n")
us.batchGetUserTweets()
elif feature==4:
print("\n********** Your Selection: Scan All Tweets For All Users **********\n")
ts.scanAllTweetsForAllUsers()
elif feature==5:
print("\n********** Your Selection: Update User Password using CAS **********\n")
us.updatePasswordUsingCAS()
elif feature==6:
print("\n********** Your Selection: Query Tweets By Username And Users By Tweet Count Range **********\n")
ts.queryTweetsByUsername()
ts.queryUsersByTweetCount()
elif feature==7:
print("\n********** Your Selection: Stream UDF -- Aggregation Based on Tweet Count By Region **********\n")
us.aggregateUsersByTweetCountByRegion()
elif feature==12:
print("\n********** Create Users **********\n")
us.createUsers()
elif feature==23:
print("\n********** Create Tweets **********\n")
ts.createTweets()
else:
print ("Enter a Valid number from above menue !!")
        # Close Aerospike cluster connection
        # Exercise 1
        self.client.close()
#
# * example method calls
#
def readPartial(self, userName):
""" Python read specific bins """
        (key, metadata, record) = self.client.select(("test", "users", userName), ("username", "password", "gender", "region"))
return record
def readMeta(self, userName):
""" not supported in Python Client """
def write(self, username, password):
""" Python read-modify-write """
meta = None
wr_policy = {
AS_POLICY_W_GEN: AS_POLICY_GEN_EQ
}
key = ("test", "users", username)
self.client.put(key,{"username": username,"password": password},meta,wr_policy)
def delete(self, username):
""" Delete Record """
key = ("test", "users", username)
self.client.remove(key)
    def exists(self, username):
""" Python key exists """
key = ("test", "users", username)
(key,itsHere) = self.client.exists(key)
        # itsHere (the record metadata) is None when the key does not exist
return itsHere
def add(self, username):
""" Add """
key = ("test", "users", username)
self.client.put(key, {"tweetcount":1})
def touch(self, username):
""" Not supported in Python Client """
def append(self, username):
""" Not supported in Python Client """
def connectWithClientPolicy(self):
""" Connect with Client configs """
config = { 'hosts': [ ( '127.0.0.1', 3000 )
],
'policies': { 'timeout': 1000 # milliseconds
} }
client = aerospike.client(config)
def deleteBin(self, username):
key = ("test", "users", username)
# Set bin value to null to drop bin.
self.client.put(key, {"interests": None} )
AS_POLICY_W_GEN = "generation"
AS_POLICY_GEN_UNDEF = 0 # Use default value
AS_POLICY_GEN_IGNORE = 1 # Write a record, regardless of generation.
AS_POLICY_GEN_EQ = 2 # Write a record, ONLY if generations are equal
AS_POLICY_GEN_GT = 3 # Write a record, ONLY if local generation is
# greater-than remote generation.
AS_POLICY_GEN_DUP = 4 # Write a record creating a duplicate, ONLY if the generation collides.
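# Hypothetical sketch (not one of the training exercises): how the generation
# constants above combine with a read-modify-write, mirroring write() in
# Program. The key and bin names here are illustrative assumptions only.
def example_generation_safe_update(client):
    key = ("test", "users", "exampleuser")
    (key, meta, record) = client.get(key)  # meta carries the current generation
    record["tweetcount"] = record.get("tweetcount", 0) + 1
    # The put fails if another writer bumped the generation in the meantime.
    client.put(key, record, meta, {AS_POLICY_W_GEN: AS_POLICY_GEN_EQ})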
if __name__ == '__main__':
import sys
Program.main(sys.argv)
| mit | -1,456,257,365,734,570,800 | 42.629268 | 172 | 0.55814 | false | 4.224846 | true | false | false |
xianian/qt-creator | share/qtcreator/debugger/gdbbridge.py | 1 | 64687 |
try:
import __builtin__
except:
import builtins
try:
import gdb
except:
pass
import os
import os.path
import sys
import struct
import types
def warn(message):
print("XXX: %s\n" % message.encode("latin1"))
from dumper import *
#######################################################################
#
# Infrastructure
#
#######################################################################
def safePrint(output):
try:
print(output)
except:
out = ""
for c in output:
cc = ord(c)
if cc > 127:
out += "\\\\%d" % cc
elif cc < 0:
out += "\\\\%d" % (cc + 256)
else:
out += c
print(out)
def registerCommand(name, func):
class Command(gdb.Command):
def __init__(self):
super(Command, self).__init__(name, gdb.COMMAND_OBSCURE)
def invoke(self, args, from_tty):
safePrint(func(args))
Command()
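# Illustrative sketch only: any function taking the argument string and
# returning a printable value can be exposed as a gdb command through
# registerCommand. The command name "pvecho" is an assumption, not part of
# Qt Creator; it is left commented out to keep the command set unchanged.
#
#   def pvecho(args):
#       return "pvecho: %s" % args
#   registerCommand("pvecho", pvecho)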
#######################################################################
#
# Types
#
#######################################################################
PointerCode = gdb.TYPE_CODE_PTR
ArrayCode = gdb.TYPE_CODE_ARRAY
StructCode = gdb.TYPE_CODE_STRUCT
UnionCode = gdb.TYPE_CODE_UNION
EnumCode = gdb.TYPE_CODE_ENUM
FlagsCode = gdb.TYPE_CODE_FLAGS
FunctionCode = gdb.TYPE_CODE_FUNC
IntCode = gdb.TYPE_CODE_INT
FloatCode = gdb.TYPE_CODE_FLT # Parts of GDB assume that this means complex.
VoidCode = gdb.TYPE_CODE_VOID
#SetCode = gdb.TYPE_CODE_SET
RangeCode = gdb.TYPE_CODE_RANGE
StringCode = gdb.TYPE_CODE_STRING
#BitStringCode = gdb.TYPE_CODE_BITSTRING
#ErrorTypeCode = gdb.TYPE_CODE_ERROR
MethodCode = gdb.TYPE_CODE_METHOD
MethodPointerCode = gdb.TYPE_CODE_METHODPTR
MemberPointerCode = gdb.TYPE_CODE_MEMBERPTR
ReferenceCode = gdb.TYPE_CODE_REF
CharCode = gdb.TYPE_CODE_CHAR
BoolCode = gdb.TYPE_CODE_BOOL
ComplexCode = gdb.TYPE_CODE_COMPLEX
TypedefCode = gdb.TYPE_CODE_TYPEDEF
NamespaceCode = gdb.TYPE_CODE_NAMESPACE
#Code = gdb.TYPE_CODE_DECFLOAT # Decimal floating point.
#Code = gdb.TYPE_CODE_MODULE # Fortran
#Code = gdb.TYPE_CODE_INTERNAL_FUNCTION
#######################################################################
#
# Convenience
#
#######################################################################
# Just convienience for 'python print ...'
class PPCommand(gdb.Command):
def __init__(self):
super(PPCommand, self).__init__("pp", gdb.COMMAND_OBSCURE)
def invoke(self, args, from_tty):
print(eval(args))
PPCommand()
# Just convienience for 'python print gdb.parse_and_eval(...)'
class PPPCommand(gdb.Command):
def __init__(self):
super(PPPCommand, self).__init__("ppp", gdb.COMMAND_OBSCURE)
def invoke(self, args, from_tty):
print(gdb.parse_and_eval(args))
PPPCommand()
def scanStack(p, n):
p = int(p)
r = []
for i in xrange(n):
f = gdb.parse_and_eval("{void*}%s" % p)
m = gdb.execute("info symbol %s" % f, to_string=True)
if not m.startswith("No symbol matches"):
r.append(m)
p += f.type.sizeof
return r
class ScanStackCommand(gdb.Command):
def __init__(self):
super(ScanStackCommand, self).__init__("scanStack", gdb.COMMAND_OBSCURE)
def invoke(self, args, from_tty):
if len(args) == 0:
args = 20
safePrint(scanStack(gdb.parse_and_eval("$sp"), int(args)))
ScanStackCommand()
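# Illustrative gdb prompt usage of the command registered above (the depth
# argument is optional and defaults to 20 stack words):
#   (gdb) scanStack 40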
#######################################################################
#
# Import plain gdb pretty printers
#
#######################################################################
class PlainDumper:
def __init__(self, printer):
self.printer = printer
self.typeCache = {}
def __call__(self, d, value):
printer = self.printer.invoke(value)
lister = getattr(printer, "children", None)
children = [] if lister is None else list(lister())
d.putType(self.printer.name)
val = printer.to_string()
if isinstance(val, str):
d.putValue(val)
else: # Assuming LazyString
d.putStdStringHelper(val.address, val.length, val.type.sizeof)
d.putNumChild(len(children))
if d.isExpanded():
with Children(d):
for child in children:
d.putSubItem(child[0], child[1])
def importPlainDumpers(args):
if args == "off":
gdb.execute("disable pretty-printer .* .*")
else:
theDumper.importPlainDumpers()
registerCommand("importPlainDumpers", importPlainDumpers)
class OutputSafer:
def __init__(self, d):
self.d = d
def __enter__(self):
self.savedOutput = self.d.output
self.d.output = []
def __exit__(self, exType, exValue, exTraceBack):
if self.d.passExceptions and not exType is None:
showException("OUTPUTSAFER", exType, exValue, exTraceBack)
self.d.output = self.savedOutput
else:
self.savedOutput.extend(self.d.output)
self.d.output = self.savedOutput
return False
#def couldBePointer(p, align):
# typeobj = lookupType("unsigned int")
# ptr = gdb.Value(p).cast(typeobj)
# d = int(str(ptr))
# warn("CHECKING : %s %d " % (p, ((d & 3) == 0 and (d > 1000 or d == 0))))
# return (d & (align - 1)) and (d > 1000 or d == 0)
Value = gdb.Value
def stripTypedefs(typeobj):
typeobj = typeobj.unqualified()
while typeobj.code == TypedefCode:
typeobj = typeobj.strip_typedefs().unqualified()
return typeobj
#######################################################################
#
# The Dumper Class
#
#######################################################################
class Dumper(DumperBase):
def __init__(self):
DumperBase.__init__(self)
# These values will be kept between calls to 'showData'.
self.isGdb = True
self.childEventAddress = None
self.typeCache = {}
self.typesReported = {}
self.typesToReport = {}
self.qtNamespaceToReport = None
self.qmlEngines = []
self.qmlBreakpoints = []
def prepare(self, args):
self.output = []
self.currentIName = ""
self.currentPrintsAddress = True
self.currentChildType = ""
self.currentChildNumChild = -1
self.currentMaxNumChild = -1
self.currentNumChild = -1
self.currentValue = ReportItem()
self.currentType = ReportItem()
self.currentAddress = None
# The guess does not need to be updated during a showData()
# as the result is fixed during that time (ignoring "active"
# dumpers causing loading of shared objects etc).
self.currentQtNamespaceGuess = None
self.resultVarName = args.get("resultvarname", "")
self.expandedINames = set(args.get("expanded", []))
self.stringCutOff = int(args.get("stringcutoff", 10000))
self.displayStringLimit = int(args.get("displaystringlimit", 100))
self.typeformats = args.get("typeformats", {})
self.formats = args.get("formats", {})
self.watchers = args.get("watchers", {})
self.qmlcontext = int(args.get("qmlcontext", "0"), 0)
self.useDynamicType = int(args.get("dyntype", "0"))
self.useFancy = int(args.get("fancy", "0"))
self.forceQtNamespace = int(args.get("forcens", "0"))
self.passExceptions = int(args.get("passExceptions", "0"))
self.nativeMixed = int(args.get("nativemixed", "0"))
self.autoDerefPointers = int(args.get("autoderef", "0"))
self.partialUpdate = int(args.get("partial", "0"))
self.fallbackQtVersion = 0x50200
self.sortStructMembers = bool(args.get("sortStructMembers", True))
#warn("NAMESPACE: '%s'" % self.qtNamespace())
#warn("EXPANDED INAMES: %s" % self.expandedINames)
#warn("WATCHERS: %s" % self.watchers)
def listOfLocals(self):
frame = gdb.selected_frame()
try:
block = frame.block()
#warn("BLOCK: %s " % block)
except RuntimeError as error:
#warn("BLOCK IN FRAME NOT ACCESSIBLE: %s" % error)
return []
except:
warn("BLOCK NOT ACCESSIBLE FOR UNKNOWN REASONS")
return []
items = []
shadowed = {}
while True:
if block is None:
warn("UNEXPECTED 'None' BLOCK")
break
for symbol in block:
name = symbol.print_name
if name == "__in_chrg" or name == "__PRETTY_FUNCTION__":
continue
# "NotImplementedError: Symbol type not yet supported in
# Python scripts."
#warn("SYMBOL %s (%s): " % (symbol, name))
if name in shadowed:
level = shadowed[name]
name1 = "%s@%s" % (name, level)
shadowed[name] = level + 1
else:
name1 = name
shadowed[name] = 1
#warn("SYMBOL %s (%s, %s)): " % (symbol, name, symbol.name))
item = self.LocalItem()
item.iname = "local." + name1
item.name = name1
try:
item.value = frame.read_var(name, block)
#warn("READ 1: %s" % item.value)
items.append(item)
continue
except:
pass
try:
#warn("READ 2: %s" % item.value)
item.value = frame.read_var(name)
items.append(item)
continue
except:
# RuntimeError: happens for
# void foo() { std::string s; std::wstring w; }
# ValueError: happens for (as of 2010/11/4)
# a local struct as found e.g. in
# gcc sources in gcc.c, int execute()
pass
try:
#warn("READ 3: %s %s" % (name, item.value))
item.value = gdb.parse_and_eval(name)
#warn("ITEM 3: %s" % item.value)
items.append(item)
except:
# Can happen in inlined code (see last line of
# RowPainter::paintChars(): "RuntimeError:
# No symbol \"__val\" in current context.\n"
pass
# The outermost block in a function has the function member
# FIXME: check whether this is guaranteed.
if not block.function is None:
break
block = block.superblock
return items
# Hack to avoid QDate* dumper timeouts with GDB 7.4 on 32 bit
# due to misaligned %ebx in SSE calls (qstring.cpp:findChar)
# This seems to be fixed in 7.9 (or earlier)
def canCallLocale(self):
return False if self.is32bit() else True
def showData(self, args):
self.prepare(args)
partialVariable = args.get("partialVariable", "")
isPartial = len(partialVariable) > 0
#
# Locals
#
self.output.append('data=[')
if self.qmlcontext:
locals = self.extractQmlVariables(self.qmlcontext)
elif isPartial:
parts = partialVariable.split('.')
name = parts[1]
item = self.LocalItem()
item.iname = parts[0] + '.' + name
item.name = name
try:
if parts[0] == 'local':
frame = gdb.selected_frame()
item.value = frame.read_var(name)
else:
item.name = self.hexdecode(name)
item.value = gdb.parse_and_eval(item.name)
except RuntimeError as error:
item.value = error
except:
item.value = "<no value>"
locals = [item]
else:
locals = self.listOfLocals()
# Take care of the return value of the last function call.
if len(self.resultVarName) > 0:
try:
item = self.LocalItem()
item.name = self.resultVarName
item.iname = "return." + self.resultVarName
item.value = self.parseAndEvaluate(self.resultVarName)
locals.append(item)
except:
# Don't bother. It's only supplementary information anyway.
pass
locals.sort(key = lambda item: item.name)
for item in locals:
value = self.downcast(item.value) if self.useDynamicType else item.value
with OutputSafer(self):
self.anonNumber = -1
if item.iname == "local.argv" and str(value.type) == "char **":
self.putSpecialArgv(value)
else:
# A "normal" local variable or parameter.
with TopLevelItem(self, item.iname):
self.put('iname="%s",' % item.iname)
self.put('name="%s",' % item.name)
self.putItem(value)
with OutputSafer(self):
self.handleWatches(args)
self.output.append('],typeinfo=[')
for name in self.typesToReport.keys():
typeobj = self.typesToReport[name]
# Happens e.g. for '(anonymous namespace)::InsertDefOperation'
if not typeobj is None:
self.output.append('{name="%s",size="%s"}'
% (self.hexencode(name), typeobj.sizeof))
self.output.append(']')
self.typesToReport = {}
if self.forceQtNamespace:
            self.qtNamespaceToReport = self.qtNamespace()
if self.qtNamespaceToReport:
self.output.append(',qtnamespace="%s"' % self.qtNamespaceToReport)
self.qtNamespaceToReport = None
self.output.append(',partial="%d"' % isPartial)
safePrint(''.join(self.output))
def enterSubItem(self, item):
if not item.iname:
item.iname = "%s.%s" % (self.currentIName, item.name)
#warn("INAME %s" % item.iname)
self.put('{')
#if not item.name is None:
if isinstance(item.name, str):
self.put('name="%s",' % item.name)
item.savedIName = self.currentIName
item.savedValue = self.currentValue
item.savedType = self.currentType
item.savedCurrentAddress = self.currentAddress
self.currentIName = item.iname
self.currentValue = ReportItem();
self.currentType = ReportItem();
self.currentAddress = None
def exitSubItem(self, item, exType, exValue, exTraceBack):
#warn("CURRENT VALUE: %s: %s %s" % (self.currentIName, self.currentValue, self.currentType))
if not exType is None:
if self.passExceptions:
showException("SUBITEM", exType, exValue, exTraceBack)
self.putNumChild(0)
self.putSpecialValue(SpecialNotAccessibleValue)
try:
if self.currentType.value:
typeName = self.stripClassTag(self.currentType.value)
if len(typeName) > 0 and typeName != self.currentChildType:
self.put('type="%s",' % typeName) # str(type.unqualified()) ?
if self.currentValue.value is None:
self.put('value="",encoding="%d","numchild="0",'
% SpecialNotAccessibleValue)
else:
if not self.currentValue.encoding is None:
self.put('valueencoded="%d",' % self.currentValue.encoding)
if self.currentValue.elided:
self.put('valueelided="%d",' % self.currentValue.elided)
self.put('value="%s",' % self.currentValue.value)
except:
pass
if not self.currentAddress is None:
self.put(self.currentAddress)
self.put('},')
self.currentIName = item.savedIName
self.currentValue = item.savedValue
self.currentType = item.savedType
self.currentAddress = item.savedCurrentAddress
return True
def parseAndEvaluate(self, exp):
return gdb.parse_and_eval(exp)
def callHelper(self, value, func, args):
# args is a tuple.
arg = ""
for i in range(len(args)):
if i:
arg += ','
a = args[i]
if (':' in a) and not ("'" in a):
arg = "'%s'" % a
else:
arg += a
#warn("CALL: %s -> %s(%s)" % (value, func, arg))
typeName = self.stripClassTag(str(value.type))
if typeName.find(":") >= 0:
typeName = "'" + typeName + "'"
# 'class' is needed, see http://sourceware.org/bugzilla/show_bug.cgi?id=11912
#exp = "((class %s*)%s)->%s(%s)" % (typeName, value.address, func, arg)
ptr = value.address if value.address else self.pokeValue(value)
exp = "((%s*)%s)->%s(%s)" % (typeName, ptr, func, arg)
#warn("CALL: %s" % exp)
result = gdb.parse_and_eval(exp)
#warn(" -> %s" % result)
if not value.address:
gdb.parse_and_eval("free(0x%x)" % ptr)
return result
def childWithName(self, value, name):
try:
return value[name]
except:
return None
def isBadPointer(self, value):
try:
target = value.dereference()
target.is_optimized_out # Access test.
return False
except:
return True
def makeValue(self, typeobj, init):
typename = "::" + self.stripClassTag(str(typeobj));
# Avoid malloc symbol clash with QVector.
gdb.execute("set $d = (%s*)calloc(sizeof(%s), 1)" % (typename, typename))
gdb.execute("set *$d = {%s}" % init)
value = gdb.parse_and_eval("$d").dereference()
#warn(" TYPE: %s" % value.type)
#warn(" ADDR: %s" % value.address)
#warn(" VALUE: %s" % value)
return value
def makeExpression(self, value):
typename = "::" + self.stripClassTag(str(value.type))
#warn(" TYPE: %s" % typename)
#exp = "(*(%s*)(&%s))" % (typename, value.address)
exp = "(*(%s*)(%s))" % (typename, value.address)
#warn(" EXP: %s" % exp)
return exp
    def makeStdString(self, init):
# Works only for small allocators, but they are usually empty.
gdb.execute("set $d=(std::string*)calloc(sizeof(std::string), 2)");
gdb.execute("call($d->basic_string(\"" + init +
"\",*(std::allocator<char>*)(1+$d)))")
value = gdb.parse_and_eval("$d").dereference()
#warn(" TYPE: %s" % value.type)
#warn(" ADDR: %s" % value.address)
#warn(" VALUE: %s" % value)
return value
def childAt(self, value, index):
field = value.type.fields()[index]
try:
# Official access in GDB 7.6 or later.
return value[field]
except:
pass
try:
            # Won't work with anon entities, traditionally with empty
# field name, but starting with GDB 7.7 commit b5b08fb4
# with None field name.
return value[field.name]
except:
pass
# FIXME: Cheat. There seems to be no official way to access
# the real item, so we pass back the value. That at least
# enables later ...["name"] style accesses as gdb handles
# them transparently.
return value
def fieldAt(self, typeobj, index):
return typeobj.fields()[index]
def simpleValue(self, value):
return str(value)
def directBaseClass(self, typeobj, index = 0):
for f in typeobj.fields():
if f.is_base_class:
if index == 0:
return f.type
index -= 1;
return None
def directBaseObject(self, value, index = 0):
for f in value.type.fields():
if f.is_base_class:
if index == 0:
return value.cast(f.type)
index -= 1;
return None
def checkPointer(self, p, align = 1):
if not self.isNull(p):
p.dereference()
def pointerValue(self, p):
return toInteger(p)
def isNull(self, p):
# The following can cause evaluation to abort with "UnicodeEncodeError"
# for invalid char *, as their "contents" is being examined
#s = str(p)
#return s == "0x0" or s.startswith("0x0 ")
#try:
# # Can fail with: "RuntimeError: Cannot access memory at address 0x5"
# return p.cast(self.lookupType("void").pointer()) == 0
#except:
# return False
try:
# Can fail with: "RuntimeError: Cannot access memory at address 0x5"
return toInteger(p) == 0
except:
return False
def templateArgument(self, typeobj, position):
try:
# This fails on stock 7.2 with
# "RuntimeError: No type named myns::QObject.\n"
return typeobj.template_argument(position)
except:
# That's something like "myns::QList<...>"
return self.lookupType(self.extractTemplateArgument(str(typeobj.strip_typedefs()), position))
def numericTemplateArgument(self, typeobj, position):
# Workaround for gdb < 7.1
try:
return int(typeobj.template_argument(position))
except RuntimeError as error:
# ": No type named 30."
msg = str(error)
msg = msg[14:-1]
# gdb at least until 7.4 produces for std::array<int, 4u>
# for template_argument(1): RuntimeError: No type named 4u.
if msg[-1] == 'u':
msg = msg[0:-1]
return int(msg)
def intType(self):
self.cachedIntType = self.lookupType('int')
self.intType = lambda: self.cachedIntType
return self.cachedIntType
def charType(self):
return self.lookupType('char')
def sizetType(self):
return self.lookupType('size_t')
def charPtrType(self):
return self.lookupType('char*')
def voidPtrType(self):
return self.lookupType('void*')
def addressOf(self, value):
return toInteger(value.address)
def createPointerValue(self, address, pointeeType):
# This might not always work:
# a Python 3 based GDB due to the bug addressed in
# https://sourceware.org/ml/gdb-patches/2013-09/msg00571.html
try:
return gdb.Value(address).cast(pointeeType.pointer())
except:
# Try _some_ fallback (good enough for the std::complex dumper)
return gdb.parse_and_eval("(%s*)%s" % (pointeeType, address))
def intSize(self):
return 4
def ptrSize(self):
self.cachedPtrSize = self.lookupType('void*').sizeof
self.ptrSize = lambda: self.cachedPtrSize
return self.cachedPtrSize
def pokeValue(self, value):
"""
Allocates inferior memory and copies the contents of value.
Returns a pointer to the copy.
"""
# Avoid malloc symbol clash with QVector
size = value.type.sizeof
data = value.cast(gdb.lookup_type("unsigned char").array(0, int(size - 1)))
string = ''.join("\\x%02x" % int(data[i]) for i in range(size))
exp = '(%s*)memcpy(calloc(%s, 1), "%s", %s)' % (value.type, size, string, size)
#warn("EXP: %s" % exp)
return toInteger(gdb.parse_and_eval(exp))
def createValue(self, address, referencedType):
try:
return gdb.Value(address).cast(referencedType.pointer()).dereference()
except:
# Try _some_ fallback (good enough for the std::complex dumper)
return gdb.parse_and_eval("{%s}%s" % (referencedType, address))
def setValue(self, address, typename, value):
cmd = "set {%s}%s=%s" % (typename, address, value)
gdb.execute(cmd)
def setValues(self, address, typename, values):
cmd = "set {%s[%s]}%s={%s}" \
% (typename, len(values), address, ','.join(map(str, values)))
gdb.execute(cmd)
def selectedInferior(self):
try:
# gdb.Inferior is new in gdb 7.2
self.cachedInferior = gdb.selected_inferior()
except:
# Pre gdb 7.4. Right now we don't have more than one inferior anyway.
self.cachedInferior = gdb.inferiors()[0]
# Memoize result.
self.selectedInferior = lambda: self.cachedInferior
return self.cachedInferior
def readRawMemory(self, addr, size):
mem = self.selectedInferior().read_memory(addr, size)
        if sys.version_info[0] >= 3:
            mem = mem.tobytes()
        return mem
def extractInt64(self, addr):
return struct.unpack("q", self.readRawMemory(addr, 8))[0]
def extractUInt64(self, addr):
return struct.unpack("Q", self.readRawMemory(addr, 8))[0]
def extractInt(self, addr):
return struct.unpack("i", self.readRawMemory(addr, 4))[0]
def extractUInt(self, addr):
return struct.unpack("I", self.readRawMemory(addr, 4))[0]
def extractShort(self, addr):
return struct.unpack("h", self.readRawMemory(addr, 2))[0]
def extractUShort(self, addr):
return struct.unpack("H", self.readRawMemory(addr, 2))[0]
def extractByte(self, addr):
return struct.unpack("b", self.readRawMemory(addr, 1))[0]
def findStaticMetaObject(self, typename):
return self.findSymbol(typename + "::staticMetaObject")
def findSymbol(self, symbolName):
try:
result = gdb.lookup_global_symbol(symbolName)
return result.value() if result else 0
except:
pass
# Older GDB ~7.4
try:
address = gdb.parse_and_eval("&'%s'" % symbolName)
typeobj = gdb.lookup_type(self.qtNamespace() + "QMetaObject")
return self.createPointerValue(address, typeobj)
except:
return 0
def put(self, value):
self.output.append(value)
def childRange(self):
if self.currentMaxNumChild is None:
return xrange(0, toInteger(self.currentNumChild))
return xrange(min(toInteger(self.currentMaxNumChild), toInteger(self.currentNumChild)))
def isArmArchitecture(self):
return 'arm' in gdb.TARGET_CONFIG.lower()
def isQnxTarget(self):
return 'qnx' in gdb.TARGET_CONFIG.lower()
def isWindowsTarget(self):
# We get i686-w64-mingw32
return 'mingw' in gdb.TARGET_CONFIG.lower()
def qtVersionString(self):
try:
return str(gdb.lookup_symbol("qVersion")[0].value()())
except:
pass
try:
ns = self.qtNamespace()
return str(gdb.parse_and_eval("((const char*(*)())'%sqVersion')()" % ns))
except:
pass
return None
def qtVersion(self):
try:
version = self.qtVersionString()
(major, minor, patch) = version[version.find('"')+1:version.rfind('"')].split('.')
qtversion = 0x10000 * int(major) + 0x100 * int(minor) + int(patch)
self.qtVersion = lambda: qtversion
return qtversion
except:
# Use fallback until we have a better answer.
return self.fallbackQtVersion
def isQt3Support(self):
if self.qtVersion() >= 0x050000:
return False
else:
try:
# This will fail on Qt 4 without Qt 3 support
gdb.execute("ptype QChar::null", to_string=True)
                self.cachedIsQt3Support = True
except:
                self.cachedIsQt3Support = False
# Memoize good results.
        self.isQt3Support = lambda: self.cachedIsQt3Support
        return self.cachedIsQt3Support
def putAddress(self, addr):
if self.currentPrintsAddress and not self.isCli:
try:
# addr can be "None", int(None) fails.
#self.put('addr="0x%x",' % int(addr))
self.currentAddress = 'addr="0x%x",' % toInteger(addr)
except:
pass
def putSimpleValue(self, value, encoding = None, priority = 0):
self.putValue(value, encoding, priority)
def putPointerValue(self, value):
# Use a lower priority
if value is None:
self.putEmptyValue(-1)
else:
self.putValue("0x%x" % value.cast(
self.lookupType("unsigned long")), None, -1)
def stripNamespaceFromType(self, typeName):
typename = self.stripClassTag(typeName)
ns = self.qtNamespace()
if len(ns) > 0 and typename.startswith(ns):
typename = typename[len(ns):]
pos = typename.find("<")
# FIXME: make it recognize foo<A>::bar<B>::iterator?
while pos != -1:
pos1 = typename.rfind(">", pos)
typename = typename[0:pos] + typename[pos1+1:]
pos = typename.find("<")
return typename
def isMovableType(self, typeobj):
if typeobj.code == PointerCode:
return True
if self.isSimpleType(typeobj):
return True
return self.isKnownMovableType(self.stripNamespaceFromType(str(typeobj)))
def putSubItem(self, component, value, tryDynamic=True):
with SubItem(self, component):
self.putItem(value, tryDynamic)
def isSimpleType(self, typeobj):
code = typeobj.code
return code == BoolCode \
or code == CharCode \
or code == IntCode \
or code == FloatCode \
or code == EnumCode
def simpleEncoding(self, typeobj):
code = typeobj.code
if code == BoolCode or code == CharCode:
return Hex2EncodedInt1
if code == IntCode:
if str(typeobj).find("unsigned") >= 0:
if typeobj.sizeof == 1:
return Hex2EncodedUInt1
if typeobj.sizeof == 2:
return Hex2EncodedUInt2
if typeobj.sizeof == 4:
return Hex2EncodedUInt4
if typeobj.sizeof == 8:
return Hex2EncodedUInt8
else:
if typeobj.sizeof == 1:
return Hex2EncodedInt1
if typeobj.sizeof == 2:
return Hex2EncodedInt2
if typeobj.sizeof == 4:
return Hex2EncodedInt4
if typeobj.sizeof == 8:
return Hex2EncodedInt8
if code == FloatCode:
if typeobj.sizeof == 4:
return Hex2EncodedFloat4
if typeobj.sizeof == 8:
return Hex2EncodedFloat8
return None
def isReferenceType(self, typeobj):
return typeobj.code == gdb.TYPE_CODE_REF
def isStructType(self, typeobj):
return typeobj.code == gdb.TYPE_CODE_STRUCT
def isFunctionType(self, typeobj):
return typeobj.code == MethodCode or typeobj.code == FunctionCode
def putItem(self, value, tryDynamic=True):
if value is None:
# Happens for non-available watchers in gdb versions that
# need to use gdb.execute instead of gdb.parse_and_eval
self.putSpecialValue(SpecialNotAvailableValue)
self.putType("<unknown>")
self.putNumChild(0)
return
typeobj = value.type.unqualified()
typeName = str(typeobj)
if value.is_optimized_out:
self.putSpecialValue(SpecialOptimizedOutValue)
self.putType(typeName)
self.putNumChild(0)
return
tryDynamic &= self.useDynamicType
self.addToCache(typeobj) # Fill type cache
if tryDynamic:
self.putAddress(value.address)
# FIXME: Gui shows references stripped?
#warn(" ")
#warn("REAL INAME: %s" % self.currentIName)
#warn("REAL TYPE: %s" % value.type)
#warn("REAL CODE: %s" % value.type.code)
#warn("REAL VALUE: %s" % value)
if typeobj.code == ReferenceCode:
try:
# Try to recognize null references explicitly.
if toInteger(value.address) == 0:
self.putSpecialValue(SpecialNullReferenceValue)
self.putType(typeName)
self.putNumChild(0)
return
except:
pass
if tryDynamic:
try:
# Dynamic references are not supported by gdb, see
# http://sourceware.org/bugzilla/show_bug.cgi?id=14077.
# Find the dynamic type manually using referenced_type.
value = value.referenced_value()
value = value.cast(value.dynamic_type)
self.putItem(value)
self.putBetterType("%s &" % value.type)
return
except:
pass
try:
# FIXME: This throws "RuntimeError: Attempt to dereference a
# generic pointer." with MinGW's gcc 4.5 when it "identifies"
# a "QWidget &" as "void &" and with optimized out code.
self.putItem(value.cast(typeobj.target().unqualified()))
self.putBetterType("%s &" % self.currentType.value)
return
except RuntimeError:
self.putSpecialValue(SpecialOptimizedOutValue)
self.putType(typeName)
self.putNumChild(0)
return
if typeobj.code == IntCode or typeobj.code == CharCode:
self.putType(typeName)
if typeobj.sizeof == 1:
# Force unadorned value transport for char and Co.
self.putValue(int(value) & 0xff)
else:
self.putValue(value)
self.putNumChild(0)
return
if typeobj.code == FloatCode or typeobj.code == BoolCode:
self.putType(typeName)
self.putValue(value)
self.putNumChild(0)
return
if typeobj.code == EnumCode:
self.putType(typeName)
self.putValue("%s (%d)" % (value, value))
self.putNumChild(0)
return
if typeobj.code == ComplexCode:
self.putType(typeName)
self.putValue("%s" % value)
self.putNumChild(0)
return
if typeobj.code == TypedefCode:
if typeName in self.qqDumpers:
self.putType(typeName)
self.qqDumpers[typeName](self, value)
return
typeobj = stripTypedefs(typeobj)
# The cast can destroy the address?
#self.putAddress(value.address)
# Workaround for http://sourceware.org/bugzilla/show_bug.cgi?id=13380
if typeobj.code == ArrayCode:
value = self.parseAndEvaluate("{%s}%s" % (typeobj, value.address))
else:
try:
value = value.cast(typeobj)
except:
self.putValue("<optimized out typedef>")
self.putType(typeName)
self.putNumChild(0)
return
self.putItem(value)
self.putBetterType(typeName)
return
if typeobj.code == ArrayCode:
self.putCStyleArray(value)
return
if typeobj.code == PointerCode:
# This could still be stored in a register and
# potentially dereferencable.
self.putFormattedPointer(value)
return
if typeobj.code == MethodPointerCode \
or typeobj.code == MethodCode \
or typeobj.code == FunctionCode \
or typeobj.code == MemberPointerCode:
self.putType(typeName)
self.putValue(value)
self.putNumChild(0)
return
if typeName.startswith("<anon"):
# Anonymous union. We need a dummy name to distinguish
# multiple anonymous unions in the struct.
self.putType(typeobj)
self.putSpecialValue(SpecialEmptyStructureValue)
self.anonNumber += 1
with Children(self, 1):
self.listAnonymous(value, "#%d" % self.anonNumber, typeobj)
return
if typeobj.code == StringCode:
# FORTRAN strings
size = typeobj.sizeof
            data = self.readMemory(value.address, size)
            self.putValue(data, Hex2EncodedLatin1, 1)
            self.putType(typeobj)
            return
if typeobj.code != StructCode and typeobj.code != UnionCode:
warn("WRONG ASSUMPTION HERE: %s " % typeobj.code)
self.check(False)
if tryDynamic:
self.putItem(self.expensiveDowncast(value), False)
return
if self.tryPutPrettyItem(typeName, value):
return
# D arrays, gdc compiled.
if typeName.endswith("[]"):
n = value["length"]
base = value["ptr"]
self.putType(typeName)
self.putItemCount(n)
if self.isExpanded():
self.putArrayData(base.type.target(), base, n)
return
#warn("GENERIC STRUCT: %s" % typeobj)
#warn("INAME: %s " % self.currentIName)
#warn("INAMES: %s " % self.expandedINames)
#warn("EXPANDED: %s " % (self.currentIName in self.expandedINames))
staticMetaObject = self.extractStaticMetaObject(value.type)
if staticMetaObject:
self.putQObjectNameValue(value)
self.putType(typeName)
self.putEmptyValue()
self.putNumChild(len(typeobj.fields()))
if self.currentIName in self.expandedINames:
innerType = None
with Children(self, 1, childType=innerType):
self.putFields(value)
if staticMetaObject:
self.putQObjectGuts(value, staticMetaObject)
def toBlob(self, value):
size = toInteger(value.type.sizeof)
if value.address:
return self.extractBlob(value.address, size)
# No address. Possibly the result of an inferior call.
y = value.cast(gdb.lookup_type("unsigned char").array(0, int(size - 1)))
buf = bytearray(struct.pack('x' * size))
for i in range(size):
buf[i] = int(y[i])
return Blob(bytes(buf))
def extractBlob(self, base, size):
inferior = self.selectedInferior()
return Blob(inferior.read_memory(base, size))
def readCString(self, base):
inferior = self.selectedInferior()
mem = ""
while True:
            char = inferior.read_memory(base, 1)[0]
            # Stop at the NUL terminator ('\x00' on py2, int 0 on py3).
            if char in ('\x00', 0):
                break
mem += char
base += 1
#if sys.version_info[0] >= 3:
# return mem.tobytes()
return mem
def putFields(self, value, dumpBase = True):
fields = value.type.fields()
if self.sortStructMembers:
def sortOrder(field):
if field.is_base_class:
return 0
if field.name and field.name.startswith("_vptr."):
return 1
return 2
fields.sort(key = lambda field: "%d%s" % (sortOrder(field), field.name))
#warn("TYPE: %s" % value.type)
#warn("FIELDS: %s" % fields)
baseNumber = 0
for field in fields:
#warn("FIELD: %s" % field)
#warn(" BITSIZE: %s" % field.bitsize)
#warn(" ARTIFICIAL: %s" % field.artificial)
# Since GDB commit b5b08fb4 anonymous structs get also reported
# with a 'None' name.
if field.name is None:
if value.type.code == ArrayCode:
# An array.
typeobj = stripTypedefs(value.type)
innerType = typeobj.target()
p = value.cast(innerType.pointer())
for i in xrange(int(typeobj.sizeof / innerType.sizeof)):
with SubItem(self, i):
self.putItem(p.dereference())
p = p + 1
else:
# Something without a name.
self.anonNumber += 1
with SubItem(self, str(self.anonNumber)):
self.putItem(value[field])
continue
# Ignore vtable pointers for virtual inheritance.
if field.name.startswith("_vptr."):
with SubItem(self, "[vptr]"):
# int (**)(void)
n = 100
self.putType(" ")
self.putValue(value[field.name])
self.putNumChild(n)
if self.isExpanded():
with Children(self):
p = value[field.name]
for i in xrange(n):
if toInteger(p.dereference()) != 0:
with SubItem(self, i):
self.putItem(p.dereference())
self.putType(" ")
p = p + 1
continue
#warn("FIELD NAME: %s" % field.name)
#warn("FIELD TYPE: %s" % field.type)
if field.is_base_class:
# Field is base type. We cannot use field.name as part
# of the iname as it might contain spaces and other
# strange characters.
if dumpBase:
baseNumber += 1
with UnnamedSubItem(self, "@%d" % baseNumber):
baseValue = value.cast(field.type)
self.putBaseClassName(field.name)
self.putAddress(baseValue.address)
self.putItem(baseValue, False)
elif len(field.name) == 0:
# Anonymous union. We need a dummy name to distinguish
# multiple anonymous unions in the struct.
self.anonNumber += 1
self.listAnonymous(value, "#%d" % self.anonNumber,
field.type)
else:
# Named field.
with SubItem(self, field.name):
#bitsize = getattr(field, "bitsize", None)
#if not bitsize is None:
# self.put("bitsize=\"%s\"" % bitsize)
self.putItem(self.downcast(value[field.name]))
def putBaseClassName(self, name):
self.put('iname="%s",' % self.currentIName)
self.put('name="[%s]",' % name)
def listAnonymous(self, value, name, typeobj):
for field in typeobj.fields():
#warn("FIELD NAME: %s" % field.name)
if field.name:
with SubItem(self, field.name):
self.putItem(value[field.name])
else:
# Further nested.
self.anonNumber += 1
name = "#%d" % self.anonNumber
#iname = "%s.%s" % (selitem.iname, name)
#child = SameItem(item.value, iname)
with SubItem(self, name):
self.put('name="%s",' % name)
self.putEmptyValue()
fieldTypeName = str(field.type)
if fieldTypeName.endswith("<anonymous union>"):
self.putType("<anonymous union>")
elif fieldTypeName.endswith("<anonymous struct>"):
self.putType("<anonymous struct>")
else:
self.putType(fieldTypeName)
with Children(self, 1):
self.listAnonymous(value, name, field.type)
#def threadname(self, maximalStackDepth, objectPrivateType):
# e = gdb.selected_frame()
# out = ""
# ns = self.qtNamespace()
# while True:
# maximalStackDepth -= 1
# if maximalStackDepth < 0:
# break
# e = e.older()
# if e == None or e.name() == None:
# break
# if e.name() == ns + "QThreadPrivate::start" \
# or e.name() == "_ZN14QThreadPrivate5startEPv@4":
# try:
# thrptr = e.read_var("thr").dereference()
# d_ptr = thrptr["d_ptr"]["d"].cast(objectPrivateType).dereference()
# try:
# objectName = d_ptr["objectName"]
# except: # Qt 5
# p = d_ptr["extraData"]
# if not self.isNull(p):
# objectName = p.dereference()["objectName"]
# if not objectName is None:
# data, size, alloc = self.stringData(objectName)
# if size > 0:
# s = self.readMemory(data, 2 * size)
#
# thread = gdb.selected_thread()
# inner = '{valueencoded="';
# inner += str(Hex4EncodedLittleEndianWithoutQuotes)+'",id="'
# inner += str(thread.num) + '",value="'
# inner += s
# #inner += self.encodeString(objectName)
# inner += '"},'
#
# out += inner
# except:
# pass
# return out
def threadnames(self, maximalStackDepth):
# FIXME: This needs a proper implementation for MinGW, and only there.
# Linux, Mac and QNX mirror the objectName() to the underlying threads,
# so we get the names already as part of the -thread-info output.
return '[]'
#out = '['
#oldthread = gdb.selected_thread()
#if oldthread:
# try:
# objectPrivateType = gdb.lookup_type(ns + "QObjectPrivate").pointer()
# inferior = self.selectedInferior()
# for thread in inferior.threads():
# thread.switch()
# out += self.threadname(maximalStackDepth, objectPrivateType)
# except:
# pass
# oldthread.switch()
#return out + ']'
def importPlainDumper(self, printer):
name = printer.name.replace("::", "__")
self.qqDumpers[name] = PlainDumper(printer)
self.qqFormats[name] = ""
def importPlainDumpers(self):
for obj in gdb.objfiles():
for printers in obj.pretty_printers + gdb.pretty_printers:
for printer in printers.subprinters:
self.importPlainDumper(printer)
def qtNamespace(self):
if not self.currentQtNamespaceGuess is None:
return self.currentQtNamespaceGuess
# This only works when called from a valid frame.
try:
cand = "QArrayData::shared_null"
symbol = gdb.lookup_symbol(cand)[0]
if symbol:
ns = symbol.name[:-len(cand)]
self.qtNamespaceToReport = ns
self.qtNamespace = lambda: ns
return ns
except:
pass
try:
# This is Qt, but not 5.x.
cand = "QByteArray::shared_null"
symbol = gdb.lookup_symbol(cand)[0]
if symbol:
ns = symbol.name[:-len(cand)]
self.qtNamespaceToReport = ns
self.qtNamespace = lambda: ns
self.fallbackQtVersion = 0x40800
return ns
except:
pass
try:
# Last fall backs.
s = gdb.execute("ptype QByteArray", to_string=True)
if s.find("QMemArray") >= 0:
# Qt 3.
self.qtNamespaceToReport = ""
self.qtNamespace = lambda: ""
self.qtVersion = lambda: 0x30308
self.fallbackQtVersion = 0x30308
return ""
# Seemingly needed with Debian's GDB 7.4.1
ns = s[s.find("class")+6:s.find("QByteArray")]
if len(ns):
self.qtNamespaceToReport = ns
self.qtNamespace = lambda: ns
return ns
except:
pass
self.currentQtNamespaceGuess = ""
return ""
def assignValue(self, args):
typeName = self.hexdecode(args['type'])
expr = self.hexdecode(args['expr'])
value = self.hexdecode(args['value'])
simpleType = int(args['simpleType'])
ns = self.qtNamespace()
if typeName.startswith(ns):
typeName = typeName[len(ns):]
typeName = typeName.replace("::", "__")
pos = typeName.find('<')
if pos != -1:
typeName = typeName[0:pos]
if typeName in self.qqEditable and not simpleType:
#self.qqEditable[typeName](self, expr, value)
expr = gdb.parse_and_eval(expr)
self.qqEditable[typeName](self, expr, value)
else:
cmd = "set variable (%s)=%s" % (expr, value)
gdb.execute(cmd)
def hasVTable(self, typeobj):
fields = typeobj.fields()
if len(fields) == 0:
return False
if fields[0].is_base_class:
return hasVTable(fields[0].type)
return str(fields[0].type) == "int (**)(void)"
def dynamicTypeName(self, value):
if self.hasVTable(value.type):
#vtbl = str(gdb.parse_and_eval("{int(*)(int)}%s" % int(value.address)))
try:
# Fails on 7.1 due to the missing to_string.
vtbl = gdb.execute("info symbol {int*}%s" % int(value.address),
to_string = True)
pos1 = vtbl.find("vtable ")
if pos1 != -1:
pos1 += 11
pos2 = vtbl.find(" +", pos1)
if pos2 != -1:
return vtbl[pos1 : pos2]
except:
pass
return str(value.type)
def downcast(self, value):
try:
return value.cast(value.dynamic_type)
except:
pass
#try:
# return value.cast(self.lookupType(self.dynamicTypeName(value)))
#except:
# pass
return value
def expensiveDowncast(self, value):
try:
return value.cast(value.dynamic_type)
except:
pass
try:
return value.cast(self.lookupType(self.dynamicTypeName(value)))
except:
pass
return value
def addToCache(self, typeobj):
typename = str(typeobj)
if typename in self.typesReported:
return
self.typesReported[typename] = True
self.typesToReport[typename] = typeobj
def enumExpression(self, enumType, enumValue):
return self.qtNamespace() + "Qt::" + enumValue
def lookupType(self, typestring):
typeobj = self.typeCache.get(typestring)
#warn("LOOKUP 1: %s -> %s" % (typestring, typeobj))
if not typeobj is None:
return typeobj
if typestring == "void":
typeobj = gdb.lookup_type(typestring)
self.typeCache[typestring] = typeobj
self.typesToReport[typestring] = typeobj
return typeobj
#try:
# typeobj = gdb.parse_and_eval("{%s}&main" % typestring).typeobj
# if not typeobj is None:
# self.typeCache[typestring] = typeobj
# self.typesToReport[typestring] = typeobj
# return typeobj
#except:
# pass
# See http://sourceware.org/bugzilla/show_bug.cgi?id=13269
# gcc produces "{anonymous}", gdb "(anonymous namespace)"
# "<unnamed>" has been seen too. The only thing gdb
# understands when reading things back is "(anonymous namespace)"
if typestring.find("{anonymous}") != -1:
ts = typestring
ts = ts.replace("{anonymous}", "(anonymous namespace)")
typeobj = self.lookupType(ts)
if not typeobj is None:
self.typeCache[typestring] = typeobj
self.typesToReport[typestring] = typeobj
return typeobj
#warn(" RESULT FOR 7.2: '%s': %s" % (typestring, typeobj))
# This part should only trigger for
# gdb 7.1 for types with namespace separators.
# And anonymous namespaces.
ts = typestring
while True:
#warn("TS: '%s'" % ts)
if ts.startswith("class "):
ts = ts[6:]
elif ts.startswith("struct "):
ts = ts[7:]
elif ts.startswith("const "):
ts = ts[6:]
elif ts.startswith("volatile "):
ts = ts[9:]
elif ts.startswith("enum "):
ts = ts[5:]
elif ts.endswith(" const"):
ts = ts[:-6]
elif ts.endswith(" volatile"):
ts = ts[:-9]
elif ts.endswith("*const"):
ts = ts[:-5]
elif ts.endswith("*volatile"):
ts = ts[:-8]
else:
break
if ts.endswith('*'):
typeobj = self.lookupType(ts[0:-1])
if not typeobj is None:
typeobj = typeobj.pointer()
self.typeCache[typestring] = typeobj
self.typesToReport[typestring] = typeobj
return typeobj
try:
#warn("LOOKING UP '%s'" % ts)
typeobj = gdb.lookup_type(ts)
except RuntimeError as error:
#warn("LOOKING UP '%s': %s" % (ts, error))
# See http://sourceware.org/bugzilla/show_bug.cgi?id=11912
exp = "(class '%s'*)0" % ts
try:
typeobj = self.parseAndEvaluate(exp).type.target()
except:
# Can throw "RuntimeError: No type named class Foo."
pass
except:
#warn("LOOKING UP '%s' FAILED" % ts)
pass
if not typeobj is None:
self.typeCache[typestring] = typeobj
self.typesToReport[typestring] = typeobj
return typeobj
# This could still be None as gdb.lookup_type("char[3]") generates
# "RuntimeError: No type named char[3]"
self.typeCache[typestring] = typeobj
self.typesToReport[typestring] = typeobj
return typeobj
def stackListFrames(self, args):
def fromNativePath(str):
return str.replace('\\', '/')
limit = int(args['limit'])
if limit <= 0:
limit = 10000
options = args['options']
opts = {}
if options == "nativemixed":
opts["nativemixed"] = 1
self.prepare(opts)
self.output = []
frame = gdb.newest_frame()
i = 0
self.currentCallContext = None
while i < limit and frame:
with OutputSafer(self):
name = frame.name()
functionName = "??" if name is None else name
fileName = ""
objfile = ""
fullName = ""
pc = frame.pc()
sal = frame.find_sal()
line = -1
if sal:
line = sal.line
symtab = sal.symtab
if not symtab is None:
objfile = fromNativePath(symtab.objfile.filename)
fileName = fromNativePath(symtab.filename)
fullName = symtab.fullname()
if fullName is None:
fullName = ""
else:
fullName = fromNativePath(fullName)
if self.nativeMixed:
if self.isReportableQmlFrame(functionName):
engine = frame.read_var("engine")
h = self.extractQmlLocation(engine)
self.put(('frame={level="%s",func="%s",file="%s",'
'fullname="%s",line="%s",language="js",addr="0x%x"}')
% (i, h['functionName'], h['fileName'], h['fileName'],
h['lineNumber'], h['context']))
i += 1
frame = frame.older()
continue
if self.isInternalQmlFrame(functionName):
frame = frame.older()
self.put(('frame={level="%s",addr="0x%x",func="%s",'
'file="%s",fullname="%s",line="%s",'
'from="%s",language="c",usable="0"}') %
(i, pc, functionName, fileName, fullName, line, objfile))
i += 1
frame = frame.older()
continue
self.put(('frame={level="%s",addr="0x%x",func="%s",'
'file="%s",fullname="%s",line="%s",'
'from="%s",language="c"}') %
(i, pc, functionName, fileName, fullName, line, objfile))
frame = frame.older()
i += 1
safePrint(''.join(self.output))
def createResolvePendingBreakpointsHookBreakpoint(self, args):
class Resolver(gdb.Breakpoint):
def __init__(self, dumper, args):
self.dumper = dumper
self.args = args
spec = "qt_v4ResolvePendingBreakpointsHook"
print("Preparing hook to resolve pending QML breakpoint at %s" % args)
super(Resolver, self).\
__init__(spec, gdb.BP_BREAKPOINT, internal=True, temporary=False)
def stop(self):
bp = self.dumper.doInsertQmlBreakpoint(args)
print("Resolving QML breakpoint %s -> %s" % (args, bp))
self.enabled = False
return False
self.qmlBreakpoints.append(Resolver(self, args))
def exitGdb(self, _):
gdb.execute("quit")
def loadDumpers(self, args):
self.setupDumpers()
def reportDumpers(self, msg):
print(msg)
def profile1(self, args):
"""Internal profiling"""
import tempfile
import cProfile
tempDir = tempfile.gettempdir() + "/bbprof"
cProfile.run('theDumper.showData(%s)' % args, tempDir)
import pstats
pstats.Stats(tempDir).sort_stats('time').print_stats()
def profile2(self, args):
import timeit
print(timeit.repeat('theDumper.showData(%s)' % args,
'from __main__ import theDumper', number=10))
class CliDumper(Dumper):
def __init__(self):
Dumper.__init__(self)
self.childrenPrefix = '['
        self.childrenSuffix = '] '
self.indent = 0
self.isCli = True
def reportDumpers(self, msg):
return msg
def enterSubItem(self, item):
if not item.iname:
item.iname = "%s.%s" % (self.currentIName, item.name)
self.indent += 1
self.putNewline()
if isinstance(item.name, str):
self.output += item.name + ' = '
item.savedIName = self.currentIName
item.savedValue = self.currentValue
item.savedType = self.currentType
item.savedCurrentAddress = self.currentAddress
self.currentIName = item.iname
self.currentValue = ReportItem();
self.currentType = ReportItem();
self.currentAddress = None
def exitSubItem(self, item, exType, exValue, exTraceBack):
self.indent -= 1
#warn("CURRENT VALUE: %s: %s %s" %
# (self.currentIName, self.currentValue, self.currentType))
if not exType is None:
if self.passExceptions:
showException("SUBITEM", exType, exValue, exTraceBack)
self.putNumChild(0)
self.putSpecialValue(SpecialNotAccessibleValue)
try:
if self.currentType.value:
typeName = self.stripClassTag(self.currentType.value)
self.put('<%s> = {' % typeName)
if self.currentValue.value is None:
self.put('<not accessible>')
else:
value = self.currentValue.value
if self.currentValue.encoding is Hex2EncodedLatin1:
value = self.hexdecode(value)
elif self.currentValue.encoding is Hex2EncodedUtf8:
value = self.hexdecode(value)
elif self.currentValue.encoding is Hex4EncodedLittleEndian:
b = bytes.fromhex(value)
value = codecs.decode(b, 'utf-16')
self.put('"%s"' % value)
if self.currentValue.elided:
self.put('...')
if self.currentType.value:
self.put('}')
except:
pass
if not self.currentAddress is None:
self.put(self.currentAddress)
self.currentIName = item.savedIName
self.currentValue = item.savedValue
self.currentType = item.savedType
self.currentAddress = item.savedCurrentAddress
return True
def putNewline(self):
self.output += '\n' + ' ' * self.indent
def put(self, line):
if self.output.endswith('\n'):
self.output = self.output[0:-1]
self.output += line
def putNumChild(self, numchild):
pass
def putBaseClassName(self, name):
pass
def putOriginalAddress(self, value):
pass
def putAddressRange(self, base, step):
return True
def showData(self, args):
args['fancy'] = 1
        args['passExceptions'] = 1
args['autoderef'] = 1
name = args['varlist']
self.prepare(args)
self.output = name + ' = '
frame = gdb.selected_frame()
value = frame.read_var(name)
with TopLevelItem(self, name):
self.putItem(value)
return self.output
# Global instance.
if gdb.parameter('height') is None:
theDumper = Dumper()
else:
import codecs
theDumper = CliDumper()
######################################################################
#
# ThreadNames Command
#
#######################################################################
def threadnames(arg):
return theDumper.threadnames(int(arg))
registerCommand("threadnames", threadnames)
#######################################################################
#
# Native Mixed
#
#######################################################################
#class QmlEngineCreationTracker(gdb.Breakpoint):
# def __init__(self):
# spec = "QQmlEnginePrivate::init"
# super(QmlEngineCreationTracker, self).\
# __init__(spec, gdb.BP_BREAKPOINT, internal=True)
#
# def stop(self):
# engine = gdb.parse_and_eval("q_ptr")
# print("QML engine created: %s" % engine)
# theDumper.qmlEngines.append(engine)
# return False
#
#QmlEngineCreationTracker()
class TriggeredBreakpointHookBreakpoint(gdb.Breakpoint):
def __init__(self):
spec = "qt_v4TriggeredBreakpointHook"
super(TriggeredBreakpointHookBreakpoint, self).\
__init__(spec, gdb.BP_BREAKPOINT, internal=True)
def stop(self):
print("QML engine stopped.")
return True
TriggeredBreakpointHookBreakpoint()
| lgpl-2.1 | 4,238,433,531,481,384,400 | 34.328782 | 105 | 0.519471 | false | 4.142885 | false | false | false |
ClearCorp/server-tools | external_file_location/models/task.py | 1 | 8567 | # coding: utf-8
# @ 2015 Valentin CHEMIERE @ Akretion
# © @author Mourad EL HADJ MIMOUNE <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
import openerp
from openerp import tools
from base64 import b64encode
import os
import datetime
import logging
_logger = logging.getLogger(__name__)
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
variable_start_string="${",
variable_end_string="}",
line_statement_prefix="%",
trim_blocks=True, # do not output newline after blocks
)
mako_template_env.globals.update({
'str': str,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class Task(models.Model):
_name = 'external.file.task'
_description = 'External file task'
name = fields.Char(required=True)
method_type = fields.Selection(
[('import', 'Import'), ('export', 'Export')],
required=True)
    filename = fields.Char(help='File name which is imported. '
                                'You can use a file pattern like *.txt '
                                'to import all txt files')
filepath = fields.Char(help='Path to imported/exported file')
location_id = fields.Many2one('external.file.location', string='Location',
required=True)
attachment_ids = fields.One2many('ir.attachment.metadata', 'task_id',
string='Attachment')
move_path = fields.Char(string='Move Path',
help='Imported File will be moved to this path')
    new_name = fields.Char(string='New Name',
                           help='Imported File will be renamed to this name. '
                                'Name can use a mako template where obj is an '
                                'ir.attachment.metadata record. Template example: '
                                '${obj.name}-${obj.create_date}.csv')
    md5_check = fields.Boolean(help='Control file integrity after import with'
                                    ' an md5 file')
after_import = fields.Selection(selection='_get_action',
help='Action after import a file')
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get(
'external.file.task'))
file_type = fields.Selection(
selection=[],
string="File Type",
help="The file type determines an import method to be used "
"to parse and transform data before their import in ERP")
active = fields.Boolean(default=True)
def _get_action(self):
return [('rename', 'Rename'),
('move', 'Move'),
('move_rename', 'Move & Rename'),
('delete', 'Delete'),
]
@api.multi
def _prepare_attachment_vals(self, datas, filename, md5_datas):
self.ensure_one()
vals = {
'name': filename,
'datas': b64encode(datas),
'datas_fname': filename,
'task_id': self.id,
'external_hash': md5_datas,
'file_type': self.file_type or False,
}
return vals
@api.model
def _template_render(self, template, record):
try:
template = mako_template_env.from_string(tools.ustr(template))
except Exception:
_logger.exception("Failed to load template %r", template)
variables = {'obj': record}
try:
render_result = template.render(variables)
except Exception:
_logger.exception(
"Failed to render template %r using values %r" %
(template, variables))
render_result = u""
if render_result == u"False":
render_result = u""
return render_result
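    # Illustrative sketch (record and dates are assumptions): rendering the
    # "new_name" template against an ir.attachment.metadata record, as
    # run_import() does below:
    #   self._template_render("${obj.name}-${obj.create_date}.csv", attachment)
    #   -> u"invoice-2015-06-01 12:00:00.csv"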
@api.model
def run_task_scheduler(self, domain=None):
if domain is None:
domain = []
tasks = self.env['external.file.task'].search(domain)
for task in tasks:
if task.method_type == 'import':
task.run_import()
elif task.method_type == 'export':
task.run_export()
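    # Sketch of a scheduled invocation (the cron setup is an assumption);
    # an ir.cron record would typically call:
    #   env['external.file.task'].run_task_scheduler(
    #       [('method_type', '=', 'import')])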
@api.multi
def run_import(self):
self.ensure_one()
protocols = self.env['external.file.location']._get_classes()
cls = protocols.get(self.location_id.protocol)[1]
attach_obj = self.env['ir.attachment.metadata']
with cls.connect(self.location_id) as conn:
md5_datas = ''
for file_name in conn.listdir(path=self.filepath,
wildcard=self.filename or '',
files_only=True):
with api.Environment.manage():
with openerp.registry(
self.env.cr.dbname).cursor() as new_cr:
new_env = api.Environment(new_cr, self.env.uid,
self.env.context)
try:
full_path = os.path.join(self.filepath, file_name)
file_data = conn.open(full_path, 'rb')
datas = file_data.read()
if self.md5_check:
md5_file = conn.open(full_path + '.md5', 'rb')
md5_datas = md5_file.read().rstrip('\r\n')
attach_vals = self._prepare_attachment_vals(
datas, file_name, md5_datas)
attachment = attach_obj.with_env(new_env).create(
attach_vals)
new_full_path = False
if self.after_import == 'rename':
new_name = self._template_render(
self.new_name, attachment)
new_full_path = os.path.join(
self.filepath, new_name)
elif self.after_import == 'move':
new_full_path = os.path.join(
self.move_path, file_name)
elif self.after_import == 'move_rename':
new_name = self._template_render(
self.new_name, attachment)
new_full_path = os.path.join(
self.move_path, new_name)
if new_full_path:
conn.rename(full_path, new_full_path)
if self.md5_check:
conn.rename(
full_path + '.md5',
                                        new_full_path + '.md5')
if self.after_import == 'delete':
conn.remove(full_path)
if self.md5_check:
conn.remove(full_path + '.md5')
except Exception, e:
new_env.cr.rollback()
raise e
else:
new_env.cr.commit()
@api.multi
def run_export(self):
self.ensure_one()
attachment_obj = self.env['ir.attachment.metadata']
attachments = attachment_obj.search(
[('task_id', '=', self.id), ('state', '!=', 'done')])
for attachment in attachments:
attachment.run()
| agpl-3.0 | 3,746,848,047,542,242,000 | 39.40566 | 79 | 0.494747 | false | 4.573412 | false | false | false |
eldstal/cardcinogen | card.py | 1 | 2572 | #!/bin/env python3
import unittest
import os
import sys
import util
import log
from PIL import Image
from layout import SimpleLayout, ComplexLayout
class CardTemplate:
""" Parsed version of a JSON card template """
def __init__(self, json, rootdir="."):
self.front_name = util.get_default(json, "front-image", "front.png")
self.hidden_name = util.get_default(json, "hidden-image", "hidden.png")
self.layouts = []
for j in util.get_default(json, "layouts", []):
self.type = util.get_default(j, "type", "simple")
if (self.type == "complex"):
self.layouts.append(ComplexLayout(j, rootdir))
else:
self.layouts.append(SimpleLayout(j, rootdir))
front_path = os.path.join(rootdir, self.front_name)
hidden_path = os.path.join(rootdir, self.hidden_name)
# Make sure we have valid images and they all have matching sizes
self.front = util.default_image(front_path, (372, 520))
self.hidden = util.default_image(hidden_path, self.front.size, self.front.size)
def make_card(self, textgen):
""" Generate a single card """
if (len(self.layouts) == 0):
log.log.write("Warning: No layouts specified.")
return None
face = self.front.copy()
for l in self.layouts:
overlay = l.render(face.size, textgen)
if (overlay is None):
# This layout is done generating cards.
# This happens when, eventually, textgen runs out of card texts for a given layout.
continue
# We have a card! Return it and that's that.
face.paste(overlay, mask=overlay)
return face
# None of the layouts can generate any cards. We're done.
return None
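# Hedged usage sketch: drain a template until its text generator is exhausted.
# "textgen" stands for whatever generator the layouts pull card texts from.
#
#   n = 0
#   while True:
#     card = template.make_card(textgen)
#     if card is None:
#       break
#     card.save("card-%03d.png" % n)
#     n += 1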
#
# Unit tests
#
class TestCardStuff(unittest.TestCase):
def test_default(self):
tmpl_default = CardTemplate({})
self.assertEqual(tmpl_default.front_name, "front.png")
self.assertEqual(tmpl_default.hidden_name, "hidden.png")
    self.assertEqual(tmpl_default.layouts, [])
# Override all settings
dic = {
"front-image": "card-front.jpeg",
"hidden-image": "card-hidden.jpeg",
"layout": [
{
"x": 10
},
{
"y": 20
}
]
}
tmpl = CardTemplate(dic)
self.assertEqual(tmpl.front_name, dic["front-image"])
self.assertEqual(tmpl.hidden_name, dic["hidden-image"])
    self.assertEqual(len(tmpl.layouts), 2)
    self.assertEqual(tmpl.layouts[0].x, dic["layouts"][0]["x"])
    self.assertEqual(tmpl.layouts[1].y, dic["layouts"][1]["y"])
if __name__ == '__main__':
unittest.main()
| mit | 445,096,269,937,110,460 | 26.073684 | 91 | 0.630638 | false | 3.475676 | true | false | false |
dpaiton/OpenPV | pv-core/python/pvtools/writepvpfile.py | 1 | 10534 | import numpy as np
import scipy.sparse as sp
import pdb
from readpvpheader import headerPattern, extendedHeaderPattern
def checkData(data):
#Check if dictionary
if not isinstance(data, dict):
raise ValueError("Input data structure must be a dictionary with the keys \"values\" and \"time\"")
#Check for fields values and time
if not 'values' in data.keys():
raise ValueError("Input data structure missing \"values\" key");
if not 'time' in data.keys():
raise ValueError("Input data structure missing \"time\" key");
values = data["values"]
time = data["time"]
#Make sure the 2 arrays are numpy arrays or sparse matrices
if not sp.issparse(values) and not type(values).__module__ == np.__name__:
raise ValueError("Values field must be either a sparse matrix or a numpy array")
#If time is a list, convert to numpy array
if type(time) == list:
data["time"] = np.array(data["time"])
time = data["time"]
if not type(time).__module__ == np.__name__:
raise ValueError("Time field must be either a numpy array or a list")
#Check dimensions of values and time
if sp.issparse(values):
if not values.ndim == 2:
raise ValueError("Sparse values must have 2 dimensions")
else:
if not values.ndim == 4 and not values.ndim == 6:
raise ValueError("Dense values must have either 4 or 6 dimensions")
#Check that sizes of values and time matches
valuesShape = values.shape
timeShape = time.shape
if not valuesShape[0] == timeShape[0]:
raise ValueError("Values must have the same number of frames as time (" + str(valuesShape[0]) + " vs " + str(timeShape[0]) + ")")
#Values should be single floats, time should be double floats
data["values"] = data["values"].astype(np.float32)
data["time"] = data["time"].astype(np.float64)
#Dense values must be c-contiguous
if(not sp.issparse(data["values"]) and not data["values"].flags["C_CONTIGUOUS"]):
data["values"] = data["values"].copy(order='C')
def generateHeader(data, inShape):
#data["values"] can be one of 3 shapes: dense 4d mat for activity, dense 6d mat for weights
#scipy coo_sparse matrix for sparse activity
header = {}
values = data["values"]
#If sparse matrix, write as sparse format
if(sp.issparse(values)):
if(inShape == None):
raise ValueError("Sparse values must have shape input when generating header")
if len(inShape) != 3:
raise ValueError("Shape parameter must be a 3 tuple of (ny, nx, nf)")
(ny, nx, nf) = inShape
(numFrames, numFeat) = values.shape
if(not numFeat == ny*nx*nf):
raise ValueError("Shape provided does not match the data shape (" + str(ny) + "*" + str(nx) + "*" + str(nf) + " vs " + str(numFeat) + ")")
header["headersize"] = np.uint32(80)
header["numparams"] = np.uint32(20)
header["filetype"] = np.uint32(6)
header["nx"] = np.uint32(nx)
header["ny"] = np.uint32(ny)
header["nf"] = np.uint32(nf)
header["numrecords"] = np.uint32(1)
header["recordsize"] = np.uint32(0) #Not used in sparse activity
header["datasize"] = np.uint32(8) #Int/float are 4 bytes each
header["datatype"] = np.uint32(4) #Type is location-value pair
header["nxprocs"] = np.uint32(1) #No longer used
header["nyprocs"] = np.uint32(1)
header["nxGlobal"] = np.uint32(nx)
header["nyGlobal"] = np.uint32(ny)
header["kx0"] = np.uint32(0)
header["ky0"] = np.uint32(0)
header["nbatch"] = np.uint32(1)
header["nbands"] = np.uint32(numFrames)
header["time"] = np.float64(data["time"][0])
#If 4d dense matrix, write as dense format
elif(values.ndim == 4):
(numFrames, ny, nx, nf) = values.shape
header["headersize"] = np.uint32(80)
header["numparams"] = np.uint32(20)
header["filetype"] = np.uint32(4)
header["nx"] = np.uint32(nx)
header["ny"] = np.uint32(ny)
header["nf"] = np.uint32(nf)
header["numrecords"] = np.uint32(1)
header["recordsize"] = np.uint32(nx*ny*nf) #Not used in sparse activity
header["datasize"] = np.uint32(4) #floats are 4 bytes
header["datatype"] = np.uint32(3) #Type is float
header["nxprocs"] = np.uint32(1) #No longer used
header["nyprocs"] = np.uint32(1)
header["nxGlobal"] = np.uint32(nx)
header["nyGlobal"] = np.uint32(ny)
header["kx0"] = np.uint32(0)
header["ky0"] = np.uint32(0)
header["nbatch"] = np.uint32(1)
header["nbands"] = np.uint32(numFrames)
header["time"] = np.float64(data["time"][0])
#If 6d dense matrix, write as weights format
elif(values.ndim == 6):
(numFrames, numArbors, numKernels, nyp, nxp, nfp) = values.shape
header["headersize"] = np.uint32(104)
header["numparams"] = np.uint32(26)
header["filetype"] = np.uint32(5)
header["nx"] = np.uint32(1) #size not used by weights
header["ny"] = np.uint32(1)
header["nf"] = np.uint32(numKernels) #Pre nf
header["numrecords"] = np.uint32(numArbors)
#Each data for arbor is preceded by nxp(2 bytes), ny (2 bytes) and offset (4 bytes)
header["recordsize"] = np.uint32(numKernels * (8+4*nxp*nyp*nfp))
header["datasize"] = np.uint32(4) #floats are 4 bytes
header["datatype"] = np.uint32(3) #float type
header["nxprocs"] = np.uint32(1)
header["nyprocs"] = np.uint32(1)
header["nxGlobal"] = np.uint32(1)
header["nyGlobal"] = np.uint32(1)
header["kx0"] = np.uint32(0)
header["ky0"] = np.uint32(0)
header["nbatch"] = np.uint32(1)
header["nbands"] = np.uint32(numArbors) #For weights, numArbors is stored in nbands, no field for numFrames
#This field will be updated on write
header["time"] = np.float64(data["time"][0])
#Weights have extended header
header["nxp"] = np.uint32(nxp)
header["nyp"] = np.uint32(nyp)
header["nfp"] = np.uint32(nfp)
header["wMax"] = np.uint32(1) #This field will be updated on write
header["wMin"] = np.uint32(1) #This field will be updated on write
header["numpatches"] = np.uint32(numKernels)
return header
def writepvpfile(filename, data, shape=None, useExistingHeader=False):
#Check data structure
checkData(data)
if not 'header' in data.keys():
if useExistingHeader:
raise ValueError("Must specify a \"header\" field if using existing header")
#Data can either have a header field or not
#Generate header if no header field
if not useExistingHeader:
#If it doesn't exist, generate header
data["header"] = generateHeader(data, shape)
# To get ordered list of header params
if data["header"]['numparams'] == 26:
hPattern = extendedHeaderPattern
else:
hPattern = headerPattern
with open(filename, 'wb') as stream:
if data["header"]['filetype'] == 1:
print('Filetype 1 not yet supported for write pvp')
elif data["header"]['filetype'] == 2:
print('Filetype 2 not yet supported for write pvp')
elif data["header"]['filetype'] == 3:
print('Filetype 3 not yet supported for write pvp')
elif data["header"]['filetype'] == 4:
(numFrames, ny, nx, nf) = data["values"].shape
#Write out header
for headerEntry in hPattern:
stream.write(headerEntry[1](data["header"][headerEntry[0]]))
for dataFrame in range(numFrames):
stream.write(data["time"][dataFrame])
stream.write(data["values"][dataFrame, :, :, :])
elif data["header"]['filetype'] == 5:
(numFrames, numArbors, numKernels, nyp, nxp, nfp) = data["values"].shape
# Type 5's have a header in each frame
#Make a copy of header dictionary to avoid changing
#the header field
tmpHeader = data["header"].copy()
for dataFrame in range(numFrames):
#Set header fields that change from frame to frame
tmpHeader["time"] = np.float64(data["time"][dataFrame])
##wMax and wMin are int32's, whereas the max and min might not be an int
#tmpHeader["wMax"] = np.uint32(np.max(data["values"][dataFrame, :, :, :, :, :]))
#tmpHeader["wMin"] = np.uint32(np.min(data["values"][dataFrame, :, :, :, :, :]))
for headerEntry in hPattern:
stream.write(headerEntry[1](tmpHeader[headerEntry[0]]))
#Within each patch, we write out each nxp, nyp, and offset
for dataArbor in range(numArbors):
for dataKernel in range(numKernels):
stream.write(np.uint16(nxp))
stream.write(np.uint16(nyp))
stream.write(np.uint32(0)) #Offset is always 0 for kernels
stream.write(data["values"][dataFrame, dataArbor, dataKernel, :, :, :])
#Sparse values
elif data["header"]['filetype'] == 6:
(numFrames, numData) = data["values"].shape
# Copied from filetype 4
for headerEntry in hPattern:
stream.write(headerEntry[1](data["header"][headerEntry[0]]))
for dataFrame in range(numFrames):
frameVals = data["values"].getrow(dataFrame)
count = frameVals.nnz
index = frameVals.indices
value = frameVals.data
#Write time first, followed by count, followed by values
stream.write(data["time"][dataFrame])
stream.write(np.uint32(count))
for i in range(count):
stream.write(np.uint32(index[i]))
stream.write(np.float32(value[i]))
if __name__ == "__main__":
data = {}
values = np.ones((2, 10))
data["values"] = sp.coo_matrix(values)
data["time"] = range(2)
writepvpfile("test.pvp", data, shape=(2, 5, 1))
| epl-1.0 | 6,473,318,771,748,574,000 | 43.447257 | 150 | 0.57566 | false | 3.675506 | false | false | false |
thinkAmi-sandbox/Bottle-sample | e.g._bbs_app/bbs.py | 1 | 1890 | import datetime
import pickle
from pathlib import Path
from bottle import Bottle, run, get, post, redirect, request, response, jinja2_template
class Message(object):
def __init__(self, title, handle, message):
self.title = title
self.handle = handle
self.message = message
self.created_at = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
# Assign the instance to the variable "app" so it can be used from test code
app = Bottle()
@app.get('/')
def get_form():
    # Use the getunicode() method to read the cookie value as Unicode
# https://bottlepy.org/docs/dev/tutorial.html#introducing-formsdict
# https://bottlepy.org/docs/dev/api.html#bottle.FormsDict
    # handle = request.get_cookie('handle')  #=> would store mojibake such as "ã"
handle = request.cookies.getunicode('handle', default='')
messages = read_messages()
return jinja2_template('bbs.html', handle=handle, messages=messages)
@app.post('/')
def post_form():
response.set_cookie('handle', request.forms.get('handle'))
message = Message(
        # Here too, use getunicode() instead of get() to avoid mojibake in the HTML
title=request.forms.getunicode('title'),
handle=request.forms.getunicode('handle'),
message=request.forms.getunicode('message'),
)
messages = read_messages()
messages.append(message)
with open('bbs.pickle', mode='wb') as f:
pickle.dump(messages, f)
redirect('/')
@app.get('/delete_cookie')
def delete_cookie():
response.delete_cookie('handle')
redirect('/')
def read_messages():
if Path('bbs.pickle').exists():
with open('bbs.pickle', mode='rb') as f:
return pickle.load(f)
return []
if __name__ == "__main__":
run(app, host="localhost", port=8080, debug=True, reloader=True) | unlicense | -4,043,259,285,766,576,000 | 27.8 | 87 | 0.653735 | false | 2.972461 | false | false | false |
capitalone/cloud-custodian | tools/c7n_azure/c7n_azure/provisioning/deployment_unit.py | 1 | 1645 | import logging
from abc import ABCMeta, abstractmethod
from c7n.utils import local_session
from c7n_azure.session import Session
class DeploymentUnit(metaclass=ABCMeta):
log = logging.getLogger('custodian.azure.deployment_unit.DeploymentUnit')
def __init__(self, client):
self.type = ""
self.session = local_session(Session)
self.client = self.session.client(client)
def get(self, params):
result = self._get(params)
if result:
self.log.info('Found %s "%s".' % (self.type, params['name']))
else:
self.log.info('%s "%s" not found.' % (self.type, params['name']))
return result
    def check_exists(self, params):
        return self.get(params) is not None
def provision(self, params):
self.log.info('Creating %s "%s"' % (self.type, params['name']))
result = self._provision(params)
if result:
self.log.info('%s "%s" successfully created' % (self.type, params['name']))
else:
self.log.info('Failed to create %s "%s"' % (self.type, params['name']))
return result
def provision_if_not_exists(self, params):
result = self.get(params)
if result is None:
if 'id' in params.keys():
raise Exception('%s with %s id is not found' % (self.type, params['id']))
result = self.provision(params)
return result
@abstractmethod
def _get(self, params):
raise NotImplementedError()
@abstractmethod
def _provision(self, params):
raise NotImplementedError()
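# Hedged subclass sketch (names illustrative only): a concrete unit fills in
# the two hooks below; lookup/creation flow and logging come from the base.
#
#   class ResourceGroupUnit(DeploymentUnit):
#       def _get(self, params):
#           return ...  # look the resource up via self.client
#       def _provision(self, params):
#           return ...  # create the resource via self.client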
| apache-2.0 | -3,710,982,216,230,997,000 | 30.254902 | 89 | 0.579331 | false | 4.09204 | false | false | false |
iYgnohZ/crack-geetest | geetest/geetest.py | 1 | 4035 | # -*- coding: utf-8 -*-
import time
import uuid
import StringIO
from PIL import Image
from selenium.webdriver.common.action_chains import ActionChains
class BaseGeetestCrack(object):
"""验证码破解基础类"""
def __init__(self, driver):
self.driver = driver
self.driver.maximize_window()
def input_by_id(self, text=u"中国移动", element_id="keyword_qycx"):
"""输入查询关键词
:text: Unicode, 要输入的文本
:element_id: 输入框网页元素id
"""
input_el = self.driver.find_element_by_id(element_id)
input_el.clear()
input_el.send_keys(text)
time.sleep(3.5)
def click_by_id(self, element_id="popup-submit"):
"""点击查询按钮
:element_id: 查询按钮网页元素id
"""
search_el = self.driver.find_element_by_id(element_id)
search_el.click()
time.sleep(3.5)
def calculate_slider_offset(self):
"""计算滑块偏移位置,必须在点击查询按钮之后调用
:returns: Number
"""
img1 = self.crop_captcha_image()
self.drag_and_drop(x_offset=5)
img2 = self.crop_captcha_image()
w1, h1 = img1.size
w2, h2 = img2.size
if w1 != w2 or h1 != h2:
return False
left = 0
flag = False
for i in xrange(45, w1):
for j in xrange(h1):
if not self.is_pixel_equal(img1, img2, i, j):
left = i
flag = True
break
if flag:
break
if left == 45:
left -= 2
return left
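        # Hedged call-order sketch: the two screenshots diffed above only make
        # sense once the captcha is on screen, i.e. roughly:
        #   self.input_by_id(...); self.click_by_id(...)
        #   offset = self.calculate_slider_offset()
        #   self.drag_and_drop(x_offset=offset)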
def is_pixel_equal(self, img1, img2, x, y):
pix1 = img1.load()[x, y]
pix2 = img2.load()[x, y]
        if abs(pix1[0] - pix2[0]) < 60 and abs(pix1[1] - pix2[1]) < 60 and abs(pix1[2] - pix2[2]) < 60:
return True
else:
return False
def crop_captcha_image(self, element_id="gt_box"):
"""截取验证码图片
:element_id: 验证码图片网页元素id
:returns: StringIO, 图片内容
"""
captcha_el = self.driver.find_element_by_class_name(element_id)
location = captcha_el.location
size = captcha_el.size
left = int(location['x'])
top = int(location['y'])
        # hard-coded crop region for a specific screen layout; overrides the
        # element location computed above
        left = 1010
        top = 535
# right = left + int(size['width'])
# bottom = top + int(size['height'])
right = left + 523
bottom = top + 235
print(left, top, right, bottom)
screenshot = self.driver.get_screenshot_as_png()
screenshot = Image.open(StringIO.StringIO(screenshot))
captcha = screenshot.crop((left, top, right, bottom))
captcha.save("%s.png" % uuid.uuid4().get_hex())
return captcha
def get_browser_name(self):
"""获取当前使用浏览器名称
:returns: TODO
"""
return str(self.driver).split('.')[2]
def drag_and_drop(self, x_offset=0, y_offset=0, element_class="gt_slider_knob"):
"""拖拽滑块
:x_offset: 相对滑块x坐标偏移
:y_offset: 相对滑块y坐标偏移
:element_class: 滑块网页元素CSS类名
"""
dragger = self.driver.find_element_by_class_name(element_class)
action = ActionChains(self.driver)
action.drag_and_drop_by_offset(dragger, x_offset, y_offset).perform()
        # This delay is required: wait for the widget to settle back after the drag
time.sleep(8)
def move_to_element(self, element_class="gt_slider_knob"):
"""鼠标移动到网页元素上
:element: 目标网页元素
"""
time.sleep(3)
element = self.driver.find_element_by_class_name(element_class)
action = ActionChains(self.driver)
action.move_to_element(element).perform()
time.sleep(4.5)
def crack(self):
"""执行破解程序
"""
raise NotImplementedError
| mit | 1,481,177,789,686,042,400 | 25.435714 | 105 | 0.543367 | false | 2.816591 | false | false | false |
j5shi/Thruster | pylibs/idlelib/IdleHistory.py | 1 | 4239 | "Implement Idle Shell history mechanism with History class"
from idlelib.configHandler import idleConf
class History:
''' Implement Idle Shell history mechanism.
store - Store source statement (called from PyShell.resetoutput).
fetch - Fetch stored statement matching prefix already entered.
history_next - Bound to <<history-next>> event (default Alt-N).
history_prev - Bound to <<history-prev>> event (default Alt-P).
'''
def __init__(self, text):
'''Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not).
'''
self.text = text
self.history = []
self.prefix = None
self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
"Fetch later statement; start with ealiest if cyclic."
self.fetch(reverse=False)
return "break"
def history_prev(self, event):
"Fetch earlier statement; start with most recent."
self.fetch(reverse=True)
return "break"
def fetch(self, reverse):
        '''Fetch statement and replace current line in text widget.
Set prefix and pointer as needed for successive fetches.
Reset them to None, None when returning to the start line.
        Sound the bell when returning to the start line, or when a line
        cannot be left because cyclic is False.
'''
nhist = len(self.history)
pointer = self.pointer
prefix = self.prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
self.text.get("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
self.text.mark_set("insert", "end-1c") # != after cursor move
if pointer is None or prefix is None:
prefix = self.text.get("iomark", "end-1c")
if reverse:
pointer = nhist # will be decremented
else:
if self.cyclic:
pointer = -1 # will be incremented
else: # abort history_next
self.text.bell()
return
nprefix = len(prefix)
while 1:
pointer += -1 if reverse else 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
if not self.cyclic and pointer < 0: # abort history_prev
return
else:
if self.text.get("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", item)
break
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
self.pointer = pointer
self.prefix = prefix
def store(self, source):
"Store Shell input statement into history list."
source = source.strip()
if len(source) > 2:
# avoid duplicates
try:
self.history.remove(source)
except ValueError:
pass
self.history.append(source)
self.pointer = None
self.prefix = None
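# Hedged behaviour sketch: after store("print(a)") and store("print(b)"),
# <<history-previous>> at an empty prompt recalls "print(b)" then "print(a)";
# typing a prefix such as "print(a" first makes fetch() skip entries that do
# not start with that prefix.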
if __name__ == "__main__":
from test import test_support as support
support.use_resources = ['gui']
from unittest import main
main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False)
| gpl-2.0 | 6,686,773,657,945,853,000 | 37.990566 | 80 | 0.548714 | false | 4.424843 | false | false | false |
Youwotma/splash | splash/kernel/kernel.py | 1 | 9476 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import six
import sys
import lupa
from ipykernel.kernelapp import IPKernelApp
from ipykernel.eventloops import loop_qt5
from jupyter_client.kernelspec import install_kernel_spec
from twisted.internet import defer
import splash
from splash.lua import get_version, get_main_sandboxed, get_main
from splash.browser_tab import BrowserTab
from splash.lua_runtime import SplashLuaRuntime
from splash.qtrender_lua import (
Splash, MainCoroutineRunner, StoredExceptions, Extras
)
from splash.qtutils import init_qt_app
from splash.render_options import RenderOptions
from splash import defaults
from splash.kernel.kernelbase import Kernel
from splash.utils import BinaryCapsule
from splash.kernel.completer import Completer
from splash.kernel.inspections import Inspector
from splash.kernel.errors import error_repr
import splash.server as server
def install(user=True):
""" Install IPython kernel specification """
name = 'splash-py2' if six.PY2 else 'splash-py3'
folder = os.path.join(os.path.dirname(__file__), 'kernels', name)
install_kernel_spec(folder, kernel_name="splash", user=user, replace=True)
def init_browser(network_manager_factory):
# TODO: support the same command-line options as HTTP server.
# from splash.server import start_logging
# class opts(object):
# logfile = "./kernel.log"
# start_logging(opts)
proxy_factory = None # TODO
data = {}
data['uid'] = id(data)
tab = BrowserTab(
network_manager=network_manager_factory(),
splash_proxy_factory=proxy_factory,
verbosity=2, # TODO
render_options=RenderOptions(data, defaults.MAX_TIMEOUT), # TODO: timeout
visible=True,
)
return tab
class DeferredSplashRunner(object):
def __init__(self, lua, splash, sandboxed, log=None, render_options=None):
self.lua = lua
self.splash = splash
self.sandboxed = sandboxed
if log is None:
self.log = self.splash.tab.logger.log
else:
self.log = log
self.runner = MainCoroutineRunner(
lua=self.lua,
log=self.log,
splash=splash,
sandboxed=self.sandboxed,
)
def run(self, main_coro):
"""
Run main_coro Lua coroutine, passing it a Splash
instance as an argument. Return a Deferred.
"""
d = defer.Deferred()
def return_result(result):
d.callback(result)
def return_error(err):
d.errback(err)
self.runner.start(
main_coro=main_coro,
return_result=return_result,
return_error=return_error,
)
return d
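# Hedged usage sketch: callers wire the returned Deferred like any Twisted
# deferred, e.g.
#   d = runner.run(main_coro)
#   d.addCallbacks(on_result, on_error)
# which is exactly what SplashKernel.do_execute does below.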
class SplashKernel(Kernel):
implementation = 'Splash'
implementation_version = splash.__version__
language = 'Lua'
language_version = get_version()
language_info = {
'name': 'Splash',
'mimetype': 'application/x-lua',
'display_name': 'Splash',
'language': 'lua',
'codemirror_mode': {
"name": "text/x-lua",
},
'file_extension': '.lua',
'pygments_lexer': 'lua',
'version': get_version(),
}
banner = "Splash kernel - write browser automation scripts interactively"
help_links = [
{
'text': "Splash Tutorial",
'url': 'http://splash.readthedocs.org/en/latest/scripting-tutorial.html'
},
{
'text': "Splash Reference",
'url': 'http://splash.readthedocs.org/en/latest/scripting-ref.html'
},
{
'text': "Programming in Lua",
'url': 'http://www.lua.org/pil/contents.html'
},
{
'text': "Lua 5.2 Manual",
'url': 'http://www.lua.org/manual/5.2/'
},
]
sandboxed = False
def __init__(self, **kwargs):
super(SplashKernel, self).__init__(**kwargs)
self.tab = init_browser(SplashKernel.network_manager_factory)
self.lua = SplashLuaRuntime(self.sandboxed, "", ())
self.exceptions = StoredExceptions()
self.splash = Splash(
lua=self.lua,
exceptions=self.exceptions,
tab=self.tab
)
self.lua.add_to_globals("splash", self.splash.get_wrapped())
self.extras = Extras(self.lua, self.exceptions)
self.extras.inject_to_globals()
self.runner = DeferredSplashRunner(self.lua, self.splash, self.sandboxed) #, self.log_msg)
self.completer = Completer(self.lua)
self.inspector = Inspector(self.lua)
#
# try:
# sys.stdout.write = self._print
# sys.stderr.write = self._print
# except:
# pass # Can't change stdout
def send_execute_reply(self, stream, ident, parent, md, reply_content):
def done(result):
reply, result, ct = result
if result:
data = {
'text/plain': result if isinstance(result, six.text_type) else str(result),
}
if isinstance(result, BinaryCapsule):
if result.content_type in {'image/png', 'image/jpeg'}:
data[result.content_type] = result.as_b64()
self._publish_execute_result(parent, data, {}, self.execution_count)
super(SplashKernel, self).send_execute_reply(stream, ident, parent, md, reply)
assert isinstance(reply_content, defer.Deferred)
reply_content.addCallback(done)
def do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=False):
def success(res):
result, content_type, headers, status_code = res
reply = {
'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
return reply, result, content_type or 'text/plain'
def error(failure):
text = "<unknown error>"
try:
failure.raiseException()
except Exception as e:
text = error_repr(e)
reply = {
'status': 'error',
'execution_count': self.execution_count,
'ename': '',
'evalue': text,
'traceback': []
}
return reply, text, 'text/plain'
try:
try:
# XXX: this ugly formatting is important for exception
# line numbers to be displayed properly!
lua_source = 'local repr = require("repr"); function main(splash) return repr(%s) end' % code
main_coro = self._get_main(lua_source)
except lupa.LuaSyntaxError:
try:
lines = code.splitlines(False)
lua_source = '''local repr = require("repr"); function main(splash) %s
return repr(%s)
end
''' % ("\n".join(lines[:-1]), lines[-1])
main_coro = self._get_main(lua_source)
except lupa.LuaSyntaxError:
lua_source = "function main(splash) %s end" % code
main_coro = self._get_main(lua_source)
except (lupa.LuaSyntaxError, lupa.LuaError) as e:
d = defer.Deferred()
d.addCallbacks(success, error)
d.errback(e)
return d
except Exception:
d = defer.Deferred()
d.addCallbacks(success, error)
d.errback()
return d
d = self.runner.run(main_coro)
d.addCallbacks(success, error)
return d
def do_complete(self, code, cursor_pos):
return self.completer.complete(code, cursor_pos)
def do_inspect(self, code, cursor_pos, detail_level=0):
return self.inspector.help(code, cursor_pos, detail_level)
def _publish_execute_result(self, parent, data, metadata, execution_count):
msg = {
u'data': data,
u'metadata': metadata,
u'execution_count': execution_count
}
self.session.send(self.iopub_socket, u'execute_result', msg,
parent=parent, ident=self._topic('execute_result')
)
def log_msg(self, text, min_level=2):
self._print(text + "\n")
def _print(self, message):
stream_content = {'name': 'stdout', 'text': message, 'metadata': dict()}
self.log.debug('Write: %s' % message)
self.send_response(self.iopub_socket, 'stream', stream_content)
def _get_main(self, lua_source):
if self.sandboxed:
main, env = get_main_sandboxed(self.lua, lua_source)
else:
main, env = get_main(self.lua, lua_source)
return self.lua.create_coroutine(main)
def server_factory(network_manager_factory, verbosity, **kwargs):
init_qt_app(verbose=verbosity >= 5)
SplashKernel.network_manager_factory = network_manager_factory
kernel = IPKernelApp.instance(kernel_class=SplashKernel)
kernel.initialize()
kernel.kernel.eventloop = loop_qt5
kernel.start()
def start():
splash_args = os.environ.get('SPLASH_ARGS', '').split()
server.main(jupyter=True, argv=splash_args, server_factory=server_factory)
| bsd-3-clause | 2,472,990,847,262,979,600 | 32.249123 | 109 | 0.577353 | false | 3.801043 | false | false | false |
smARTLab-liv/smartlabatwork-release | slaw_smach/src/slaw_smach/slaw_smach.py | 1 | 11996 | #!/usr/bin/env python
import rospy
from ArmStates import *
from MoveStates import *
from ObjectDetectState import *
from DecisionStates import *
from std_srvs.srv import Empty, EmptyResponse
from std_msgs.msg import Bool
## TODO after Eindhoven: Add failsafe if hole not detected
## add states if object too far or too close to gripper
class Smach():
def __init__(self):
rospy.init_node('slaw_smach')
self.sm = smach.StateMachine(outcomes=['end'])
with self.sm:
### MOVE STATE WITH RECOVER
smach.StateMachine.add('MoveToNext', MoveStateUserData(), transitions = {'reached':'DecideAfterMove', 'not_reached': 'RecoverMove', 'failed': 'DeleteCurGoal'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('RecoverMove', RecoverState(), transitions = {'done':'MoveToNext'}, remapping = {'pose_in':'pose', 'pose_out': 'pose'})
### END MOVE STATE WITH RECOVER
##Decision state after Move:
smach.StateMachine.add('DecideAfterMove', DecideAfterMoveState(),transitions = {'BNT': 'ScanMatcher_BNT', 'Pickup':'ScanMatcher_Pickup', 'Place':'ScanMatcher_Place', 'End':'end'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
######BNT SPECIFIC
smach.StateMachine.add('ScanMatcher_BNT', ScanMatcher(), transitions = {'reached':'SleepState', 'not_reached':'ScanMatcher_BNT', 'failed':'SleepState'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('SleepState', SleepState(), transitions = {'done':'DeleteCurGoal'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
########END BNT
##### DELETE CURRENT GOAL OR GET NEXT GOAL
smach.StateMachine.add('DeleteCurGoal', DeleteCurrentGoalState(), transitions = {'done':'MoveToNext'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('GetNextGoal', GetNextGoalState(), transitions = {'done':'MoveToNext'}, remapping = {'pose_in':'pose','object_in':'object', 'pose_out':'pose'})
##### END DELETE CURRENT GOAL OR GET NEXT GOAL
### PICKUP
smach.StateMachine.add('ScanMatcher_Pickup', ScanMatcher(), transitions = {'reached':'DecideBeforePreGrip', 'not_reached':'ScanMatcher_Pickup', 'failed':'MoveToNext'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
#smach.StateMachine.add('ScanMatcher_Pickup', ScanMatcher(), transitions = {'reached':'ScanMatcher_Align', 'not_reached':'ScanMatcher_Pickup', 'failed':'MoveToNext'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
#smach.StateMachine.add('ScanMatcher_Align', AlignState(), transitions = {'done':'DecideBeforePreGrip'})
##
#Either CBT Pickup or normal Pickup
smach.StateMachine.add('DecideBeforePreGrip', DecideBeforePreGripState(),transitions = {'CBT': 'PreGrip_CBT', 'Pickup':'PreGrip'}, remapping = {'pose_in':'pose', 'pose_out':'pose', 'dist_out':'dist'})
######CBT STUFF
smach.StateMachine.add('PreGrip_CBT', PreGripCBT(), transitions = {'success':'ScanForObjectCBT', 'failed':'TuckArmPreGripCBT'},remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('TuckArmPreGripCBT', TuckArm(), transitions = {'success':'PreGrip_CBT', 'not_reached':'TuckArmPreGripCBT','failed':'end'})
smach.StateMachine.add('ScanForObjectCBT', ScanForObjectCBT(), transitions = {'success':'GripCBT'})
smach.StateMachine.add('GripCBT', GripCBT(), transitions = {'end':'DeleteCurGoal'})
#### END CBT Stuff
### NORMAL PICKUP
smach.StateMachine.add('PreGrip', PreGrip(), transitions = {'success':'Scan', 'failed':'TuckArmPreGrip'},remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('TuckArmPreGrip', TuckArm(), transitions = {'success':'PreGrip', 'not_reached':'TuckArmPreGrip','failed':'end'})
#scan
smach.StateMachine.add("Scan", ScanForObjectsState(), transitions = {'success': 'Grip', 'failed':'TuckArmMoveNext','nothing_found': 'TuckArmDelete'}, remapping = {'pose_in':'pose', 'pose_out':'pose', 'object_out':'object', 'point_out':'point', 'dist_in':'dist','dist_out':'dist'})
#if misdetection try again
smach.StateMachine.add('TuckArmMoveNext', TuckArm(), transitions = {'success':'MoveToNext', 'not_reached':'TuckArmMoveNext','failed':'end'})
#if nothing found try next Goal
smach.StateMachine.add('TuckArmDelete', TuckArm(), transitions = {'success':'DeleteCurGoal', 'not_reached':'TuckArmDelete','failed':'end'})
#Grip Object
smach.StateMachine.add("Grip", Grip(), transitions = {'success':'DecideRV20', 'too_far':'ScanMatcher_Pickup', 'failed':'TuckArmFailGrip', 'failed_after_grip':'TuckArmGrip'}, remapping = {'pose_in':'pose', 'object_in':'object', 'point_in':'point','pose_out':'pose', 'object_out':'object', 'point_out':'point'})
#Decide RV20:
smach.StateMachine.add('DecideRV20', DecideRV20State(),transitions = {'RV20': 'TuckForDriveAfterGrip', 'Normal':'TuckForDriveAfterGrip'}, remapping = {'object_in':'object', 'object_out':'object'})
#smach.StateMachine.add('DecideRV20', DecideRV20State(),transitions = {'RV20': 'RV20CheckArm', 'Normal':'TuckForDriveAfterGrip'}, remapping = {'object_in':'object', 'object_out':'object', 'pose_out':'pose'})
####CHECK if RV20 which one
smach.StateMachine.add('RV20CheckArm', RV20CheckState(), transitions = {'success':'RV20CheckVision','failed':'TuckArmPreCheckArm'}, remapping = {'pose_in':'pose'})
smach.StateMachine.add('TuckArmPreCheckArm', TuckArm(), transitions = {'success':'RV20CheckArm', 'not_reached':'TuckArmPreCheckArm','failed':'end'})
#smach.StateMachine.add('RV20CheckVision', RV20CheckVision(), transitions = {'success':'RV20RotateTake','failed':'RV20RotateReplace'}, remapping = {'pose_in':'pose', 'object_in':'object', 'pose_out':'pose'})
smach.StateMachine.add('RV20CheckVision', RV20CheckVision(), transitions = {'success':'RV20RotateTake','failed':'RV20Trash'}, remapping = {'pose_in':'pose', 'object_in':'object', 'pose_out':'pose'})
smach.StateMachine.add('RV20Trash', RV20Trash(), transitions = {'done':'PreGrip'})
#smach.StateMachine.add('RV20RotateReplace', RV20ReplaceObjectRotate(), transitions = {'success':'RV20Replace','failed':'RV20Replace'}, remapping = {'pose_in':'pose'})
smach.StateMachine.add('RV20RotateTake', RV20ReplaceObjectRotate(), transitions = {'success':'TuckForDriveAfterGrip','failed':'TuckForDriveAfterGrip'}, remapping = {'pose_in':'pose'})
#smach.StateMachine.add('RV20Replace', FinePlace(), transitions = {'success':'RV20ReplaceUp', 'failed':'TuckArmFailPlace_RV20', 'too_far':'RV20Replace','failed_after_place':'TuckArmFailPlace_RV20'}, remapping = {'object_in':'object','pose_in':'pose', 'pose_out':'pose', 'point_in':'point'})
#smach.StateMachine.add('TuckArmFailPlace_RV20', TuckArm(), transitions = {'success':'RV20Replace', 'not_reached':'TuckArmFailPlace_RV20','failed':'end'})
#smach.StateMachine.add('RV20ReplaceUp', RV20ReplaceUp(), transitions = {'done':'MoveBack10'})
#MoveBack 10 to skip object and resume scanning
#smach.StateMachine.add('MoveBack10', MoveBack(0.10), transitions = {'done':'Remove10'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
#smach.StateMachine.add('Remove10', RemoveDist(0.10), transitions = {'done':'PreGrip'}, remapping = {'dist_in':'dist', 'dist_out':'dist'})
#Tuck and Move away
##Tuck For Drive
smach.StateMachine.add('TuckForDriveAfterGrip', TuckForDrive(), transitions={'done':'MoveAwayFromPlatform'}, remapping = {'pose_in':'pose'} )
smach.StateMachine.add('TuckArmGrip', TuckArm(), transitions = {'success':'MoveAwayFromPlatform', 'not_reached':'TuckArmGrip','failed':'end'})
smach.StateMachine.add('TuckArmFailGrip', TuckArm(), transitions = {'success':'MoveToNext', 'not_reached':'TuckArmFailGrip','failed':'end'})
smach.StateMachine.add('MoveAwayFromPlatform', RecoverState(), transitions = {'done':'MoveToPlace'})
### Move to Place location
smach.StateMachine.add('MoveToPlace', MoveStateUserData(), transitions = {'reached': 'ScanMatcher_Place', 'not_reached': 'MoveAwayFromPlatform', 'failed': 'MoveAwayFromPlatform'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('ScanMatcher_Place', ScanMatcher(), transitions = {'reached':'DecideBeforePlace', 'not_reached':'ScanMatcher_Place', 'failed':'DecideBeforePlace'}, remapping = {'pose_in':'pose', 'suffix_in':'suffix', 'pose_out':'pose'})
#### Decide either Normal place or PPT place
smach.StateMachine.add('DecideBeforePlace', DecideBeforePlaceState(),transitions = {'PPT': 'PreScanHole', 'Normal':'MoveBack'}, remapping = {'object_in':'object', 'object_out':'object'})
####PPT
smach.StateMachine.add('PreScanHole', PreGrip(), transitions = {'success':'ScanHole', 'failed':'TuckArmPreScan'},remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('TuckArmPreScan', TuckArm(), transitions = {'success':'PreScanHole', 'not_reached':'TuckArmPreScan','failed':'end'})
smach.StateMachine.add("ScanHole", ScanForHoles(), transitions = {'success': 'FinePlace', 'failed':'ScanMatcher_Place','nothing_found': 'ScanMatcher_Place'}, remapping = {'pose_in':'pose', 'pose_out':'pose', 'object_in':'object', 'object_out':'object', 'point_out':'point'})
smach.StateMachine.add('FinePlace', FinePlace(), transitions = {'success':'TuckForDriveAfterPlace', 'failed':'TuckArmFailPlace_PPT', 'too_far':'ScanMatcher_Place','failed_after_place':'TuckArmFailPlace_PPT'}, remapping = {'object_in':'object','pose_in':'pose', 'pose_out':'pose', 'point_in':'point'})
smach.StateMachine.add('TuckArmFailPlace_PPT', TuckArm(), transitions = {'success':'FinePlace', 'not_reached':'TuckArmFailPlace_PPT','failed':'end'})
### END PPT
##NORMAL PLACE
smach.StateMachine.add('MoveBack', MoveBack(0.25), transitions = {'done':'Place'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
smach.StateMachine.add('Place', Place(), transitions = {'success':'TuckForDriveAfterPlace', 'failed':'TuckArmFailPlace'}, remapping = {'pose_in':'pose', 'pose_out':'pose'})
##Tuck For Drive
smach.StateMachine.add('TuckForDriveAfterPlace', TuckForDrive(), transitions={'done':'MoveAwayFromPlatformAfterPlace'}, remapping = {'pose_in':'pose'} )
smach.StateMachine.add('TuckArmFailPlace', TuckArm(), transitions = {'success':'Place', 'not_reached':'TuckArmFailPlace','failed':'end'})
smach.StateMachine.add('MoveAwayFromPlatformAfterPlace', RecoverState(), transitions = {'done':'GetNextGoal'})
# Create and start the introspection server
self.sis = smach_ros.IntrospectionServer('server_name', self.sm, '/SLAW_SMACH')
self.sis.start()
self.serv = rospy.Service("/start_SMACH", Empty, self.go)
def go(self, req):
#sm.userdata.pose = "D2"
print "Starting SMACH"
locations = rospy.get_param('locations')
self.sm.userdata.pose = locations[0]
#self.sm.userdata.suffix = "_grip"
self.sm.execute()
return EmptyResponse()
def stop(self):
self.sis.stop()
if __name__ == '__main__':
smach = Smach()
rospy.spin()
smach.stop()
| mit | -3,893,603,533,906,580,000 | 68.744186 | 321 | 0.635128 | false | 3.580896 | false | false | false |
mtrdesign/pylogwatch | pylogwatch/logwlib.py | 1 | 5917 | # Python 2.5 compatibility
from __future__ import with_statement
# Python version
import sys
if sys.version_info < (2, 5):
raise "Required python 2.5 or greater"
import os, sqlite3, itertools, time
from datetime import datetime
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
proj_path = lambda x: os.path.abspath(os.path.join(PROJECT_DIR,x))
# Check if we are bundled together with raven, and add our dir to the pythonpath if we are
if os.path.exists(proj_path( 'raven')):
sys.path.append(PROJECT_DIR)
from raven import Client
def item_import(name):
d = name.rfind(".")
classname = name[d+1:]
m = __import__(name[:d], globals(), locals(), [classname])
return getattr(m, classname)
class PyLog (object):
def __init__ (self, filenames, dbname = 'logw.db', filetable = 'file_cursor', eventtable = 'events'):
self._filetable = filetable
self._eventtable = eventtable
self.conn = self.init_db(dbname)
self.curs = self.conn.cursor()
self.fnames = filenames
def init_db (self, dbname):
"""Set up the DB"""
conn = sqlite3.connect (dbname)
curs = conn.cursor()
sql = 'create table if not exists file_cursor (filename TEXT PRIMARY KEY, inode INTEGER, lastbyte INTEGER, updated INTEGER)'
curs.execute (sql)
sql = 'create table if not exists events (event TEXT PRIMARY KEY, args TEXT, updated INTEGER)'
curs.execute (sql)
conn.commit()
return conn
def readlines (self, f, lastpos = 0):
"""Read full lines from the file object f starting from lastpos"""
self.save_fileinfo (f.name, os.stat(f.name)[1], lastpos)
f.seek(lastpos)
result = []
for line in f:
# handle lines that are not yet finished (no \n)
curpos = f.tell()
if not line.endswith('\n'):
f.seek(curpos)
raise StopIteration
yield line
def get_fileinfo (self, fname):
self.curs.execute ('SELECT filename, inode, lastbyte from file_cursor where filename=?', [fname,])
result = self.curs.fetchone()
if result and len(result)==3:
f, inode, lastbyte = result
return inode,lastbyte
else:
return None,0
def save_fileinfo (self, fname, inode, lastbyte):
self.curs.execute ("REPLACE into file_cursor (filename, inode, lastbyte, updated) \
values (?,?,?,datetime())", [fname,inode, lastbyte ])
self.conn.commit()
return
    def update_bytes (self, fname, lastbyte):
        """
        Only updates the lastbyte property of a file, without touching the inode.
        Meant for calling after each line is processed
        """
        self.curs.execute ("UPDATE file_cursor SET lastbyte=? WHERE filename=?",
                           [lastbyte, fname])
        self.conn.commit()
        return
def process_lines (self, fname, lines):
"""Dummy line processor - should be overridden"""
raise NotImplementedError
def open_rotated_version(self, fname):
sufxs = ['.1','.1.gz','.0']
for sufx in sufxs:
newname = fname + sufx
if not os.path.exists (newname):
continue
try:
f = open(newname)
return f
except:
continue
def run (self):
for fn in self.fnames:
if not os.path.exists (fn):
continue
newlines = []
rotated = None
lastinode, lastbyte = self.get_fileinfo (fn)
if lastbyte and not lastinode == os.stat(fn)[1]:
# handle rotated files
rotated = self.open_rotated_version(fn)
if rotated:
newlines = self.readlines (rotated, lastbyte)
lastbyte = 0
self.process_lines (fn, rotated, newlines)
try:
f = open(fn)
except:
continue
self.process_lines (fn, f, self.readlines (f, lastbyte))
lastbyte = f.tell()
lastinode = os.stat(fn)[1]
f.close()
self.save_fileinfo (fn, lastinode, lastbyte)
if rotated:
rotated.close()
class PyLogConf (PyLog):
def __init__ (self, conf):
"""
Initialize object based on the provided configuration
"""
self.conf = conf
self.client = Client (conf.RAVEN['dsn'])
self.formatters = {}
for k,v in self.conf.FILE_FORMATTERS.iteritems():
if isinstance(v,str):
raise ValueError ('Please use a list or a tuple for the file formatters values')
self.formatters[k] = [item_import(i)() for i in v]
dbname = os.path.join(os.path.dirname(conf.__file__),'pylogwatch.db')
return super(PyLogConf, self).__init__ (self.conf.FILE_FORMATTERS.keys(), dbname = dbname)
def process_lines (self, fname, fileobject, lines):
"""Main workhorse. Called with the filename that is being logged and an iterable of lines"""
for line in lines:
paramdict = {}
data = {'event_type':'Message', 'message': line.replace('%','%%'), 'data' :{'logger':fname}}
for fobj in self.formatters[fname]:
fobj.format_line(line, data, paramdict)
if not data.pop('_do_not_send', False): # Skip lines that have the '_do_not_send' key
if paramdict:
data['params'] = tuple([paramdict[i] for i in sorted(paramdict.keys())])
if self.conf.DEBUG:
print data
self.client.capture(**data)
self.update_bytes(fname, fileobject.tell())
| gpl-3.0 | -7,848,765,276,239,736,000 | 36.449367 | 132 | 0.564982 | false | 4.066667 | false | false | false |
googlemaps/google-maps-services-python | googlemaps/convert.py | 1 | 10197 | #
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Converts Python types to string representations suitable for Maps API server.
For example:
sydney = {
"lat" : -33.8674869,
"lng" : 151.2069902
}
convert.latlng(sydney)
# '-33.8674869,151.2069902'
"""
def format_float(arg):
"""Formats a float value to be as short as possible.
Truncates float to 8 decimal places and trims extraneous
trailing zeros and period to give API args the best
possible chance of fitting within 2000 char URL length
restrictions.
For example:
format_float(40) -> "40"
format_float(40.0) -> "40"
format_float(40.1) -> "40.1"
format_float(40.001) -> "40.001"
format_float(40.0010) -> "40.001"
format_float(40.000000001) -> "40"
format_float(40.000000009) -> "40.00000001"
:param arg: The lat or lng float.
:type arg: float
:rtype: string
"""
return ("%.8f" % float(arg)).rstrip("0").rstrip(".")
def latlng(arg):
"""Converts a lat/lon pair to a comma-separated string.
For example:
sydney = {
"lat" : -33.8674869,
"lng" : 151.2069902
}
convert.latlng(sydney)
# '-33.8674869,151.2069902'
For convenience, also accepts lat/lon pair as a string, in
which case it's returned unchanged.
:param arg: The lat/lon pair.
:type arg: string or dict or list or tuple
"""
if is_string(arg):
return arg
normalized = normalize_lat_lng(arg)
return "%s,%s" % (format_float(normalized[0]), format_float(normalized[1]))
def normalize_lat_lng(arg):
"""Take the various lat/lng representations and return a tuple.
Accepts various representations:
1) dict with two entries - "lat" and "lng"
2) list or tuple - e.g. (-33, 151) or [-33, 151]
:param arg: The lat/lng pair.
:type arg: dict or list or tuple
:rtype: tuple (lat, lng)
"""
if isinstance(arg, dict):
if "lat" in arg and "lng" in arg:
return arg["lat"], arg["lng"]
if "latitude" in arg and "longitude" in arg:
return arg["latitude"], arg["longitude"]
# List or tuple.
if _is_list(arg):
return arg[0], arg[1]
raise TypeError(
"Expected a lat/lng dict or tuple, "
"but got %s" % type(arg).__name__)
def location_list(arg):
"""Joins a list of locations into a pipe separated string, handling
the various formats supported for lat/lng values.
For example:
p = [{"lat" : -33.867486, "lng" : 151.206990}, "Sydney"]
convert.waypoint(p)
# '-33.867486,151.206990|Sydney'
:param arg: The lat/lng list.
:type arg: list
:rtype: string
"""
if isinstance(arg, tuple):
# Handle the single-tuple lat/lng case.
return latlng(arg)
else:
return "|".join([latlng(location) for location in as_list(arg)])
def join_list(sep, arg):
"""If arg is list-like, then joins it with sep.
:param sep: Separator string.
:type sep: string
:param arg: Value to coerce into a list.
:type arg: string or list of strings
:rtype: string
"""
return sep.join(as_list(arg))
def as_list(arg):
"""Coerces arg into a list. If arg is already list-like, returns arg.
Otherwise, returns a one-element list containing arg.
:rtype: list
"""
if _is_list(arg):
return arg
return [arg]
def _is_list(arg):
"""Checks if arg is list-like. This excludes strings and dicts."""
if isinstance(arg, dict):
return False
if isinstance(arg, str): # Python 3-only, as str has __iter__
return False
return _has_method(arg, "__getitem__") if not _has_method(arg, "strip") else _has_method(arg, "__iter__")
def is_string(val):
"""Determines whether the passed value is a string, safe for 2/3."""
try:
basestring
except NameError:
return isinstance(val, str)
return isinstance(val, basestring)
def time(arg):
"""Converts the value into a unix time (seconds since unix epoch).
For example:
convert.time(datetime.now())
# '1409810596'
:param arg: The time.
:type arg: datetime.datetime or int
"""
# handle datetime instances.
if _has_method(arg, "timestamp"):
arg = arg.timestamp()
if isinstance(arg, float):
arg = int(arg)
return str(arg)
def _has_method(arg, method):
"""Returns true if the given object has a method with the given name.
:param arg: the object
:param method: the method name
:type method: string
:rtype: bool
"""
return hasattr(arg, method) and callable(getattr(arg, method))
def components(arg):
"""Converts a dict of components to the format expected by the Google Maps
server.
For example:
c = {"country": "US", "postal_code": "94043"}
convert.components(c)
# 'country:US|postal_code:94043'
:param arg: The component filter.
:type arg: dict
:rtype: basestring
"""
# Components may have multiple values per type, here we
# expand them into individual key/value items, eg:
# {"country": ["US", "AU"], "foo": 1} -> "country:AU", "country:US", "foo:1"
def expand(arg):
for k, v in arg.items():
for item in as_list(v):
yield "%s:%s" % (k, item)
if isinstance(arg, dict):
return "|".join(sorted(expand(arg)))
raise TypeError(
"Expected a dict for components, "
"but got %s" % type(arg).__name__)
def bounds(arg):
"""Converts a lat/lon bounds to a comma- and pipe-separated string.
Accepts two representations:
1) string: pipe-separated pair of comma-separated lat/lon pairs.
2) dict with two entries - "southwest" and "northeast". See convert.latlng
for information on how these can be represented.
For example:
sydney_bounds = {
"northeast" : {
"lat" : -33.4245981,
"lng" : 151.3426361
},
"southwest" : {
"lat" : -34.1692489,
"lng" : 150.502229
}
}
convert.bounds(sydney_bounds)
# '-34.169249,150.502229|-33.424598,151.342636'
:param arg: The bounds.
:type arg: dict
"""
if is_string(arg) and arg.count("|") == 1 and arg.count(",") == 2:
return arg
elif isinstance(arg, dict):
if "southwest" in arg and "northeast" in arg:
return "%s|%s" % (latlng(arg["southwest"]),
latlng(arg["northeast"]))
raise TypeError(
"Expected a bounds (southwest/northeast) dict, "
"but got %s" % type(arg).__name__)
def size(arg):
if isinstance(arg, int):
return "%sx%s" % (arg, arg)
elif _is_list(arg):
return "%sx%s" % (arg[0], arg[1])
raise TypeError(
"Expected a size int or list, "
"but got %s" % type(arg).__name__)
def decode_polyline(polyline):
"""Decodes a Polyline string into a list of lat/lng dicts.
See the developer docs for a detailed description of this encoding:
https://developers.google.com/maps/documentation/utilities/polylinealgorithm
:param polyline: An encoded polyline
:type polyline: string
:rtype: list of dicts with lat/lng keys
"""
points = []
index = lat = lng = 0
while index < len(polyline):
result = 1
shift = 0
while True:
b = ord(polyline[index]) - 63 - 1
index += 1
result += b << shift
shift += 5
if b < 0x1f:
break
lat += (~result >> 1) if (result & 1) != 0 else (result >> 1)
result = 1
shift = 0
while True:
b = ord(polyline[index]) - 63 - 1
index += 1
result += b << shift
shift += 5
if b < 0x1f:
break
        lng += (~result >> 1) if (result & 1) != 0 else (result >> 1)
points.append({"lat": lat * 1e-5, "lng": lng * 1e-5})
return points
def encode_polyline(points):
"""Encodes a list of points into a polyline string.
See the developer docs for a detailed description of this encoding:
https://developers.google.com/maps/documentation/utilities/polylinealgorithm
:param points: a list of lat/lng pairs
:type points: list of dicts or tuples
:rtype: string
"""
last_lat = last_lng = 0
result = ""
for point in points:
ll = normalize_lat_lng(point)
lat = int(round(ll[0] * 1e5))
lng = int(round(ll[1] * 1e5))
d_lat = lat - last_lat
d_lng = lng - last_lng
for v in [d_lat, d_lng]:
v = ~(v << 1) if v < 0 else v << 1
while v >= 0x20:
result += (chr((0x20 | (v & 0x1f)) + 63))
v >>= 5
result += (chr(v + 63))
last_lat = lat
last_lng = lng
return result
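# Hedged round-trip sketch using the two-point example from Google's encoding
# docs (values assumed, not asserted here):
#   pts = decode_polyline("_p~iF~ps|U_ulLnnqC")
#   # -> [{"lat": 38.5, "lng": -120.2}, {"lat": 40.7, "lng": -120.95}]
#   encode_polyline(pts)  # -> "_p~iF~ps|U_ulLnnqC"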
def shortest_path(locations):
"""Returns the shortest representation of the given locations.
The Elevations API limits requests to 2000 characters, and accepts
multiple locations either as pipe-delimited lat/lng values, or
an encoded polyline, so we determine which is shortest and use it.
:param locations: The lat/lng list.
:type locations: list
:rtype: string
"""
if isinstance(locations, tuple):
# Handle the single-tuple lat/lng case.
locations = [locations]
encoded = "enc:%s" % encode_polyline(locations)
unencoded = location_list(locations)
if len(encoded) < len(unencoded):
return encoded
else:
return unencoded
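# Hedged intuition: an "enc:"-prefixed point costs at most ~12 characters no
# matter how precise it is, while a "lat,lng" pair can take ~20+ characters
# plus pipe separators, so the encoded form usually wins once more than a
# handful of points are passed.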
| apache-2.0 | 7,628,449,255,522,140,000 | 25.417098 | 109 | 0.5939 | false | 3.619808 | false | false | false |
mo-ki/pypgpwords | pypgpwords.py | 1 | 13257 | #!/usr/bin/python3
"""Define a PGPWords object inherited from bytearray.
Adds initialization via hex or PGP-word strings,
adds a .hex() method, and
overrides __str__.
Mainline code:
Convert pgp words to hex strings and vice versa.
Example:
$ pypgpwords.py DEAD 1337
tactics perceptive Aztec consensus
or
$ pypgpwords.py absurd bodyguard baboon unicorn
0116 14EC
[email protected]
"""
from __future__ import print_function
import sys
SEPARATOR = " "
EVEN = ("aardvark",
"absurd",
"accrue",
"acme",
"adrift",
"adult",
"afflict",
"ahead",
"aimless",
"Algol",
"allow",
"alone",
"ammo",
"ancient",
"apple",
"artist",
"assume",
"Athens",
"atlas",
"Aztec",
"baboon",
"backfield",
"backward",
"banjo",
"beaming",
"bedlamp",
"beehive",
"beeswax",
"befriend",
"Belfast",
"berserk",
"billiard",
"bison",
"blackjack",
"blockade",
"blowtorch",
"bluebird",
"bombast",
"bookshelf",
"brackish",
"breadline",
"breakup",
"brickyard",
"briefcase",
"Burbank",
"button",
"buzzard",
"cement",
"chairlift",
"chatter",
"checkup",
"chisel",
"choking",
"chopper",
"Christmas",
"clamshell",
"classic",
"classroom",
"cleanup",
"clockwork",
"cobra",
"commence",
"concert",
"cowbell",
"crackdown",
"cranky",
"crowfoot",
"crucial",
"crumpled",
"crusade",
"cubic",
"dashboard",
"deadbolt",
"deckhand",
"dogsled",
"dragnet",
"drainage",
"dreadful",
"drifter",
"dropper",
"drumbeat",
"drunken",
"Dupont",
"dwelling",
"eating",
"edict",
"egghead",
"eightball",
"endorse",
"endow",
"enlist",
"erase",
"escape",
"exceed",
"eyeglass",
"eyetooth",
"facial",
"fallout",
"flagpole",
"flatfoot",
"flytrap",
"fracture",
"framework",
"freedom",
"frighten",
"gazelle",
"Geiger",
"glitter",
"glucose",
"goggles",
"goldfish",
"gremlin",
"guidance",
"hamlet",
"highchair",
"hockey",
"indoors",
"indulge",
"inverse",
"involve",
"island",
"jawbone",
"keyboard",
"kickoff",
"kiwi",
"klaxon",
"locale",
"lockup",
"merit",
"minnow",
"miser",
"Mohawk",
"mural",
"music",
"necklace",
"Neptune",
"newborn",
"nightbird",
"Oakland",
"obtuse",
"offload",
"optic",
"orca",
"payday",
"peachy",
"pheasant",
"physique",
"playhouse",
"Pluto",
"preclude",
"prefer",
"preshrunk",
"printer",
"prowler",
"pupil",
"puppy",
"python",
"quadrant",
"quiver",
"quota",
"ragtime",
"ratchet",
"rebirth",
"reform",
"regain",
"reindeer",
"rematch",
"repay",
"retouch",
"revenge",
"reward",
"rhythm",
"ribcage",
"ringbolt",
"robust",
"rocker",
"ruffled",
"sailboat",
"sawdust",
"scallion",
"scenic",
"scorecard",
"Scotland",
"seabird",
"select",
"sentence",
"shadow",
"shamrock",
"showgirl",
"skullcap",
"skydive",
"slingshot",
"slowdown",
"snapline",
"snapshot",
"snowcap",
"snowslide",
"solo",
"southward",
"soybean",
"spaniel",
"spearhead",
"spellbind",
"spheroid",
"spigot",
"spindle",
"spyglass",
"stagehand",
"stagnate",
"stairway",
"standard",
"stapler",
"steamship",
"sterling",
"stockman",
"stopwatch",
"stormy",
"sugar",
"surmount",
"suspense",
"sweatband",
"swelter",
"tactics",
"talon",
"tapeworm",
"tempest",
"tiger",
"tissue",
"tonic",
"topmost",
"tracker",
"transit",
"trauma",
"treadmill",
"Trojan",
"trouble",
"tumor",
"tunnel",
"tycoon",
"uncut",
"unearth",
"unwind",
"uproot",
"upset",
"upshot",
"vapor",
"village",
"virus",
"Vulcan",
"waffle",
"wallet",
"watchword",
"wayside",
"willow",
"woodlark",
"Zulu")
ODD = ("adroitness",
"adviser",
"aftermath",
"aggregate",
"alkali",
"almighty",
"amulet",
"amusement",
"antenna",
"applicant",
"Apollo",
"armistice",
"article",
"asteroid",
"Atlantic",
"atmosphere",
"autopsy",
"Babylon",
"backwater",
"barbecue",
"belowground",
"bifocals",
"bodyguard",
"bookseller",
"borderline",
"bottomless",
"Bradbury",
"bravado",
"Brazilian",
"breakaway",
"Burlington",
"businessman",
"butterfat",
"Camelot",
"candidate",
"cannonball",
"Capricorn",
"caravan",
"caretaker",
"celebrate",
"cellulose",
"certify",
"chambermaid",
"Cherokee",
"Chicago",
"clergyman",
"coherence",
"combustion",
"commando",
"company",
"component",
"concurrent",
"confidence",
"conformist",
"congregate",
"consensus",
"consulting",
"corporate",
"corrosion",
"councilman",
"crossover",
"crucifix",
"cumbersome",
"customer",
"Dakota",
"decadence",
"December",
"decimal",
"designing",
"detector",
"detergent",
"determine",
"dictator",
"dinosaur",
"direction",
"disable",
"disbelief",
"disruptive",
"distortion",
"document",
"embezzle",
"enchanting",
"enrollment",
"enterprise",
"equation",
"equipment",
"escapade",
"Eskimo",
"everyday",
"examine",
"existence",
"exodus",
"fascinate",
"filament",
"finicky",
"forever",
"fortitude",
"frequency",
"gadgetry",
"Galveston",
"getaway",
"glossary",
"gossamer",
"graduate",
"gravity",
"guitarist",
"hamburger",
"Hamilton",
"handiwork",
"hazardous",
"headwaters",
"hemisphere",
"hesitate",
"hideaway",
"holiness",
"hurricane",
"hydraulic",
"impartial",
"impetus",
"inception",
"indigo",
"inertia",
"infancy",
"inferno",
"informant",
"insincere",
"insurgent",
"integrate",
"intention",
"inventive",
"Istanbul",
"Jamaica",
"Jupiter",
"leprosy",
"letterhead",
"liberty",
"maritime",
"matchmaker",
"maverick",
"Medusa",
"megaton",
"microscope",
"microwave",
"midsummer",
"millionaire",
"miracle",
"misnomer",
"molasses",
"molecule",
"Montana",
"monument",
"mosquito",
"narrative",
"nebula",
"newsletter",
"Norwegian",
"October",
"Ohio",
"onlooker",
"opulent",
"Orlando",
"outfielder",
"Pacific",
"pandemic",
"Pandora",
"paperweight",
"paragon",
"paragraph",
"paramount",
"passenger",
"pedigree",
"Pegasus",
"penetrate",
"perceptive",
"performance",
"pharmacy",
"phonetic",
"photograph",
"pioneer",
"pocketful",
"politeness",
"positive",
"potato",
"processor",
"provincial",
"proximate",
"puberty",
"publisher",
"pyramid",
"quantity",
"racketeer",
"rebellion",
"recipe",
"recover",
"repellent",
"replica",
"reproduce",
"resistor",
"responsive",
"retraction",
"retrieval",
"retrospect",
"revenue",
"revival",
"revolver",
"sandalwood",
"sardonic",
"Saturday",
"savagery",
"scavenger",
"sensation",
"sociable",
"souvenir",
"specialist",
"speculate",
"stethoscope",
"stupendous",
"supportive",
"surrender",
"suspicious",
"sympathy",
"tambourine",
"telephone",
"therapist",
"tobacco",
"tolerance",
"tomorrow",
"torpedo",
"tradition",
"travesty",
"trombonist",
"truncated",
"typewriter",
"ultimate",
"undaunted",
"underfoot",
"unicorn",
"unify",
"universe",
"unravel",
"upcoming",
"vacancy",
"vagabond",
"vertigo",
"Virginia",
"visitor",
"vocalist",
"voyager",
"warranty",
"Waterloo",
"whimsical",
"Wichita",
"Wilmington",
"Wyoming",
"yesteryear",
"Yucatan")
class InvalidWordError(ValueError):
pass
def words_to_int(word_iter, odd=False):
"""Generator yielding integer indices for each word in word_iter.
:param word_iter: iterable of pgp words
:type word_iter: iterable
:param odd: start with odd word list
:type odd: boolean
:return: integer
:rtype: generator
"""
for word in word_iter:
try:
yield (ODD if odd else EVEN).index(word)
except ValueError:
msg = "not in {} word list: '{}'"
raise InvalidWordError(msg.format("odd" if odd else "even", word))
# toggle odd/even
odd = not odd
def ints_to_word(int_iter, odd=False):
"""Generator yielding PGP words for each byte/int in int_iter.
:param int_iter: iterable of integers between 0 and 255
:type int_iter: iterable
:param odd: start with odd word list
:type odd: boolean
:return: pgp words
:rtype: generator
"""
for idx in int_iter:
yield (ODD if odd else EVEN)[idx]
# toggle odd/even
odd = not odd
class PGPWords(bytearray):
"""Inherits from bytearray. Add .hex() method and overwrite __str__"""
def __init__(self, source, **kwargs):
"""Initiate bytearray. Added initialization styles:
E.g.:
p = PGPWords("absurd bodyguard baboon", encoding="pgp-words")
p = PGPWords("DEAD 1337", encoding="hex")
"""
enc = kwargs.get("encoding")
if enc == "pgp-words":
kwargs.pop("encoding")
source = words_to_int(source.split(SEPARATOR), **kwargs)
kwargs = {}
elif enc == "hex" or source.startswith('0x'):
kwargs.pop("encoding")
tmp = source.replace("0x", '').replace(' ', '')
source = (int(tmp[i:i+2], 16) for i in range(0, len(tmp), 2))
super(PGPWords, self).__init__(source, **kwargs)
def __str__(self):
"""Return corresponding pgp words, separated by SEPARATOR."""
gen = ints_to_word(self)
return SEPARATOR.join(gen)
def hex(self):
"""Return corresponding hex representation as string"""
tmp = ''.join([hex(i).split('x')[1].zfill(2) for i in self])
gen = (tmp[i:i+4].upper() for i in range(0, len(tmp), 4))
return SEPARATOR.join(gen)
def main():
"""Try to convert arguments in either direction."""
if len(sys.argv) < 2 or sys.argv[1].startswith('-'):
print(__doc__.split("Mainline code:\n\n")[1], file=sys.stderr)
exit(-1)
arg_str = ' '.join(sys.argv[1:])
try:
result = PGPWords(arg_str, encoding="hex")
print(result)
except ValueError as err1:
try:
result = PGPWords(arg_str, encoding="pgp-words").hex()
print(result)
except InvalidWordError as err2:
print(err1, file=sys.stderr)
print(err2, file=sys.stderr)
exit(-1)
if __name__ == "__main__":
main()
| mit | -7,093,815,584,150,289,000 | 19.746479 | 78 | 0.45063 | false | 3.529553 | false | false | false |
petezybrick/iote2e | iote2e-pyclient/src/iote2epyclient/test/testhatsensors.py | 1 | 3137 | # Copyright 2016, 2017 Peter Zybrick and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
testhatsensors - Test RPi HAT sensors
:author: Pete Zybrick
:contact: [email protected]
:version: 1.0.0
"""
import sys
import datetime
from sense_hat import SenseHat
from time import sleep
def main(conf_file):
import logging.config
logging.config.fileConfig( conf_file, disable_existing_loggers=False)
logger = logging.getLogger(__name__)
logger.info('Starting')
sense = SenseHat()
#showMessages(sense)
#showLetters(sense)
#showPixels(sense)
showTemperature(sense)
#showJoystickPoll(sense)
#showJoystickWait(sense)
sense.clear()
logger.info('Done')
def showJoystickPoll(sense):
while True:
for event in sense.stick.get_events():
print("The joystick was {} {}".format(event.action,event.direction))
sleep(.25)
print('poll')
def showJoystickWait(sense):
while True:
event = sense.stick.wait_for_event()
if "middle" == event.direction:
if "pressed" == event.action:
print("1");
elif "released" == event.action:
print("0");
#print("The joystick was {} {}".format(event.action,event.direction))
def showTemperature(sense):
for i in range(0,5):
t = round(sense.get_temperature(),2)
print(t)
sense.show_message("{}".format(t), scroll_speed=.1)
sleep(1)
def showMessages(sense):
sense.show_message("Watson, come here. I need you.", scroll_speed=.025);
def showLetters(sense):
sense.show_letter("R", text_colour=[255,0,0],back_colour=[0,0,0]);
sleep(1.5)
sense.show_letter("G", text_colour=[0,255,0],back_colour=[0,0,0]);
sleep(1.5)
sense.show_letter("B", text_colour=[0,0,255],back_colour=[0,0,0]);
sleep(1.5)
def showPixels(sense):
b = [0,0,255]
y = [255,255,0]
e = [0,0,0]
image = [
b,b,e,b,b,e,y,y,
b,b,e,b,b,e,y,y,
e,e,e,e,e,e,e,e,
b,b,e,b,b,e,b,b,
b,b,e,b,b,e,b,b,
e,e,e,e,e,e,e,e,
b,b,e,b,b,e,b,b,
b,b,e,b,b,e,b,b
]
sense.set_pixels(image)
angles = [0,90,180,270,0,90,180,270]
for angle in angles:
sense.set_rotation(angle)
sleep(2)
if __name__ == '__main__':
sys.argv = ['testhatsensors.py', '/home/pete/iote2epyclient/log-configs/client_consoleonly.conf']
if( len(sys.argv) < 2 ):
print('Invalid format, execution cancelled')
print('Correct format: python <consoleConfigFile.conf>')
sys.exit(8)
main(sys.argv[1])
| apache-2.0 | 3,881,030,705,390,971,000 | 27.008929 | 101 | 0.620019 | false | 3.115194 | false | false | false |
Goyatuzo/Challenges | HackerRank/Algorithms/Sorting/Insertion Sort Part 1/insertion_sort_p1.py | 1 | 1113 | def insertion_sort(lst):
"""Instead of just inserting the value where it should be at,
it shifts the entire array until the location is found. It prints
out all the intermediate steps, but the final step is actually just
returned, so the output must be manually printed.
:param lst: The list of values to be sorted by insertion."""
# The value to be inserted.
to_insert = lst[-1]
n = len(lst)
# Remove the element to be added and replace with last element.
del lst[-1]
lst.append(lst[-1])
print(" ".join(map(str, lst)))
for i in range(n - 2, -1, -1):
# If it's at the beginning of the list, just insert it.
if i <= 0:
lst.insert(0, to_insert)
del lst[1]
break
# If it's in the middle of the list.
elif lst[i - 1] <= to_insert and lst[i] >= to_insert:
lst.insert(i, to_insert)
del lst[i + 1]
break
else:
lst.insert(i, lst[i - 1])
del lst[i + 1]
print(" ".join(map(str, lst)))
return " ".join(map(str, lst))
| mit | 3,846,452,538,363,552,000 | 29.916667 | 71 | 0.562444 | false | 3.673267 | false | false | false |
ict-felix/stack | vt_manager_kvm/src/python/vt_manager_kvm/controller/dispatchers/ui/GUIdispatcher.py | 1 | 17272 | from django.core.urlresolvers import reverse
from django.forms.models import modelformset_factory
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import simple
from django.views.generic import list_detail, simple
from django.views.generic.create_update import apply_extra_context
from vt_manager_kvm.models import *
from vt_manager_kvm.communication.utils.XmlHelper import XmlHelper
import uuid, time, logging
from django.template import loader, RequestContext
from django.core.xheaders import populate_xheaders
from django.contrib import messages
#News
from vt_manager_kvm.controller.drivers.VTDriver import VTDriver
from vt_manager_kvm.utils.HttpUtils import HttpUtils
from vt_manager_kvm.models.NetworkInterface import NetworkInterface
from vt_manager_kvm.models.MacRange import MacRange
from vt_manager_kvm.controller.dispatchers.xmlrpc.InformationDispatcher import InformationDispatcher
from vt_manager_kvm.controller.dispatchers.forms.NetworkInterfaceForm import MgmtBridgeForm
from vt_manager_kvm.controller.dispatchers.forms.ServerForm import ServerForm
from django.db import transaction
def userIsIslandManager(request):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
@transaction.commit_on_success
def servers_crud(request, server_id=None):
"""Show a page for the user to add/edit an VTServer """
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
vmProjects = {}
vmSlices = {}
try:
for vm in VTDriver.getVMsInServer(VTDriver.getServerById(server_id)):
if vm.projectName not in vmProjects:
vmProjects[vm.projectName] = vm.projectId
if vm.sliceName not in vmSlices:
vmSlices[vm.sliceName] = vm.sliceId
except Exception as e:
print e
pass
serverFormClass = HttpUtils.getFormFromModel(VTServer)
ifaceFormClass = HttpUtils.getFormFromModel(NetworkInterface)
IfaceFormSetClass = modelformset_factory(NetworkInterface)
if server_id != None:
server = get_object_or_404(VTServer, pk=server_id)
else:
server = None
if request.method == "GET":
#serverForm = serverFormClass(instance=server)
serverForm = ServerForm(instance=server, prefix ="server")
if server != None:
mgmt = server.getNetworkInterfaces().filter(isMgmt = True)
if mgmt:
mgmt = mgmt.get()
mgmtIfaceForm = MgmtBridgeForm({'mgmtBridge-name':mgmt.getName(), 'mgmtBridge-mac':mgmt.getMacStr()}, prefix ="mgmtBridge")
else:
mgmtIfaceForm = MgmtBridgeForm(prefix ="mgmtBridge")
data = server.getNetworkInterfaces().filter(isMgmt = False)
if data:
IfaceFormSetClass = modelformset_factory(NetworkInterface,extra = 0)
ifaceformset = IfaceFormSetClass(queryset= data)
else:
mgmtIfaceForm = MgmtBridgeForm(prefix ="mgmtBridge")
ifaceformset = IfaceFormSetClass(queryset= NetworkInterface.objects.none())
elif request.method == "POST":
#serverForm = serverFormClass(request.POST, instance=server)
serverForm = ServerForm(request.POST, instance=server, prefix ="server")
ifaceformset = IfaceFormSetClass(request.POST)
mgmtIfaceForm = MgmtBridgeForm(request.POST, prefix ="mgmtBridge")
if serverForm.is_valid() and ifaceformset.is_valid() and mgmtIfaceForm.is_valid():
ifaces = ifaceformset.save(commit = False)
if server == None:
server = serverForm.save(commit = False)
try:
server = VTDriver.crudServerFromInstance(server)
VTDriver.setMgmtBridge(request, server)
VTDriver.crudDataBridgeFromInstance(server, ifaces,request.POST.getlist("DELETE"))
except Exception as e:
print e
e = HttpUtils.processException(e)
context = {"exception":e, "serverForm": serverForm, 'vmProjects': vmProjects, 'vmSlices': vmSlices,'ifaceformset' : ifaceformset, 'mgmtIfaceForm' : mgmtIfaceForm}
if server_id != None: context["server"] = server
return simple.direct_to_template(
request,
template="servers/servers_crud.html",
extra_context=context,
)
# Returns to server's admin page and rollback transactions
return HttpResponseRedirect('/servers/admin/')
else:
return HttpResponseNotAllowed("GET", "POST")
context = {"serverForm": serverForm, 'vmProjects': vmProjects, 'vmSlices': vmSlices,'ifaceformset' : ifaceformset, 'mgmtIfaceForm' : mgmtIfaceForm}
if server_id != None: context["server"] = server
return simple.direct_to_template(
request,
template="servers/servers_crud.html",
extra_context=context,
)
def admin_servers(request):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
servers = VTDriver.getAllServers()
return simple.direct_to_template(
request, template="servers/admin_servers.html",
extra_context={"servers_ids": servers})
def delete_server(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
if request.method == 'POST':
try:
VTDriver.deleteServer(VTDriver.getServerById(server_id))
return HttpResponseRedirect(reverse('dashboard'))
except Exception as e:
logging.error(e)
e = HttpUtils.processException(e)
return simple.direct_to_template(request,
template = 'servers/delete_server.html',
extra_context = {'user':request.user, 'exception':e, 'next':reverse("admin_servers")},
)
elif request.method == 'GET':
return simple.direct_to_template(request,
template = 'servers/delete_server.html',
extra_context = {'user':request.user, 'next':reverse("admin_servers"),'object':VTDriver.getServerById(server_id)},
)
def action_vm(request, server_id, vm_id, action):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
if(action == 'list'):
return simple.direct_to_template(
request, template="servers/server_vm_details.html",
extra_context={"vm": VTDriver.getVMbyId(vm_id), "server_id":server_id}
)
elif(action == 'check_status'):
#XXX: Do this function if needed
return simple.direct_to_template(
request, template="servers/list_vm.html",
extra_context={"vm": VM.objects.get(id = vm_id)}
)
elif(action == 'force_update_server'):
InformationDispatcher.forceListActiveVMs(serverID=server_id)
elif(action == 'force_update_vm'):
InformationDispatcher.forceListActiveVMs(vmID=vm_id)
else:
#XXX: serverUUID should be passed in a different way
VTDriver.PropagateActionToProvisioningDispatcher(vm_id, VTServer.objects.get(id=server_id).uuid, action)
#return HttpResponseRedirect(reverse('edit_server', args = [server_id]))
return HttpResponse("")
def subscribeEthernetRanges(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
macRanges = MacRange.objects.all()
if server_id != None:
server = get_object_or_404(VTServer, pk=server_id)
else:
raise Exception ("NO SERVER")
if request.method == "GET":
return simple.direct_to_template(request,
template = 'servers/servers_subscribeEthernetRanges.html',
extra_context = {'server': server, 'macRanges':macRanges},
)
elif request.method=='POST':
VTDriver.manageEthernetRanges(request,server,macRanges)
return HttpResponseRedirect(reverse('edit_server', args = [server_id]))
else:
return HttpResponseNotAllowed("GET", "POST")
def subscribeIp4Ranges(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
ipRanges = Ip4Range.objects.all()
if server_id != None:
server = get_object_or_404(VTServer, pk=server_id)
else:
raise Exception ("NO SERVER")
if request.method == "GET":
return simple.direct_to_template(request,
template = 'servers/servers_subscribeIp4Ranges.html',
extra_context = {'server': server, 'ipRanges':ipRanges},
)
elif request.method=='POST':
VTDriver.manageIp4Ranges(request,server,ipRanges)
return HttpResponseRedirect(reverse('edit_server', args = [server_id]))
else:
return HttpResponseNotAllowed("GET", "POST")
def list_vms(request, server_id):
if (not request.user.is_superuser):
return simple.direct_to_template(request,
template = 'not_admin.html',
extra_context = {'user':request.user},
)
vmProjects = {}
vmSlices = {}
try:
for vm in VTDriver.getVMsInServer(VTDriver.getServerById(server_id)):
if vm.projectName not in vmProjects:
vmProjects[vm.projectName] = vm.projectId
if vm.sliceName not in vmSlices:
vmSlices[vm.sliceName] = vm.sliceId
except Exception as e:
print e
pass
server = get_object_or_404(VTServer, pk=server_id)
context = { 'vmProjects': vmProjects, 'vmSlices': vmSlices,'server':server}
return simple.direct_to_template(
request,
template="servers/servers_list_vms.html",
extra_context=context,
)
'''
Networking point of entry
'''
from vt_manager_kvm.controller.networking.EthernetController import EthernetController
from vt_manager_kvm.controller.networking.Ip4Controller import Ip4Controller
from vt_manager_kvm.models.MacRange import MacRange
NETWORKING_ACTION_ADD="add"
NETWORKING_ACTION_EDIT="edit"
NETWORKING_ACTION_DELETE="delete"
NETWORKING_ACTION_SHOW="show"
NETWORKING_ACTION_ADDEXCLUDED="addExcluded"
NETWORKING_ACTION_REMOVEXCLUDED="removeExcluded"
NETWORKING_POSSIBLE_ACTIONS=(NETWORKING_ACTION_ADD,NETWORKING_ACTION_DELETE,NETWORKING_ACTION_EDIT,NETWORKING_ACTION_SHOW,NETWORKING_ACTION_ADDEXCLUDED,NETWORKING_ACTION_REMOVEXCLUDED,None)
def networkingDashboard(request):#,rangeId):
extra_context = {"section": "networking","subsection":"None"}
extra_context["macRanges"] = EthernetController.listRanges()
extra_context["MacRange"] = MacRange
extra_context["ip4Ranges"] = Ip4Controller.listRanges()
extra_context["Ip4Range"] = Ip4Range
template = "networking/index.html"
return simple.direct_to_template(
request,
extra_context=extra_context,
template=template,
)
def manageIp4(request,rangeId=None,action=None,ip4Id=None):
if not action in NETWORKING_POSSIBLE_ACTIONS:
raise Exception("Unknown action")
#Define context
extra_context = {"section": "networking","subsection":"ip4"+str(action),}
#Add process
if (action == NETWORKING_ACTION_ADD):
if request.method == "GET":
#Show form
extra_context["form"] = HttpUtils.getFormFromModel(Ip4Range)
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeCrud.html",
)
return
# return HttpResponseRedirect("/networking/ip4/")
elif request.method == "POST":
try:
instance = HttpUtils.getInstanceFromForm(request,Ip4Range)
#Create Range
Ip4Controller.createRange(instance)
return HttpResponseRedirect("/networking/ip4/")
except Exception as e:
print e
extra_context["form"] = HttpUtils.processExceptionForm(e,request,Ip4Range)
#Process creation query
#return HttpResponseRedirect("/networking/ip4/")
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeCrud.html",
)
#Show
if ((action == None) or (action==NETWORKING_ACTION_SHOW)) and (not rangeId==None):
instance = Ip4Controller.getRange(rangeId)
extra_context["range"] = instance
#return HttpResponseRedirect("/networking/ip4/")
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeDetail.html",
)
#Edit
#TODO
#Add excluded Ip
if (action == NETWORKING_ACTION_ADDEXCLUDED) and (request.method == "POST"):
if not request.method == "POST":
raise Exception("Invalid method")
try:
instance = Ip4Controller.getRange(rangeId)
extra_context["range"] = instance
#Create excluded
Ip4Controller.addExcludedIp4(instance,request)
return HttpResponseRedirect("/networking/ip4/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeDetail.html",
)
#Release excluded Ip
if (action == NETWORKING_ACTION_REMOVEXCLUDED) and (request.method == "POST"):
try:
instance = Ip4Controller.getRange(rangeId)
#Create excluded
Ip4Controller.removeExcludedIp4(instance,ip4Id)
#FIXME: Why initial instance is not refreshed?
instance = Ip4Controller.getRange(rangeId)
extra_context["range"] = instance
return HttpResponseRedirect("/networking/ip4/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ip4/rangeDetail.html",
)
#Delete
if (action == NETWORKING_ACTION_DELETE) and (request.method == "POST"):
try:
Ip4Controller.deleteRange(rangeId)
return HttpResponseRedirect("/networking/ip4/")
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
extra_context["ranges"] = Ip4Controller.listRanges()
template = "networking/ip4/index.html"
return simple.direct_to_template(
request,
extra_context = extra_context,
template=template,
)
def manageEthernet(request,rangeId=None,action=None,macId=None):
if not action in NETWORKING_POSSIBLE_ACTIONS:
raise Exception("Unknown action")
#Define context
extra_context = {"section": "networking","subsection":"ethernet",}
#Add process
if (action == NETWORKING_ACTION_ADD):
if request.method == "GET":
#Show form
extra_context["form"] = HttpUtils.getFormFromModel(MacRange)
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeCrud.html",
)
return
elif request.method == "POST":
try:
instance = HttpUtils.getInstanceFromForm(request,MacRange)
#Create Range
EthernetController.createRange(instance)
return HttpResponseRedirect("/networking/ethernet/")
except Exception as e:
print e
extra_context["form"] = HttpUtils.processExceptionForm(e,request,MacRange)
#Process creation query
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeCrud.html",
)
#Show
if ((action == None) or (action==NETWORKING_ACTION_SHOW)) and (not rangeId==None):
instance = EthernetController.getRange(rangeId)
extra_context["range"] = instance
#return HttpResponseRedirect("/networking/ethernet/")
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeDetail.html",
)
#Edit
#TODO
#Add excluded Mac
if (action == NETWORKING_ACTION_ADDEXCLUDED) and (request.method == "POST"):
if not request.method == "POST":
raise Exception("Invalid method")
try:
instance = EthernetController.getRange(rangeId)
extra_context["range"] = instance
#Create excluded
EthernetController.addExcludedMac(instance,request)
return HttpResponseRedirect("/networking/ethernet/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeDetail.html",
)
#Release excluded Mac
if (action == NETWORKING_ACTION_REMOVEXCLUDED) and (request.method == "POST"):
try:
instance = EthernetController.getRange(rangeId)
#Create excluded
#FIXME: Why initial instance is not refreshed?
EthernetController.removeExcludedMac(instance,macId)
instance = EthernetController.getRange(rangeId)
extra_context["range"] = instance
return HttpResponseRedirect("/networking/ethernet/"+rangeId)
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
return simple.direct_to_template(
request,
extra_context = extra_context,
template="networking/ethernet/rangeDetail.html",
)
#Delete
if (action == NETWORKING_ACTION_DELETE) and (request.method == "POST"):
try:
EthernetController.deleteRange(rangeId)
return HttpResponseRedirect("/networking/ethernet/")
except Exception as e:
print e
extra_context["errors"] = HttpUtils.processException(e)
pass
#Listing ranges
extra_context["ranges"] = EthernetController.listRanges()
return simple.direct_to_template(
request,
extra_context = extra_context,
template = "networking/ethernet/index.html",
)
| apache-2.0 | -5,939,845,024,590,403,000 | 29.898032 | 189 | 0.721688 | false | 3.373438 | false | false | false |
edx-solutions/discussion-edx-platform-extensions | social_engagement/engagement.py | 1 | 14753 | """
Business logic tier regarding social engagement scores
"""
import logging
import sys
from collections import defaultdict
from datetime import datetime
import pytz
from django.conf import settings
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.http import HttpRequest
import openedx.core.djangoapps.django_comment_common.comment_client as cc
from edx_notifications.data import NotificationMessage
from edx_notifications.lib.publisher import (get_notification_type,
publish_notification_to_user)
from edx_solutions_api_integration.utils import get_aggregate_exclusion_user_ids
from lms.djangoapps.discussion.rest_api.exceptions import (CommentNotFoundError,
ThreadNotFoundError)
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.django_comment_common.comment_client.user import get_course_social_stats
from openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientRequestError
from requests.exceptions import ConnectionError
from xmodule.modulestore.django import modulestore
from .models import StudentSocialEngagementScore
log = logging.getLogger(__name__)
def update_course_engagement(course_id, compute_if_closed_course=False, course_descriptor=None):
"""
Compute and save engagement scores and stats for whole course.
"""
if not settings.FEATURES.get('ENABLE_SOCIAL_ENGAGEMENT', False):
return
course_key = course_id if isinstance(course_id, CourseKey) else CourseKey.from_string(course_id)
# cs_comment_service works is slash separated course_id strings
slash_course_id = str(course_key)
if not course_descriptor:
# it course descriptor was not passed in (as an optimization)
course_descriptor = modulestore().get_course(course_key)
if not course_descriptor:
# couldn't find course?!?
return
if not compute_if_closed_course and course_descriptor.end:
# if course is closed then don't bother. Note we can override this if we want to force update
now_utc = datetime.now(pytz.UTC)
if now_utc > course_descriptor.end:
log.info('update_user_engagement_score() is skipping because the course is closed...')
return
score_update_count = 0
try:
for user_id, social_stats in _get_course_social_stats(slash_course_id):
log.info('Updating social engagement score for user_id {} in course_key {}'.format(user_id, course_key))
current_score = _compute_social_engagement_score(social_stats)
StudentSocialEngagementScore.save_user_engagement_score(
course_key, user_id, current_score, social_stats
)
score_update_count += 1
except (CommentClientRequestError, ConnectionError) as error:
log.exception(error)
return score_update_count
def _get_course_social_stats(course_id):
""""
Yield user and user's stats for whole course from Forum API.
"""
stats = get_course_social_stats(course_id)
yield from stats.items()
def get_social_metric_points():
"""
Get custom or default social metric points.
"""
return getattr(
settings,
'SOCIAL_METRIC_POINTS',
{
'num_threads': 10,
'num_comments': 15,
'num_replies': 15,
'num_upvotes': 25,
'num_thread_followers': 5,
'num_comments_generated': 15,
}
)
def _compute_social_engagement_score(social_metrics):
"""
For a list of social_stats, compute the social score
"""
social_metric_points = get_social_metric_points()
social_total = 0
for key, val in social_metric_points.items():
social_total += social_metrics.get(key, 0) * val
return social_total
#
# Support for Notifications, these two receivers should actually be migrated into a new Leaderboard django app.
# For now, put the business logic here, but it is pretty decoupled through event signaling
# so we should be able to move these files easily when we are able to do so
#
@receiver(pre_save, sender=StudentSocialEngagementScore)
def handle_progress_pre_save_signal(sender, instance, **kwargs):
"""
Handle the pre-save ORM event on StudentSocialEngagementScore
"""
if settings.FEATURES['ENABLE_NOTIFICATIONS']:
# If notifications feature is enabled, then we need to get the user's
# rank before the save is made, so that we can compare it to
# after the save and see if the position changes
instance.presave_leaderboard_rank = StudentSocialEngagementScore.get_user_leaderboard_position(
instance.course_id,
user_id=instance.user.id,
exclude_users=get_aggregate_exclusion_user_ids(instance.course_id)
)['position']
@receiver(post_save, sender=StudentSocialEngagementScore)
def handle_progress_post_save_signal(sender, instance, **kwargs):
"""
Handle the pre-save ORM event on CourseModuleCompletions
"""
if settings.FEATURES['ENABLE_NOTIFICATIONS']:
# If notifications feature is enabled, then we need to get the user's
# rank before the save is made, so that we can compare it to
# after the save and see if the position changes
leaderboard_rank = StudentSocialEngagementScore.get_user_leaderboard_position(
instance.course_id,
user_id=instance.user.id,
exclude_users=get_aggregate_exclusion_user_ids(instance.course_id)
)['position']
if leaderboard_rank == 0:
# quick escape when user is not in the leaderboard
# which means rank = 0. Trouble is 0 < 3, so unfortunately
# the semantics around 0 don't match the logic below
return
# logic for Notification trigger is when a user enters into the Leaderboard
leaderboard_size = getattr(settings, 'LEADERBOARD_SIZE', 3)
presave_leaderboard_rank = instance.presave_leaderboard_rank if instance.presave_leaderboard_rank else sys.maxsize
if leaderboard_rank <= leaderboard_size and presave_leaderboard_rank > leaderboard_size:
try:
notification_msg = NotificationMessage(
msg_type=get_notification_type('open-edx.lms.leaderboard.engagement.rank-changed'),
namespace=str(instance.course_id),
payload={
'_schema_version': '1',
'rank': leaderboard_rank,
'leaderboard_name': 'Engagement',
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
notification_msg.add_click_link_params({
'course_id': str(instance.course_id),
})
publish_notification_to_user(int(instance.user.id), notification_msg)
except Exception as ex:
# Notifications are never critical, so we don't want to disrupt any
# other logic processing. So log and continue.
log.exception(ex)
def get_involved_users_in_thread(request, thread):
"""
Compute all the users involved in the children of a specific thread.
"""
params = {"thread_id": thread.id, "page_size": 100}
is_question = getattr(thread, "thread_type", None) == "question"
author_id = getattr(thread, 'user_id', None)
results = _detail_results_factory()
if is_question:
# get users of the non-endorsed comments in thread
params.update({"endorsed": False})
_get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
# get users of the endorsed comments in thread
if getattr(thread, 'has_endorsed', False):
params.update({"endorsed": True})
_get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
else:
_get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
users = results['users']
if author_id:
users[author_id]['num_upvotes'] += thread.votes.get('count', 0)
users[author_id]['num_threads'] += 1
users[author_id]['num_comments_generated'] += results['all_comments']
users[author_id]['num_thread_followers'] += thread.get_num_followers()
if thread.abuse_flaggers:
users[author_id]['num_flagged'] += 1
return users
def get_involved_users_in_comment(request, comment):
"""
Method used to extract the involved users in the comment.
This method also returns the creator of the post.
"""
params = {"page_size": 100}
comment_author_id = getattr(comment, 'user_id', None)
thread_author_id = None
if hasattr(comment, 'thread_id'):
thread_author_id = _get_author_of_thread(comment.thread_id)
results = _get_details_for_deletion(_get_request(request, params), comment.id, nested=True)
users = results['users']
if comment_author_id:
users[comment_author_id]['num_upvotes'] += comment.votes.get('count', 0)
if getattr(comment, 'parent_id', None):
# It's a reply.
users[comment_author_id]['num_replies'] += 1
else:
# It's a comment.
users[comment_author_id]['num_comments'] += 1
if comment.abuse_flaggers:
users[comment_author_id]['num_flagged'] += 1
if thread_author_id:
users[thread_author_id]['num_comments_generated'] += results['replies'] + 1
return users
def _detail_results_factory():
"""
Helper method to maintain organized result structure while getting involved users.
"""
return {
'replies': 0,
'all_comments': 0,
'users': defaultdict(lambda: defaultdict(int)),
}
def _get_users_in_thread(request):
from lms.djangoapps.discussion.rest_api.views import CommentViewSet
users = set()
response_page = 1
has_results = True
while has_results:
try:
params = {"page": response_page}
response = CommentViewSet().list(
_get_request(request, params)
)
for comment in response.data["results"]:
users.add(comment["author"])
if comment["child_count"] > 0:
users.update(_get_users_in_comment(request, comment["id"]))
has_results = response.data["pagination"]["next"]
response_page += 1
except (ThreadNotFoundError, InvalidKeyError):
return users
return users
def _get_users_in_comment(request, comment_id):
from lms.djangoapps.discussion.rest_api.views import CommentViewSet
users = set()
response_page = 1
has_results = True
while has_results:
try:
response = CommentViewSet().retrieve(_get_request(request, {"page": response_page}), comment_id)
for comment in response.data["results"]:
users.add(comment["author"])
if comment["child_count"] > 0:
users.update(_get_users_in_comment(request, comment["id"]))
has_results = response.data["pagination"]["next"]
response_page += 1
except (ThreadNotFoundError, InvalidKeyError):
return users
return users
def _get_request(incoming_request, params):
request = HttpRequest()
request.method = 'GET'
request.user = incoming_request.user
request.META = incoming_request.META.copy()
request.GET = incoming_request.GET.copy()
request.GET.update(params)
return request
def _get_author_of_comment(parent_id):
comment = cc.Comment.find(parent_id)
if comment and hasattr(comment, 'user_id'):
return comment.user_id
def _get_author_of_thread(thread_id):
thread = cc.Thread.find(thread_id)
if thread and hasattr(thread, 'user_id'):
return thread.user_id
def _get_details_for_deletion(request, comment_id=None, results=None, nested=False, is_thread=False):
"""
Get details of comment or thread and related users that are required for deletion purposes.
"""
if not results:
results = _detail_results_factory()
for page, response in enumerate(_get_paginated_results(request, comment_id, is_thread)):
if page == 0:
results['all_comments'] += response.data['pagination']['count']
if results['replies'] == 0:
results['replies'] = response.data['pagination']['count']
for comment in response.data['results']:
_extract_stats_from_comment(request, comment, results, nested)
return results
def _get_paginated_results(request, comment_id, is_thread):
"""
Yield paginated comments of comment or thread.
"""
from lms.djangoapps.discussion.rest_api.views import CommentViewSet
response_page = 1
has_next = True
while has_next:
try:
if is_thread:
response = CommentViewSet().list(_get_request(request, {"page": response_page}))
else:
response = CommentViewSet().retrieve(_get_request(request, {"page": response_page}), comment_id)
except (ThreadNotFoundError, CommentNotFoundError, InvalidKeyError):
raise StopIteration
has_next = response.data["pagination"]["next"]
response_page += 1
yield response
def _extract_stats_from_comment(request, comment, results, nested):
"""
Extract results from comment and its nested comments.
"""
user_id = comment.serializer.instance['user_id']
if not nested:
results['users'][user_id]['num_comments'] += 1
else:
results['users'][user_id]['num_replies'] += 1
results['users'][user_id]['num_upvotes'] += comment['vote_count']
if comment.serializer.instance['abuse_flaggers']:
results['users'][user_id]['num_flagged'] += 1
if comment['child_count'] > 0:
_get_details_for_deletion(request, comment['id'], results, nested=True)
| agpl-3.0 | -3,449,035,946,806,721,000 | 35.790524 | 122 | 0.638582 | false | 4.156946 | false | false | false |
arkanister/minitickets | lib/utils/html/templatetags/icons.py | 1 | 2009 | # -*- coding: utf-8 -*-
from django import template
from django.template import TemplateSyntaxError, Node
from ..icons.base import Icon
from ..tags import token_kwargs, resolve_kwargs
register = template.Library()
class IconNode(Node):
def __init__(self, _icon, kwargs=None):
super(IconNode, self).__init__()
self.icon = _icon
self.kwargs = kwargs or {}
def render(self, context):
icon = self.icon.resolve(context)
if isinstance(icon, Icon):
return icon.as_html()
attrs = resolve_kwargs(self.kwargs, context)
prefix = attrs.pop('prefix', None)
content = attrs.pop('content', None)
html_tag = attrs.pop('html_tag', None)
icon = Icon(icon, prefix=prefix, content=content,
html_tag=html_tag, attrs=attrs)
return icon.as_html()
@register.tag
def icon(parser, token):
"""
Render a HTML icon.
The tag can be given either a `.Icon` object or a name of the icon.
An optional second argument can specify the icon prefix to use.
An optional third argument can specify the icon html tag to use.
An optional fourth argument can specify the icon content to use.
Others arguments can specify any html attribute to use.
Example::
{% icon 'icon' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
{% icon 'icon' 'prefix'='fa-' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
{% icon 'icon' 'prefix'='fa-' 'html_tag'='b' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
{% icon 'icon' 'prefix'='fa-' 'html_tag'='b' 'content'='R$' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
"""
bits = token.split_contents()
try:
tag, _icon = bits.pop(0), parser.compile_filter(bits.pop(0))
except ValueError:
raise TemplateSyntaxError("'%s' must be given a icon." % bits[0])
kwargs = {}
# split optional args
if len(bits):
kwargs = token_kwargs(bits, parser)
return IconNode(_icon, kwargs=kwargs) | apache-2.0 | 8,514,085,994,507,164,000 | 29.923077 | 110 | 0.610254 | false | 3.626354 | false | false | false |
isaacbernat/awis | setup.py | 1 | 1887 | from setuptools import setup, find_packages
# from codecs import open
# from os import path
# here = path.abspath(path.dirname(__file__))
# # Get the long description from the README file
# with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
# Application name:
name="myawis",
# Version number (initial):
version="0.2.4",
# Application author details:
author="Ashim Lamichhane",
author_email="[email protected]",
# Packages
packages=['myawis'],
# data_files
data_files=[('awis', ['LICENSE.txt', 'README.rst'])],
# Include additional files into the package
include_package_data=True,
# Details
url="https://github.com/ashim888/awis",
# Keywords
keywords='python awis api call',
#
license='GNU General Public License v3.0',
description="A simple AWIS python wrapper",
long_description=open('README.rst').read(),
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 2 - Pre-Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: Public Domain',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
install_requires=[
"requests",
"beautifulsoup4",
"lxml",
],
entry_points={
'console_scripts': [
'myawis=myawis:main',
],
},
)
| gpl-3.0 | -1,147,754,553,041,353,200 | 27.590909 | 77 | 0.608903 | false | 3.997881 | false | true | false |
google-research/google-research | kws_streaming/models/lstm.py | 1 | 3941 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LSTM with Mel spectrum and fully connected layers."""
from kws_streaming.layers import lstm
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers import stream
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
"""LSTM model parameters."""
parser_nn.add_argument(
'--lstm_units',
type=str,
default='500',
help='Output space dimensionality of lstm layer ',
)
parser_nn.add_argument(
'--return_sequences',
type=str,
default='0',
help='Whether to return the last output in the output sequence,'
'or the full sequence',
)
parser_nn.add_argument(
'--stateful',
type=int,
default='1',
help='If True, the last state for each sample at index i'
'in a batch will be used as initial state for the sample '
'of index i in the following batch',
)
parser_nn.add_argument(
'--num_proj',
type=str,
default='200',
help='The output dimensionality for the projection matrices.',
)
parser_nn.add_argument(
'--use_peepholes',
type=int,
default='1',
help='True to enable diagonal/peephole connections',
)
parser_nn.add_argument(
'--dropout1',
type=float,
default=0.3,
help='Percentage of data dropped',
)
parser_nn.add_argument(
'--units1',
type=str,
default='',
help='Number of units in the last set of hidden layers',
)
parser_nn.add_argument(
'--act1',
type=str,
default='',
help='Activation function of the last set of hidden layers',
)
def model(flags):
"""LSTM model.
Similar model in papers:
Convolutional Recurrent Neural Networks for Small-Footprint Keyword Spotting
https://arxiv.org/pdf/1703.05390.pdf (with no conv layer)
Model topology is similar with "Hello Edge: Keyword Spotting on
Microcontrollers" https://arxiv.org/pdf/1711.07128.pdf
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
input_audio = tf.keras.layers.Input(
shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
batch_size=flags.batch_size)
net = input_audio
if flags.preprocess == 'raw':
# it is a self contained model, user need to feed raw audio only
net = speech_features.SpeechFeatures(
speech_features.SpeechFeatures.get_params(flags))(
net)
for units, return_sequences, num_proj in zip(
utils.parse(flags.lstm_units), utils.parse(flags.return_sequences),
utils.parse(flags.num_proj)):
net = lstm.LSTM(
units=units,
return_sequences=return_sequences,
stateful=flags.stateful,
use_peepholes=flags.use_peepholes,
num_proj=num_proj)(
net)
net = stream.Stream(cell=tf.keras.layers.Flatten())(net)
net = tf.keras.layers.Dropout(rate=flags.dropout1)(net)
for units, activation in zip(
utils.parse(flags.units1), utils.parse(flags.act1)):
net = tf.keras.layers.Dense(units=units, activation=activation)(net)
net = tf.keras.layers.Dense(units=flags.label_count)(net)
if flags.return_softmax:
net = tf.keras.layers.Activation('softmax')(net)
return tf.keras.Model(input_audio, net)
| apache-2.0 | 9,010,055,643,208,554,000 | 30.031496 | 78 | 0.678508 | false | 3.693533 | false | false | false |
fake-name/ReadableWebProxy | WebMirror/management/GravityTalesManage.py | 1 | 1202 |
import calendar
import datetime
import json
import os
import os.path
import shutil
import traceback
from concurrent.futures import ThreadPoolExecutor
import urllib.error
import urllib.parse
from sqlalchemy import and_
from sqlalchemy import or_
import sqlalchemy.exc
from sqlalchemy_continuum_vendored.utils import version_table
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import common.database as db
import common.Exceptions
import common.management.file_cleanup
import Misc.HistoryAggregator.Consolidate
import flags
import pprint
import config
from config import C_RAW_RESOURCE_DIR
import WebMirror.OutputFilters.rss.FeedDataParser
def exposed_delete_gravitytales_bot_blocked_pages():
'''
Delete the "checking you're not a bot" garbage pages
that sometimes get through the gravitytales scraper.
'''
with db.session_context() as sess:
tables = [
db.WebPages.__table__,
version_table(db.WebPages.__table__)
]
for ctbl in tables:
update = ctbl.delete() \
.where(ctbl.c.netloc == "gravitytales.com") \
.where(ctbl.c.content.like('%<div id="bot-alert" class="alert alert-info">%'))
print(update)
sess.execute(update)
sess.commit()
| bsd-3-clause | 2,929,935,509,704,662,000 | 21.259259 | 82 | 0.75624 | false | 3.414773 | false | false | false |
McIntyre-Lab/papers | newman_t1d_cases_2017/scripts/bwa_sam_parse.py | 1 | 2304 | #!/usr/bin/env python
import argparse
## This script parses a sam file from BWA-MEM and outputs a log of alignment counts and percentages.
# Parse command line arguments
parser = argparse.ArgumentParser(description='Parse sam file to get alignment counts.')
parser.add_argument('-sam','--sam_file',dest='sam', action='store', required=True, help='A Sam file to parse [Required]')
parser.add_argument('-o','--out', dest='out', action='store', required=True, help='Output file for alignment log [Required]')
args = parser.parse_args()
flags=list()
# Open sam file and create a list that contains only the second column from the sam file, (the bitwise flags).
with open(args.sam,'r') as sam:
for line in sam.readlines():
cols=line.split('\t')
flags.append(cols[1])
# Count the flags. These flags are based on BWA sam output, may not be the same for other aligners.
# The flags are different for paired data. There is another python script 'bwa_sam_parse_se.py' for single-end alignments.
unaln=flags.count('77') + flags.count('141') + flags.count('181') + flags.count('121') + flags.count('133') + flags.count('117') + flags.count('69')
aln=flags.count('99') + flags.count('73') + flags.count('185') + flags.count('147') + flags.count('83') + flags.count('163') + flags.count('97') + flags.count('137') + flags.count('145') + flags.count('81') + flags.count('161')+ flags.count('177') + flags.count('113') + flags.count('65') + flags.count('129')
ambig=flags.count('337') + flags.count('417') + flags.count('369') + flags.count('433') + flags.count('353') + flags.count('401') + flags.count('371')+ flags.count('355') + flags.count('403') + flags.count('419') + flags.count('339') + flags.count('387') + flags.count('385') + flags.count('323') + flags.count('435') + flags.count('321')
total = unaln + aln
# Get percentages
percent_aln = float (aln) / (total) * 100
percent_unaln = float (unaln) / (total) * 100
percent_ambig = float (ambig) / (total) * 100
# Write the counts to the output.
with open(args.out,'w') as dataout:
dataout.write('Total reads '+str(total)+'\nAligned '+str(aln)+'\nUnaligned '+str(unaln)+'\nAmbiguous '+str(ambig)+'\nPercent aligned '+str(percent_aln)+'\nPercent unaligned '+str(percent_unaln)+'\nPercent ambiguous '+str(percent_ambig))
| lgpl-3.0 | -7,280,314,419,809,801,000 | 52.581395 | 338 | 0.680122 | false | 3.191136 | false | false | false |
sbg/sevenbridges-python | sevenbridges/meta/collection.py | 1 | 4097 | from sevenbridges.errors import PaginationError, SbgError
from sevenbridges.models.compound.volumes.volume_object import VolumeObject
from sevenbridges.models.compound.volumes.volume_prefix import VolumePrefix
from sevenbridges.models.link import Link, VolumeLink
class Collection(list):
"""
Wrapper for SevenBridges pageable resources.
Among the actual collection items it contains information regarding
the total number of entries available in on the server and resource href.
"""
resource = None
def __init__(self, resource, href, total, items, links, api):
super().__init__(items)
self.resource = resource
self.href = href
self.links = links
self._items = items
self._total = total
self._api = api
@property
def total(self):
return int(self._total)
def all(self):
"""
Fetches all available items.
:return: Collection object.
"""
page = self._load(self.href)
while True:
try:
for item in page._items:
yield item
page = page.next_page()
except PaginationError:
break
def _load(self, url):
if self.resource is None:
raise SbgError('Undefined collection resource.')
else:
response = self._api.get(url, append_base=False)
data = response.json()
total = response.headers['x-total-matching-query']
items = [
self.resource(api=self._api, **group)
for group in data['items']
]
links = [Link(**link) for link in data['links']]
href = data['href']
return Collection(
resource=self.resource, href=href, total=total,
items=items, links=links, api=self._api
)
def next_page(self):
"""
Fetches next result set.
:return: Collection object.
"""
for link in self.links:
if link.rel.lower() == 'next':
return self._load(link.href)
raise PaginationError('No more entries.')
def previous_page(self):
"""
Fetches previous result set.
:return: Collection object.
"""
for link in self.links:
if link.rel.lower() == 'prev':
return self._load(link.href)
raise PaginationError('No more entries.')
def __repr__(self):
return (
f'<Collection: total={self.total}, available={len(self._items)}>'
)
class VolumeCollection(Collection):
def __init__(self, href, items, links, prefixes, api):
super().__init__(
VolumeObject, href, 0, items, links, api)
self.prefixes = prefixes
@property
def total(self):
return -1
def next_page(self):
"""
Fetches next result set.
:return: VolumeCollection object.
"""
for link in self.links:
if link.next:
return self._load(link.next)
raise PaginationError('No more entries.')
def previous_page(self):
raise PaginationError('Cannot paginate backwards')
def _load(self, url):
if self.resource is None:
raise SbgError('Undefined collection resource.')
else:
response = self._api.get(url, append_base=False)
data = response.json()
items = [
self.resource(api=self._api, **group) for group in
data['items']
]
prefixes = [
VolumePrefix(api=self._api, **prefix) for prefix in
data['prefixes']
]
links = [VolumeLink(**link) for link in data['links']]
href = data['href']
return VolumeCollection(
href=href, items=items, links=links,
prefixes=prefixes, api=self._api
)
def __repr__(self):
return f'<VolumeCollection: items={len(self._items)}>'
| apache-2.0 | -4,023,204,058,258,332,700 | 30.037879 | 77 | 0.547962 | false | 4.46783 | false | false | false |
RNAcentral/rnacentral-webcode | rnacentral/portal/management/commands/update_example_locations.py | 1 | 4551 | """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from django.core.management.base import BaseCommand
from portal.models import EnsemblAssembly
from portal.models import SequenceRegion
example_locations = {
'homo_sapiens': {
'chromosome': 'X',
'start': 73819307,
'end': 73856333,
},
'mus_musculus': {
'chromosome': 1,
'start': 86351908,
'end': 86352200,
},
'danio_rerio': {
'chromosome': 9,
'start': 7633910,
'end': 7634210,
},
'bos_taurus': {
'chromosome': 15,
'start': 82197673,
'end': 82197837,
},
'rattus_norvegicus': {
'chromosome': 'X',
'start': 118277628,
'end': 118277850,
},
'felis_catus': {
'chromosome': 'X',
'start': 18058223,
'end': 18058546,
},
'macaca_mulatta': {
'chromosome': 1,
'start': 146238837,
'end': 146238946,
},
'pan_troglodytes': {
'chromosome': 11,
'start': 78369004,
'end': 78369219,
},
'canis_familiaris': {
'chromosome': 19,
'start': 22006909,
'end': 22007119,
},
'gallus_gallus': {
'chromosome': 9,
'start': 15676031,
'end': 15676160,
},
'xenopus_tropicalis': {
'chromosome': 'NC_006839',
'start': 11649,
'end': 11717,
},
'saccharomyces_cerevisiae': {
'chromosome': 'XII',
'start': 856709,
'end': 856919,
},
'schizosaccharomyces_pombe': {
'chromosome': 'I',
'start': 540951,
'end': 544327,
},
'triticum_aestivum': {
'chromosome': '6A',
'start': 100656614,
'end': 100656828,
},
'caenorhabditis_elegans': {
'chromosome': 'III',
'start': 11467363,
'end': 11467705,
},
'drosophila_melanogaster': {
'chromosome': '3R',
'start': 7474331,
'end': 7475217,
},
'bombyx_mori': {
'chromosome': 'scaf16',
'start': 6180018,
'end': 6180422,
},
'anopheles_gambiae': {
'chromosome': '2R',
'start': 34644956,
'end': 34645131,
},
'dictyostelium_discoideum': {
'chromosome': 2,
'start': 7874546,
'end': 7876498,
},
'plasmodium_falciparum': {
'chromosome': 13,
'start': 2796339,
'end': 2798488,
},
'arabidopsis_thaliana': {
'chromosome': 2,
'start': 18819643,
'end': 18822629,
}
}
def update_example_locations():
"""
"""
for assembly in EnsemblAssembly.objects.filter().all():
print(assembly.ensembl_url)
if assembly.ensembl_url in example_locations:
assembly.example_chromosome = example_locations[assembly.ensembl_url]['chromosome']
assembly.example_start = example_locations[assembly.ensembl_url]['start']
assembly.example_end = example_locations[assembly.ensembl_url]['end']
assembly.save()
continue
try:
region = SequenceRegion.objects.filter(assembly_id=assembly.assembly_id).all()[:1].get()
assembly.example_chromosome = region.chromosome
assembly.example_start = region.region_start
assembly.example_end = region.region_stop
print('\t', assembly.assembly_id, region.chromosome, region.region_start, region.region_stop)
assembly.save()
except SequenceRegion.DoesNotExist:
print('No regions found {}'.format(assembly.ensembl_url))
except SequenceRegion.MultipleObjectsReturned:
print('Multiple assemblies found {}'.format(assembly.ensembl_url))
class Command(BaseCommand):
"""
Usage:
python manage.py update_example_locations
"""
def handle(self, *args, **options):
"""Main function, called by django."""
update_example_locations()
| apache-2.0 | -2,314,792,031,606,948,400 | 27.622642 | 105 | 0.5735 | false | 3.416667 | false | false | false |
mikelum/pyspeckit | pyspeckit/spectrum/readers/read_class.py | 1 | 67070 | """
------------------------
GILDAS CLASS file reader
------------------------
Read a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`
"""
from __future__ import print_function
from astropy.extern.six.moves import xrange
from astropy.extern.six import iteritems
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
# from astropy.time import Time
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
# 'range' is used as a keyword argument name later in this module, so keep
# an alias to the builtin
irange = range
def print_timing(func):
"""
Prints execution time of decorated function.
Included here because CLASS files can take a little while to read;
this should probably be replaced with a progressbar
"""
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
        log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
return res
wrapper.__doc__ = func.__doc__
return wrapper
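
# Example usage of the decorator above (illustrative only; ``read_scans`` is
# a hypothetical name, not a function defined in this module):
#
#     @print_timing
#     def read_scans(filename):
#         ...
#
# Each call to ``read_scans(...)`` would then log a line such as
# "read_scans took 1.5 s" at INFO level.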
""" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html """
filetype_dict = {'1A ':'Multiple_IEEE','1 ':'Multiple_Vax','1B ':'Multiple_EEEI',
'2A ':'v2','2 ':'v2','2B ':'v2',
'9A ':'Single_IEEE','9 ':'Single_Vax','9B ':'Single_EEEI'}
fileversion_dict = {'1A ':'v1',
'2A ':'v2'}
record_lengths = {'1A': 512,
'2A': 1024*4}
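
# Illustrative helper (an assumption about usage, not called by the reader
# itself): the 4-byte code at the start of a CLASS file identifies the
# container layout, so version detection reduces to a lookup in the tables
# above.
def _example_guess_class_version(filename):
    """
    Return (layout_name, record_length) for a CLASS file, based on its
    leading 4-byte file code.  Minimal sketch: the real reader also has to
    handle byte-order variants and malformed headers.
    """
    with open(filename, 'rb') as fh:
        code = fh.read(4).decode('ascii', 'replace')
    layout = filetype_dict.get(code[:3])
    if layout is None:
        raise ValueError("Unrecognized CLASS file code: %r" % (code,))
    # record_lengths only covers the '1A'/'2A' (IEEE) variants
    return layout, record_lengths.get(code[:2])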
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
# -8: 'SWITCH',
-10: 'DRIFT',
-14: 'CALIBRATION',
}
header_id_lengths = {-2: 9, # may really be 10?
-3: 17,
-4: 17,
-5: None, # variable length
-6: 3, # variable length
-14: 25,
}
# from packages/classic/lib/classic_mod.f90
filedescv2_nw1=14
"""
GENERAL
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
! Written in the entry
real(kind=8) :: ut ! 1-2 [ rad] UT of observation
real(kind=8) :: st ! 3-4 [ rad] LST of observation
real(kind=4) :: az ! 5 [ rad] Azimuth
real(kind=4) :: el ! 6 [ rad] Elevation
real(kind=4) :: tau ! 7 [neper] Opacity
real(kind=4) :: tsys ! 8 [ K] System temperature
real(kind=4) :: time ! 9 [ s] Integration time
! Not in this section in file
integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)
! NOT in data ---
character(len=12) :: cdobs ! [string] Duplicate of dobs
character(len=12) :: cdred ! [string] Duplicate of dred
"""
keys_lengths = {
'unknown': [
( 'NUM' ,1,'int32'), # Observation number
( 'VER' ,1,'int32'), # Version number
( 'TELES' ,3,'|S12') , # Telescope name
( 'DOBS' ,1,'int32'), # Date of observation
( 'DRED' ,1,'int32'), # Date of reduction
( 'TYPEC' ,1,'int32'), # Type of coordinates
( 'KIND' ,1,'int32'), # Type of data
( 'QUAL' ,1,'int32'), # Quality of data
( 'SCAN' ,1,'int32'), # Scan number
( 'SUBSCAN' ,1,'int32'), # Subscan number
],
'COMMENT': [ # -1
('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment
        ('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string
],
'GENERAL': [ # -2
( 'UT' ,2,'float64'), # rad UT of observation
( 'ST' ,2,'float64'), # rad LST of observation
( 'AZ' ,1,'float32'), # rad Azimuth
( 'EL' ,1,'float32'), # rad Elevation
( 'TAU' ,1,'float32'), # neper Opacity
( 'TSYS' ,1,'float32'), # K System temperature
( 'TIME' ,1,'float32'), # s Integration time
# XUNIT should not be there?
#( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)
] ,
'POSITION': [ # -3
('SOURC',3,'|S12') , # [ ] Source name
('EPOCH',1,'float32'), # [ ] Epoch of coordinates
('LAM' ,2,'float64'), #[rad] Lambda
('BET' ,2,'float64'), #[rad] Beta
('LAMOF',1,'float32'), # [rad] Offset in Lambda
('BETOF',1,'float32'), # [rad] Offset in Beta
('PROJ' ,1,'int32') , # [rad] Projection system
('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS
('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS
('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS
],
'SPECTRO': [ # -4
#('align' ,1,'int32'), # [ ] Alignment padding
('LINE' ,3,'|S12'), # [ ] Line name
('RESTF' ,2,'float64'), # [ MHz] Rest frequency
('NCHAN' ,1,'int32'), # [ ] Number of channels
('RCHAN' ,1,'float32'), # [ ] Reference channels
('FRES' ,1,'float32'), # [ MHz] Frequency resolution
('FOFF' ,1,'float32'), # [ MHz] Frequency offset
('VRES' ,1,'float32'), # [km/s] Velocity resolution
('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel
('BAD' ,1,'float32'), # [ ] Blanking value
#('ALIGN_1',1,'int32'), # [ ] Alignment padding
('IMAGE' ,2,'float64'), # [ MHz] Image frequency
#('ALIGN_2',1,'int32'), # [ ] Alignment padding
('VTYPE' ,1,'int32'), # [code] Type of velocity
('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)
],
'CALIBRATION': [ # -14
('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
'''Read the next `n` bytes (from idlsave)'''
return f.read(n)
"""
Warning: UNCLEAR what endianness should be!
Numpy seemed to get it right, and I think numpy assumes NATIVE endianness
"""
def _read_byte(f):
'''Read a single byte (from idlsave)'''
return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])
def _read_int16(f):
'''Read a signed 16-bit integer (from idlsave)'''
return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])
def _read_int32(f):
'''Read a signed 32-bit integer (from idlsave)'''
return numpy.int32(struct.unpack('=i', f.read(4))[0])
def _read_int64(f):
'''Read a signed 64-bit integer '''
return numpy.int64(struct.unpack('=q', f.read(8))[0])
def _read_float32(f):
'''Read a 32-bit float (from idlsave)'''
return numpy.float32(struct.unpack('=f', f.read(4))[0])
def _align_32(f):
'''Align to the next 32-bit position in a file (from idlsave)'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _read_word(f,length):
if length > 0:
chars = _read_bytes(f, length)
_align_32(f)
else:
chars = None
return chars
def _read_int(f):
return struct.unpack('i',f.read(4))
def is_ascii(s):
try:
s.decode('ascii')
return True
except UnicodeDecodeError:
return False
except UnicodeEncodeError:
return False
def is_all_null(s):
return all(x=='\x00' for x in s)
"""
from clic_file.f90: v1, v2
integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index
integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read
integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index
integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index
integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index
integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index
integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index
integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index
real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1
real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2
integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets
integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation
integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index
integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index
integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle
integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number
real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words
integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]
integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name
integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status
integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status
integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number
real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]
integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word
equivalently
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
"""
"""
index.f90:
call conv%read%i8(data(1), indl%bloc, 1) ! bloc
call conv%read%i4(data(3), indl%word, 1) ! word
call conv%read%i8(data(4), indl%num, 1) ! num
call conv%read%i4(data(6), indl%ver, 1) ! ver
call conv%read%cc(data(7), indl%csour, 3) ! csour
call conv%read%cc(data(10),indl%cline, 3) ! cline
call conv%read%cc(data(13),indl%ctele, 3) ! ctele
call conv%read%i4(data(16),indl%dobs, 1) ! dobs
call conv%read%i4(data(17),indl%dred, 1) ! dred
call conv%read%r4(data(18),indl%off1, 1) ! off1
call conv%read%r4(data(19),indl%off2, 1) ! off2
call conv%read%i4(data(20),indl%type, 1) ! type
call conv%read%i4(data(21),indl%kind, 1) ! kind
call conv%read%i4(data(22),indl%qual, 1) ! qual
call conv%read%r4(data(23),indl%posa, 1) ! posa
call conv%read%i8(data(24),indl%scan, 1) ! scan
call conv%read%i4(data(26),indl%subscan,1) ! subscan
if (isv3) then
call conv%read%r8(data(27),indl%ut, 1) ! ut
else
"""
def _read_indices(f, file_description):
#if file_description['version'] in (1,2):
# extension_positions = (file_description['aex']-1)*file_description['reclen']*4
# all_indices = {extension:
# [_read_index(f,
# filetype=file_description['version'],
# entry=ii,
# #position=position,
# )
# for ii in range(file_description['lex1'])]
# for extension,position in enumerate(extension_positions)
# if position > 0
# }
#elif file_description['version'] == 1:
extension_positions = ((file_description['aex'].astype('int64')-1)
*file_description['reclen']*4)
all_indices = [_read_index(f,
filetype=file_description['version'],
# 1-indexed files
entry_number=ii+1,
file_description=file_description,
)
for ii in range(file_description['xnext']-1)]
#else:
# raise ValueError("Invalid file version {0}".format(file_description['version']))
return all_indices
def _find_index(entry_number, file_description, return_position=False):
if file_description['gex'] == 10:
        kex=(entry_number-1)//file_description['lex1'] + 1
else:
# exponential growth:
#kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
ken = entry_number - file_description['lexn'][kex-1]
#! Find ken (relative entry number in the extension, starts from 1)
#ken = entry_num - file%desc%lexn(kex-1)
    kb = ((ken-1)*file_description['lind'])//file_description['reclen']
#kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the
# ! relative record position (as an offset, starts from 0) where the
# ! Entry Index starts. NB: there can be a non-integer number of Entry
# ! Indexes per record
# Subtract 1: 'aex' is 1-indexed
kbl = (file_description['aex'][kex-1]+kb)-1
# kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes
k = ((ken-1)*file_description['lind']) % file_description['reclen']
#k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the
# ! first word of the Entry Index of the entry number 'entry_num'
if return_position:
return (kbl*file_description['reclen']+k)*4
else:
return kbl,k
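# Worked example for the index arithmetic above (illustrative values, not
# from a real file): with gex=10, lex1=1000, lexn=[0, 1000, 2000], lind=32,
# reclen=1024 and aex=[3, 50], entry number 1500 lands in extension kex=2
# as its ken=500th entry, giving kb=(499*32)//1024=15, kbl=50+15-1=64 and
# k=(499*32)%1024=608, i.e. a byte position of (64*1024 + 608)*4 = 264576.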
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
entry_number=None, file_description=None):
if position is not None:
f.seek(position)
if entry_number is not None:
indpos = _find_index(entry_number, file_description, return_position=True)
f.seek(indpos)
x0 = f.tell()
if filetype in ('1A ','v1', 1):
log.debug('Index filetype 1A')
index = {
"XBLOC":_read_int32(f),
"XNUM":_read_int32(f),
"XVER":_read_int32(f),
"XSOURC":_read_word(f,12),
"XLINE":_read_word(f,12),
"XTEL":_read_word(f,12),
"XDOBS":_read_int32(f),
"XDRED":_read_int32(f),
"XOFF1":_read_float32(f),# first offset (real, radians)
"XOFF2":_read_float32(f),# second offset (real, radians)
"XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
"XKIND":_read_int32(f),# Kind of observation (0: spectral, 1: continuum, )
"XQUAL":_read_int32(f),# Quality (0-9)
"XSCAN":_read_int32(f),# Scan number
}
index['BLOC'] = index['XBLOC'] # v2 compatibility
index['WORD'] = 1 # v2 compatibility
index['SOURC'] = index['CSOUR'] = index['XSOURC']
index['DOBS'] = index['CDOBS'] = index['XDOBS']
index['CTELE'] = index['XTEL']
index['LINE'] = index['XLINE']
index['OFF1'] = index['XOFF1']
index['OFF2'] = index['XOFF2']
index['QUAL'] = index['XQUAL']
index['SCAN'] = index['XSCAN']
index['KIND'] = index['XKIND']
if clic: # use header set up in clic
nextchunk = {
"XPROC":_read_int32(f),# "procedure type"
"XITYPE":_read_int32(f),#
"XHOURANG":_read_float32(f),#
"XPROJNAME":_read_int32(f),#
"XPAD1":_read_int32(f),
"XBPC" :_read_int32(f),
"XIC" :_read_int32(f),
"XRECEI" :_read_int32(f),
"XUT":_read_float32(f),
"XPAD2":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW
}
else:
nextchunk = {"XPOSA":_read_float32(f),
"XSUBSCAN":_read_int32(f),
'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
}
nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
nextchunk['POSA'] = nextchunk['XPOSA']
index.update(nextchunk)
        if (f.tell() - x0 != 128):
            missed_bytes = (f.tell()-x0)
            X = f.read(128-missed_bytes)
            if DEBUG: print("read_index missed %i bytes: %s" % (128-missed_bytes,X))
#raise IndexError("read_index did not successfully read 128 bytes at %i. Read %i bytes." % (x0,f.tell()-x0))
if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
raise ValueError("Invalid index read from {0}.".format(x0))
elif filetype in ('2A ','v2', 2):
log.debug('Index filetype 2A')
index = {
"BLOC" : _read_int64(f) , #(data(1), 1) ! bloc
"WORD" : _read_int32(f) , #(data(3), 1) ! word
"NUM" : _read_int64(f) , #(data(4), 1) ! num
"VER" : _read_int32(f) , #(data(6), 1) ! ver
"CSOUR" : _read_word(f,12), #(data(7), 3) ! csour
"CLINE" : _read_word(f,12), #(data(10), 3) ! cline
"CTELE" : _read_word(f,12), #(data(13), 3) ! ctele
"DOBS" : _read_int32(f) , #(data(16), 1) ! dobs
"DRED" : _read_int32(f) , #(data(17), 1) ! dred
"OFF1" : _read_float32(f), #(data(18), 1) ! off1
"OFF2" : _read_float32(f), #(data(19), 1) ! off2
"TYPE" : _read_int32(f) , #(data(20), 1) ! type
"KIND" : _read_int32(f) , #(data(21), 1) ! kind
"QUAL" : _read_int32(f) , #(data(22), 1) ! qual
"POSA" : _read_float32(f), #(data(23), 1) ! posa
"SCAN" : _read_int64(f) , #(data(24), 1) ! scan
"SUBSCAN": _read_int32(f) , #(data(26), 1) ! subscan
}
#last24bits = f.read(24)
#log.debug("Read 24 bits: '{0}'".format(last24bits))
if any((is_all_null(index[x]) or not is_ascii(index[x]))
for x in ('CSOUR','CLINE','CTELE')):
raise ValueError("Invalid index read from {0}.".format(x0))
index['SOURC'] = index['XSOURC'] = index['CSOUR']
index['LINE'] = index['XLINE'] = index['CLINE']
index['XKIND'] = index['KIND']
try:
index['DOBS'] = index['XDOBS'] = index['CDOBS']
except KeyError:
index['CDOBS'] = index['XDOBS'] = index['DOBS']
else:
raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
# from kernel/lib/gsys/date.f90: gag_julda
class_dobs = index['DOBS']
index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
# SLOW
#index['DATEOBS'] = Time(index['DOBS'], format='jyear')
#index['DATEOBSS'] = index['DATEOBS'].iso
log.debug("Indexing finished at {0}".format(f.tell()))
return index
def _read_header(f, type=0, position=None):
"""
Read a header entry from a CLASS file
(helper function)
"""
if position is not None:
f.seek(position)
if type in keys_lengths:
hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])
for x in keys_lengths[type]]
return dict(hdrsec)
    else:
        # unknown section types return an empty header; the ValueError that
        # used to follow here was unreachable dead code
        return {}
def _read_first_record(f):
f.seek(0)
    # fileversion_dict keys are str; decode the bytes that read() returns on py3
    filetype = f.read(4).decode('ascii')
    if fileversion_dict[filetype] == 'v1':
return _read_first_record_v1(f)
else:
return _read_first_record_v2(f)
def _read_first_record_v1(f, record_length_words=128):
r"""
Position & Parameter & Fortran Kind & Purpose \\
\hline
1 & {\tt code} & Character*4 & File code \\
2 & {\tt next} & Integer*4 & Next free record \\
3 & {\tt lex} & Integer*4 & Length of first extension (number of entries) \\
4 & {\tt nex} & Integer*4 & Number of extensions \\
5 & {\tt xnext} & Integer*4 & Next available entry number \\
6:2*{\tt reclen} & {\tt ex(:)} & Integer*4 & Array of extension addresses
from classic_mod.f90:
integer(kind=4) :: code ! 1 File code
integer(kind=4) :: next ! 2 Next free record
integer(kind=4) :: lex ! 3 Extension length (number of entries)
integer(kind=4) :: nex ! 4 Number of extensions
integer(kind=4) :: xnext ! 5 Next available entry number
integer(kind=4) :: aex(mex_v1) ! 6:256 Extension addresses
from old (<dec2013) class, file.f90:
read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &
& ibx%ilex,ibx%imex,ibx%xnext
also uses filedesc_v1tov2 from classic/lib/file.f90
"""
# OLD NOTES
# hdr = header
# hdr.update(obshead) # re-overwrite things
# hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})
# hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
# hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
# hdr.update({'OBJECT':hdr['SOURC'].strip()})
# hdr.update({'BUNIT':'Tastar'})
# hdr.update({'EXPOSURE':hdr['TIME']})
f.seek(0)
file_description = {
'code': f.read(4),
'next': _read_int32(f),
'lex': _read_int32(f),
'nex': _read_int32(f),
'xnext': _read_int32(f),
'gex': 10.,
'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90
'version': 1,
'nextrec': 3,
'nextword': 1,
'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90
'kind': 'unknown',
'flags': 0,
}
file_description['reclen'] = record_length_words # should be 128w = 512 bytes
ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
file_description['ex'] = ex[ex!=0]
file_description['nextrec'] = file_description['next'] # this can't be...
file_description['lex1'] = file_description['lex'] # number of entries
file_description['lexn'] = (np.arange(file_description['nex']+1) *
file_description['lex1'])
file_description['nentries'] = np.sum(file_description['lexn'])
file_description['aex'] = file_description['ex'][:file_description['nex']]
#file_description['version'] = fileversion_dict[file_description['code']]
assert f.tell() == 1024
# Something is not quite right with the 'ex' parsing
#assert len(file_description['ex']) == file_description['nex']
return file_description
def _read_first_record_v2(f):
r""" packages/classic/lib/file.f90
Position & Parameter & Fortran Kind & Purpose & Unit \\
\hline
1 & {\tt code} & Character*4 & File code & - \\
2 & {\tt reclen} & Integer*4 & Record length & words \\
3 & {\tt kind} & Integer*4 & File kind & - \\
4 & {\tt vind} & Integer*4 & Index version & - \\
5 & {\tt lind} & Integer*4 & Index length & words \\
6 & {\tt flags} & Integer*4 & Bit flags. \#1: single or multiple, & - \\
& & & \#2-32: provision (0-filled) & \\
\hline
7:8 & {\tt xnext} & Integer*8 & Next available entry number & - \\
9:10 & {\tt nextrec} & Integer*8 & Next record which contains free space & record \\
11 & {\tt nextword} & Integer*4 & Next free word in this record & word \\
\hline
12 & {\tt lex1} & Integer*4 & Length of first extension index & entries \\
13 & {\tt nex} & Integer*4 & Number of extensions & - \\
14 & {\tt gex} & Integer*4 & Extension growth rule & - \\
15:{\tt reclen} & {\tt aex(:)} & Integer*8 & Array of extension addresses & record
"""
f.seek(0)
file_description = {
'code': f.read(4),
'reclen': _read_int32(f),
'kind': _read_int32(f),
'vind': _read_int32(f),
'lind': _read_int32(f),
'flags': _read_int32(f),
'xnext': _read_int64(f),
'nextrec': _read_int64(f),
'nextword': _read_int32(f),
'lex1': _read_int32(f),
'nex': _read_int32(f),
'gex': _read_int32(f),
}
file_description['lexn'] = [0]
if file_description['gex'] == 10:
for ii in range(1, file_description['nex']+1):
file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
else:
#! Exponential growth. Only growth with mantissa 2.0 is supported
for ii in range(1, file_description['nex']):
# I don't know what the fortran does here!!!
# ahh, maybe 2_8 means int(2, dtype='int64')
nent = int(file_description['lex1'] * 2**(ii-1))
#nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)
file_description['lexn'].append(file_description['lexn'][-1]+nent)
#file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent
file_description['nentries'] = np.sum(file_description['lexn'])
record_length_words = file_description['reclen']
    aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
file_description['aex'] = aex[aex!=0]
assert len(file_description['aex']) == file_description['nex']
file_description['version'] = 2
return file_description
def gi8_dicho(ninp,lexn,xval,ceil=True):
"""
! @ public
! Find ival such as
! X(ival-1) < xval <= X(ival) (ceiling mode)
! or
! X(ival) <= xval < X(ival+1) (floor mode)
! for input data ordered. Use a dichotomic search for that.
call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)
"""
#integer(kind=size_length), intent(in) :: np ! Number of input points
#integer(kind=8), intent(in) :: x(np) ! Input ordered Values
#integer(kind=8), intent(in) :: xval ! The value we search for
#logical, intent(in) :: ceil ! Ceiling or floor mode?
#integer(kind=size_length), intent(out) :: ival ! Position in the array
#logical, intent(inout) :: error ! Logical error flag
iinf = 1
isup = ninp
#! Ceiling mode
while isup > (iinf+1):
imid = int(np.floor((isup + iinf)/2.))
if (lexn[imid-1] < xval):
iinf = imid
else:
isup = imid
ival = isup
return ival
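# Minimal sanity check for the dichotomic search above (illustrative; same
# style as test_downsample1d below). In ceiling mode the result is the first
# 1-based position whose cumulative count reaches the target.
def test_gi8_dicho():
    lexn = [0, 4, 12, 28]
    # entry 5 exceeds lexn[1]=4 but not lexn[2]=12 -> position 3
    assert gi8_dicho(len(lexn), lexn, 5) == 3
    # entry 4 sits exactly on the boundary lexn[1]=4 -> position 2 (ceiling)
    assert gi8_dicho(len(lexn), lexn, 4) == 2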
def _read_obshead(f, file_description, position=None):
if file_description['version'] == 1:
return _read_obshead_v1(f, position=position)
if file_description['version'] == 2:
return _read_obshead_v2(f, position=position)
else:
raise ValueError("Invalid file version {0}.".
format(file_description['version']))
def _read_obshead_v2(f, position=None):
"""
! Version 2 (public)
integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part
integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part
type classic_entrydesc_t
sequence
integer(kind=4) :: code ! 1 : code observation icode
integer(kind=4) :: version ! 2 : observation version
integer(kind=4) :: nsec ! 3 : number of sections
integer(kind=4) :: pad1 ! - : memory padding (not in data)
integer(kind=8) :: nword ! 4- 5: number of words
integer(kind=8) :: adata ! 6- 7: data address
integer(kind=8) :: ldata ! 8- 9: data length
integer(kind=8) :: xnum ! 10-11: entry number
! Out of the 'sequence' block:
integer(kind=4) :: msec ! Not in data: maximum number of sections the
! Observation Index can hold
integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment
integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)
integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)
integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)
end type classic_entrydesc_t
"""
if position is not None:
f.seek(position)
else:
position = f.tell()
IDcode = f.read(4)
    if IDcode.strip() != b'2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(position))
f.seek(position)
entrydescv2_nw1=11
entrydescv2_nw2=5
obshead = {
'CODE': f.read(4),
'VERSION': _read_int32(f),
'NSEC': _read_int32(f),
#'_blank': _read_int32(f),
'NWORD': _read_int64(f),
'ADATA': _read_int64(f),
'LDATA': _read_int64(f),
'XNUM': _read_int64(f),
#'MSEC': _read_int32(f),
#'_blank2': _read_int32(f),
}
section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
"""
Read the observation header of a CLASS file
(helper function for read_class; should not be used independently)
"""
if position is not None:
f.seek(position)
IDcode = f.read(4)
    if IDcode.strip() != b'2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(f.tell() - 4))
(nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
obsnum) = numpy.fromfile(f, count=8, dtype='int32')
if verbose:
print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
print("DATA_LENGTH: ",data_length)
seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
# Documentation says addresses then length: It is apparently wrong
seclen = numpy.fromfile(f,count=nsec,dtype='int32')
secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
if verbose: print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
'NSEC':nsec, 'OBSNUM':obsnum}
#return obsnum,seccodes
return obsnum,hdr,dict(zip(seccodes,secaddr))
# THIS IS IN READ_OBSHEAD!!!
# def _read_preheader(f):
# """
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
# # 13 comes from counting 1, -2,....99 above
# numbers = np.fromfile(f, count=13, dtype='int32')
# sections = [n for n in numbers if n in header_id_numbers]
# return sections
def downsample_1d(myarr,factor,estimator=np.mean, weight=None):
"""
Downsample a 1D array by averaging over *factor* pixels.
Crops right side if the shape is not a multiple of factor.
This code is pure numpy and should be fast.
keywords:
estimator - default to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
weight: np.ndarray
An array of weights to use for the downsampling. If None,
assumes uniform 1
"""
if myarr.ndim != 1:
raise ValueError("Only works on 1d data. Says so in the title.")
xs = myarr.size
crarr = myarr[:xs-(xs % int(factor))]
if weight is None:
dsarr = estimator(np.concatenate([[crarr[i::factor] for i in
range(factor)]]),axis=0)
else:
dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in
range(factor)]]),axis=0)
warr = estimator(np.concatenate([[weight[i::factor] for i in
range(factor)]]),axis=0)
dsarr = dsarr/warr
return dsarr
# unit test
def test_downsample1d():
data = np.arange(10)
weight = np.ones(10)
weight[5]=0
assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==
np.array([ 0.5, 2.5, 4. , 6.5, 8.5]))
def read_observation(f, obsid, file_description=None, indices=None,
my_memmap=None, memmap=True):
    if isinstance(f, str):
        # keep the filename around: the memmap needs it after `f` is rebound
        filename = f
        f = open(filename,'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(filename, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
elif my_memmap is None and memmap:
raise ValueError("Must pass in a memmap object if passing in a file object.")
else:
opened = False
if file_description is None:
file_description = _read_first_record(f)
if indices is None:
indices = _read_indices(f, file_description)
index = indices[obsid]
obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
obsnum,obshead,sections = _read_obshead(f, file_description,
position=obs_position)
header = obshead
datastart = 0
for section_id,section_address in iteritems(sections):
# Section addresses are 1-indexed byte addresses
# in the current "block"
sec_position = obs_position + (section_address-1)*4
temp_hdr = _read_header(f, type=header_id_numbers[section_id],
position=sec_position)
header.update(temp_hdr)
datastart = max(datastart,f.tell())
hdr = header
hdr.update(obshead) # re-overwrite things
hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})
hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
hdr.update({'OBJECT':hdr['SOURC'].strip()})
hdr.update({'BUNIT':'Tastar'})
hdr.update({'EXPOSURE':float(hdr['TIME'])})
hdr['HDRSTART'] = obs_position
hdr['DATASTART'] = datastart
hdr.update(indices[obsid])
# Apparently the data are still valid in this case?
#if hdr['XNUM'] != obsid+1:
# log.error("The spectrum read was {0} but {1} was requested.".
# format(hdr['XNUM']-1, obsid))
if hdr['KIND'] == 1: # continuum
nchan = hdr['NPOIN']
elif 'NCHAN' in hdr:
nchan = hdr['NCHAN']
else:
log.error("No NCHAN in header. This is not a spectrum.")
import ipdb; ipdb.set_trace()
# There may be a 1-channel offset? CHECK!!!
# (changed by 1 pixel - October 14, 2014)
# (changed back - October 21, 2014 - I think the ends are just bad, but not
# zero.)
f.seek(datastart-1)
spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
memmap=memmap, my_memmap=my_memmap)
if opened:
f.close()
return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warn("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
#spectrum = numpy.memmap(filename, offset=here, dtype='float32',
# mode='r', shape=(nchan,))
        spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=header['NCHAN'] if 'NCHAN' in header else header['NPOIN'],
                          my_memmap=memmap)
def clean_header(header):
newheader = {}
for k in header:
if not isinstance(header[k], (int, float, str)):
if isinstance(header[k], np.ndarray) and header[k].size > 1:
if header[k].size > 10:
raise ValueError("Large array being put in header. That's no good. key={0}".format(k))
for ii,val in enumerate(header[k]):
newheader[k[:7]+str(ii)] = val
else:
newheader[k[:8]] = str(header[k])
else:
newheader[k[:8]] = header[k]
return newheader
class ClassObject(object):
def __init__(self, filename, verbose=False):
t0 = time.time()
self._file = open(filename, 'rb')
self.file_description = _read_first_record(self._file)
self.allind = _read_indices(self._file, self.file_description)
self._data = np.memmap(self._file, dtype='float32', mode='r')
if verbose: log.info("Setting _spectra")
self._spectra = LazyItem(self)
t1 = time.time()
if verbose: log.info("Setting posang. t={0}".format(t1-t0))
self.set_posang()
t2 = time.time()
if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
self._identify_otf_scans(verbose=verbose)
t3 = time.time()
#self._load_all_spectra()
if verbose:
log.info("Loaded CLASS object with {3} indices. Time breakdown:"
" {0}s for indices, "
"{1}s for posang, and {2}s for OTF scan identification"
.format(t1-t0, t2-t1, t3-t2, len(self.allind)))
def __repr__(self):
s = "\n".join(["{k}: {v}".format(k=k,v=v)
for k,v in iteritems(self.getinfo())])
return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
nspec=len(self.allind)) + s
def getinfo(self, allsources=False):
info = dict(
tels = self.tels,
lines = self.lines,
scans = self.scans,
sources = self.sources if allsources else self.sci_sources,
)
return info
def set_posang(self):
h0 = self.headers[0]
for h in self.headers:
dx = h['OFF1'] - h0['OFF1']
dy = h['OFF2'] - h0['OFF2']
h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
h0 = h
def _identify_otf_scans(self, verbose=False):
h0 = self.allind[0]
st = 0
otfscan = 0
posangs = [h['COMPPOSA'] for h in self.allind]
if verbose:
pb = ProgressBar(len(self.allind))
for ii,h in enumerate(self.allind):
if (h['SCAN'] != h0['SCAN']
or h['SOURC'] != h0['SOURC']):
h0['FIRSTSCAN'] = st
cpa = np.median(posangs[st:ii])
for hh in self.allind[st:ii]:
hh['SCANPOSA'] = cpa % 180
st = ii
if h['SCAN'] == h0['SCAN']:
h0['OTFSCAN'] = otfscan
otfscan += 1
h['OTFSCAN'] = otfscan
else:
otfscan = 0
h['OTFSCAN'] = otfscan
else:
h['OTFSCAN'] = otfscan
if verbose:
pb.update(ii)
def listscans(self, source=None, telescope=None, out=sys.stdout):
minid=0
scan = -1
sourc = ""
#tel = ''
minoff1,maxoff1 = np.inf,-np.inf
minoff2,maxoff2 = np.inf,-np.inf
ttlangle,nangle = 0.0,0
print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
"[ {RAmin:>12s}, {RAmax:>12s} ] "
"[ {DECmin:>12s}, {DECmax:>12s} ] "
"{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
.format(entries='Scans', SOURC='Source', XTEL='Telescope',
SCAN='Scan', SUBSCAN='Subscan',
RAmin='min(RA)', RAmax='max(RA)',
DECmin='min(DEC)', DECmax='max(DEC)',
SCANPOSA='Scan PA',
angle='Angle', OTFSCAN='OTFscan',
TSYS='TSYS', UTD='UTD'),
file=out)
data_rows = []
for ii,row in enumerate(self.headers):
if (row['SCAN'] == scan
and row['SOURC'] == sourc
#and row['XTEL'] == tel
):
minoff1 = min(minoff1, row['OFF1'])
maxoff1 = max(maxoff1, row['OFF1'])
minoff2 = min(minoff2, row['OFF2'])
maxoff2 = max(maxoff2, row['OFF2'])
ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
row['OFF1'] - prevrow['OFF1'])%np.pi
nangle += 1
prevrow = row
else:
if scan == -1:
scan = row['SCAN']
sourc = row['SOURC']
#tel = row['XTEL']
prevrow = row
continue
ok = True
if source is not None:
if isinstance(source, (list,tuple)):
ok = ok and any(re.search((s), prevrow['SOURC'])
for s in source)
else:
ok = ok and re.search((source), prevrow['SOURC'])
if telescope is not None:
ok = ok and re.search((telescope), prevrow['XTEL'])
if ok:
data = dict(RAmin=minoff1*180/np.pi*3600,
RAmax=maxoff1*180/np.pi*3600,
DECmin=minoff2*180/np.pi*3600,
DECmax=maxoff2*180/np.pi*3600,
angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
e0=minid,
e1=ii-1,
#TSYS=row['TSYS'] if 'TSYS' in row else '--',
UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
**prevrow)
print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
"[ {RAmin:12f}, {RAmax:12f} ] "
"[ {DECmin:12f}, {DECmax:12f} ] "
"{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
" {TSYS:>8.1f} {UTD:12f}".
format(**data),
file=out)
data_rows.append(data)
minoff1,maxoff1 = np.inf,-np.inf
minoff2,maxoff2 = np.inf,-np.inf
ttlangle,nangle = 0.0,0
scan = row['SCAN']
sourc = row['SOURC']
#tel = row['XTEL']
minid = ii
        return data_rows
@property
def tels(self):
if hasattr(self,'_tels'):
return self._tels
else:
self._tels = set([h['XTEL'] for h in self.allind])
return self._tels
@property
def sources(self):
if hasattr(self,'_source'):
return self._source
else:
self._source = set([h['SOURC'] for h in self.allind])
return self._source
@property
def scans(self):
if hasattr(self,'_scan'):
return self._scan
else:
self._scan = set([h['SCAN'] for h in self.allind])
return self._scan
@property
def sci_sources(self):
return set([s for s in self.sources
if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
'COLD')])
@property
def lines(self):
if hasattr(self,'_lines'):
return self._lines
else:
self._lines = set([h['LINE'] for h in self.allind])
return self._lines
def _load_all_spectra(self, indices=None):
if indices is None:
indices = range(self.file_description['xnext']-1)
if hasattr(self, '_loaded_indices'):
indices_set = set(indices)
indices_to_load = (indices_set.difference(self._loaded_indices))
self._loaded_indices = self._loaded_indices.union(indices_set)
            # truthiness check: any() is wrong when the set is {0}
            if indices_to_load:
                pb = ProgressBar(len(indices_to_load))
                # iterate over the set itself; xrange() of a set is a TypeError
                for ii,k in enumerate(indices_to_load):
                    self._spectra[k]
                    pb.update(ii)
else:
self._loaded_indices = set(indices)
self._spectra.load_all()
@property
def spectra(self):
return [x[0] for x in self._spectra]
@property
def headers(self):
return [self._spectra[ii][1]
if ii in self._spectra else x
for ii,x in enumerate(self.allind)]
def select_spectra(self,
all=None,
line=None,
linere=None,
linereflags=re.IGNORECASE,
number=None,
scan=None,
offset=None,
source=None,
sourcere=None,
sourcereflags=re.IGNORECASE,
range=None,
quality=None,
telescope=None,
telescopere=None,
telescopereflags=re.IGNORECASE,
subscan=None,
entry=None,
posang=None,
#observed=None,
#reduced=None,
frequency=None,
section=None,
user=None,
include_old_versions=False,
):
"""
Parameters
----------
include_old_versions: bool
Include spectra with XVER numbers <0? These are CLASS spectra that
have been "overwritten" (re-reduced?)
"""
if entry is not None and len(entry)==2:
return irange(entry[0], entry[1])
if frequency is not None:
self._load_all_spectra()
sel = [(re.search(re.escape(line), h['LINE'], re.IGNORECASE)
if line is not None else True) and
(re.search(linere, h['LINE'], linereflags)
if linere is not None else True) and
(h['SCAN'] == scan if scan is not None else True) and
((h['OFF1'] == offset or
h['OFF2'] == offset) if offset is not None else True) and
(re.search(re.escape(source), h['CSOUR'], re.IGNORECASE)
if source is not None else True) and
(re.search(sourcere, h['CSOUR'], sourcereflags)
if sourcere is not None else True) and
(h['OFF1']>range[0] and h['OFF1'] < range[1] and
h['OFF2']>range[2] and h['OFF2'] < range[3]
if range is not None and len(range)==4 else True) and
(h['QUAL'] == quality if quality is not None else True) and
(re.search(re.escape(telescope), h['CTELE'], re.IGNORECASE)
if telescope is not None else True) and
(re.search(telescopere, h['CTELE'], telescopereflags)
if telescopere is not None else True) and
(h['SUBSCAN']==subscan if subscan is not None else True) and
(h['NUM'] >= number[0] and h['NUM'] < number[1]
if number is not None else True) and
('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
h['RESTF'] > frequency[0] and
h['RESTF'] < frequency[1]
if frequency is not None and len(frequency)==2
else True) and
(h['COMPPOSA']%180 > posang[0] and
h['COMPPOSA']%180 < posang[1]
if posang is not None and len(posang)==2
else True) and
               ((h['XVER'] if 'XVER' in h else h.get('VER', 1)) > 0
                if not include_old_versions else True)
for h in self.headers
]
return [ii for ii,k in enumerate(sel) if k]
def get_spectra(self, progressbar=True, **kwargs):
selected_indices = self.select_spectra(**kwargs)
        if not selected_indices:
            raise ValueError("Selection yielded no spectra.")
self._spectra.load(selected_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in selected_indices]
def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
spdata = self.get_spectra(progressbar=progressbar, **kwargs)
spectra = [pyspeckit.Spectrum(data=data,
xarr=make_axis(header),
header=clean_header(header))
for data,header in spdata]
return spectra
def read_observations(self, observation_indices, progressbar=True):
self._spectra.load(observation_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
telescope=None, posang=None, verbose=False,
flag_array=None):
"""
Read a binary class file.
Based on the
`GILDAS CLASS file type Specification
<http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_
Parameters
----------
filename: str
downsample_factor: None or int
Factor by which to downsample data by averaging. Useful for
overresolved data.
sourcename: str or list of str
Source names to match to the data (uses regex)
telescope: str or list of str
'XTEL' or 'TELE' parameters: the telescope & instrument
flag_array: np.ndarray
An array with the same shape as the data used to flag out
(remove) data when downsampling. True = flag out
"""
classobj = ClassObject(filename)
if not isinstance(sourcename, (list,tuple)):
sourcename = [sourcename]
if not isinstance(telescope, (list,tuple)):
telescope = [telescope]
spectra,headers = [],[]
if verbose:
log.info("Reading...")
selection = [ii
for source in sourcename
for tel in telescope
for ii in classobj.select_spectra(sourcere=source,
telescope=tel,
posang=posang)]
sphdr = classobj.read_observations(selection)
if len(sphdr) == 0:
return None
spec,hdr = zip(*sphdr)
spectra += spec
headers += hdr
indexes = headers
weight = ~flag_array if flag_array is not None else None
if downsample_factor is not None:
if verbose:
log.info("Downsampling...")
spectra = [downsample_1d(spec, downsample_factor,
weight=weight)
for spec in ProgressBar(spectra)]
headers = [downsample_header(h, downsample_factor)
for h in ProgressBar(headers)]
return spectra,headers,indexes
def downsample_header(hdr, downsample_factor):
for k in ('NCHAN','NPOIN','DATALEN'):
if k in hdr:
            hdr[k] = hdr[k] // downsample_factor  # keep channel counts integral
# maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1
scalefactor = 1./downsample_factor
hdr['RCHAN'] = (hdr['RCHAN']-1)*scalefactor + 0.5 + scalefactor/2.
for kw in ['FRES','VRES']:
if kw in hdr:
hdr[kw] *= downsample_factor
return hdr
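# Worked example for the RCHAN rescaling above (illustrative numbers): with
# downsample_factor=4 (scalefactor=0.25) an input reference channel RCHAN=1
# maps to (1-1)*0.25 + 0.5 + 0.125 = 0.625 in output-pixel coordinates.
# Consistency check: output pixel 1 averages input pixels 1-4, so its centre
# (output coordinate 1.0) corresponds to input coordinate 2.5, and
# (2.5-1)*0.25 + 0.625 = 1.0 as required for plain block averaging.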
def make_axis(header,imagfreq=False):
"""
Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS "header"
"""
from .. import units
rest_frequency = header.get('RESTF')
xunits = 'MHz'
nchan = header.get('NCHAN')
voff = header.get('VOFF')
foff = header.get('FOFF')
doppler = header.get('DOPPLER')
fres = header.get('FRES')
refchan = header.get('RCHAN')
imfreq = header.get('IMAGE')
if foff in (None, 0.0) and voff not in (None, 0.0):
# Radio convention
foff = -voff/2.997924580e5 * rest_frequency
if not imagfreq:
xarr = rest_frequency + foff + (numpy.arange(1, nchan+1) - refchan) * fres
XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=rest_frequency*u.MHz)
else:
xarr = imfreq - (numpy.arange(1, nchan+1) - refchan) * fres
XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=imfreq*u.MHz)
return XAxis
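# Illustrative axis check for make_axis (assumed header values, not real
# data): with RESTF=230538.0 MHz, FOFF=0, NCHAN=4, RCHAN=2.5 and FRES=0.5
# MHz, the signal-band axis restf + (chan - rchan)*fres for chan=1..4 is
# [230537.25, 230537.75, 230538.25, 230538.75] MHz.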
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
imagfreq=False, DEBUG=False, **kwargs):
"""
Load an entire CLASS observing session into a list of ObsBlocks based on
matches to the 'telescope', 'line' and 'source' names
Parameters
----------
filename : string
The Gildas CLASS data file to read the spectra from.
telescope : list
List of telescope names to be matched.
line : list
List of line names to be matched.
source : list (optional)
List of source names to be matched. Defaults to None.
imagfreq : bool
Create a SpectroscopicAxis with the image frequency.
"""
if datatuple is None:
        # read_class() takes `verbose`, not DEBUG; map the flag onto it
        spectra,header,indexes = read_class(filename, verbose=DEBUG, **kwargs)
else:
spectra,header,indexes = datatuple
obslist = []
lastscannum = -1
spectrumlist = None
for sp,hdr,ind in zip(spectra,header,indexes):
hdr.update(ind)
# this is slow but necessary...
H = pyfits.Header()
for k,v in iteritems(hdr):
if hasattr(v,"__len__") and not isinstance(v,str):
if len(v) > 1:
for ii,vv in enumerate(v):
H.update(k[:7]+str(ii),vv)
else:
H.update(k,v[0])
elif pyfits.Card._comment_FSC_RE.match(str(v)) is not None:
H.update(k,v)
scannum = hdr['SCAN']
if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
continue
if hdr['LINE'].strip() not in line:
continue
if (source is not None) and (hdr['SOURC'].strip() not in source):
continue
hdr.update({'RESTFREQ':hdr.get('RESTF')})
H.update('RESTFREQ',hdr.get('RESTF'))
#print "Did not skip %s,%s. Scannum, last: %i,%i" % (hdr['XTEL'],hdr['LINE'],scannum,lastscannum)
if scannum != lastscannum:
lastscannum = scannum
if spectrumlist is not None:
obslist.append(pyspeckit.ObsBlock(spectrumlist))
xarr = make_axis(hdr,imagfreq=imagfreq)
spectrumlist = [(
pyspeckit.Spectrum(xarr=xarr,
header=H,
data=sp))]
else:
spectrumlist.append(
pyspeckit.Spectrum(xarr=xarr,
header=H,
data=sp))
    # flush the final scan's spectra; otherwise the last ObsBlock is dropped
    if spectrumlist is not None:
        obslist.append(pyspeckit.ObsBlock(spectrumlist))
    return obslist
class LazyItem(object):
"""
Simple lazy spectrum-retriever wrapper
"""
def __init__(self, parent):
self.parent = parent
self.sphdr = {}
self.nind = len(self.parent.allind)
self.nloaded = 0
def __repr__(self):
return ("Set of {0} spectra & headers, {1} loaded"
" ({2:0.2f}%)".format(self.nind, self.nloaded,
(float(self.nloaded)/self.nind)*100))
def load_all(self, progressbar=True):
self.load(range(self.nind))
def load(self, indices, progressbar=True):
pb = ProgressBar(len(indices))
counter = 0
for k in indices:
self[k]
counter += 1
pb.update(counter)
def __getitem__(self, key):
if key in self.sphdr:
return self.sphdr[key]
elif isinstance(key, slice):
return [self[k] for k in xrange(key.start or 0,
                                            key.stop or len(self.parent.allind),
key.step or 1)]
else:
sphd = read_observation(self.parent._file, key,
file_description=self.parent.file_description,
indices=self.parent.allind,
my_memmap=self.parent._data)
# Update the header with OTFSCAN and POSANG info
sphd[1].update(self.parent.allind[key])
self.sphdr[key] = sphd
self.nloaded += 1
return sphd
    def __iter__(self):
        return self.__next__()
    def __next__(self):
        for k in self.sphdr:
            yield self.sphdr[k]
def __contains__(self, key):
return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
"""
Load each individual spectrum within a CLASS file into a list of Spectrum
objects
"""
if datatuple is None:
spectra,header,indexes = read_class(filename, **kwargs)
else:
spectra,header,indexes = datatuple
spectrumlist = []
for sp,hdr,ind in zip(spectra,header,indexes):
hdr.update(ind)
xarr = make_axis(hdr)
spectrumlist.append(
pyspeckit.Spectrum(xarr=xarr,
header=hdr,
data=sp))
return pyspeckit.Spectra(spectrumlist)
def tests():
"""
Tests are specific to the machine on which this code was developed.
"""
fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
#fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'
#fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'
#fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'
#F1 = read_class(fn1)#,DEBUG=True)
#F2 = read_class(fn2)
n2hp = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])
hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)'])
| mit | -4,625,620,523,675,145,000 | 41.584127 | 220 | 0.516267 | false | 3.424385 | false | false | false |
xbuf/blender_io_xbuf | protocol.py | 1 | 4603 | # This file is part of blender_io_xbuf. blender_io_xbuf is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright David Bernard
# <pep8 compliant>
import struct
import asyncio
import atexit
import xbuf
import xbuf.datas_pb2
import xbuf.cmds_pb2
from . import xbuf_export # pylint: disable=W0406
# TODO better management off the event loop (eg on unregister)
loop = asyncio.get_event_loop()
atexit.register(loop.close)
class Kind:
pingpong = 0x01
logs = 0x02
ask_screenshot = 0x03
raw_screenshot = 0x04
msgpack = 0x05
xbuf_cmd = 0x06
class Client:
def __init__(self):
self.writer = None
self.reader = None
self.host = None
self.port = None
def __del__(self):
self.close()
def close(self):
if self.writer is not None:
print('Close the socket/writer')
self.writer.write_eof()
self.writer.close()
self.writer = None
self.reader = None
@asyncio.coroutine
def connect(self, host, port):
if (host != self.host) or (port != self.port):
self.close()
if self.writer is None:
self.host = host
self.port = port
(self.reader, self.writer) = yield from asyncio.open_connection(host, port, loop=loop)
return self
@asyncio.coroutine
def readHeader(reader):
"""return (size, kind)"""
header = yield from reader.readexactly(5)
return struct.unpack('>iB', header)
@asyncio.coroutine
def readMessage(reader):
"""return (kind, raw_message)"""
(size, kind) = yield from readHeader(reader)
# kind = header[4]
raw = yield from reader.readexactly(size)
return (kind, raw)
def writeMessage(writer, kind, body):
writer.write((len(body)).to_bytes(4, byteorder='big'))
writer.write((kind).to_bytes(1, byteorder='big'))
writer.write(body)
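# Wire-format sketch for the framing above (illustrative): each message is a
# 4-byte big-endian payload length, one kind byte, then the payload. E.g.
# writeMessage(writer, Kind.pingpong, b'ok') emits
# b'\x00\x00\x00\x02' + b'\x01' + b'ok', which readMessage() parses back
# into (Kind.pingpong, b'ok').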
def askScreenshot(writer, width, height):
b = bytearray()
b.extend((width).to_bytes(4, byteorder='big'))
b.extend((height).to_bytes(4, byteorder='big'))
writeMessage(writer, Kind.ask_screenshot, b)
def setEye(writer, location, rotation, projection_matrix, near, far, is_ortho):
# sendCmd(writer, 'updateCamera', (_encode_vec3(location), _encode_quat(rotation), _encode_mat4(projection_matrix)))
cmd = xbuf.cmds_pb2.Cmd()
# cmd.setCamera = xbuf.cmds_pb2.SetCamera()
xbuf_export.cnv_translation(location, cmd.setEye.location)
xbuf_export.cnv_quatZupToYup(rotation, cmd.setEye.rotation)
xbuf_export.cnv_mat4(projection_matrix, cmd.setEye.projection)
cmd.setEye.near = near
cmd.setEye.far = far
cmd.setEye.projMode = xbuf.cmds_pb2.SetEye.orthographic if is_ortho else xbuf.cmds_pb2.SetEye.perspective
writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def setData(writer, scene, cfg):
cmd = xbuf.cmds_pb2.Cmd()
xbuf_export.export(scene, cmd.setData, cfg)
send = (len(cmd.setData.relations) > 0 or
len(cmd.setData.tobjects) > 0 or
len(cmd.setData.geometries) > 0 or
len(cmd.setData.materials) > 0 or
len(cmd.setData.lights) > 0
)
if send:
# print("send setData")
writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def changeAssetFolders(writer, cfg):
cmd = xbuf.cmds_pb2.Cmd()
cmd.changeAssetFolders.path.append(cfg.assets_path)
cmd.changeAssetFolders.register = True
cmd.changeAssetFolders.unregisterOther = True
writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def playAnimation(writer, ref, anims):
cmd = xbuf.cmds_pb2.Cmd()
cmd.playAnimation.ref = ref
cmd.playAnimation.animationsNames.extend(anims)
writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def run_until_complete(f, *args, **kwargs):
if asyncio.iscoroutine(f):
loop.run_until_complete(f)
else:
coro = asyncio.coroutine(f)
future = coro(*args, **kwargs)
loop.run_until_complete(future)
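# Minimal usage sketch (illustrative; the host/port values are assumptions
# for a locally running xbuf viewer, not part of this module's API):
#   client = Client()
#   run_until_complete(client.connect, '127.0.0.1', 4242)
#   askScreenshot(client.writer, 640, 480)
#   run_until_complete(client.writer.drain())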
| gpl-3.0 | -6,606,697,315,505,430,000 | 30.101351 | 120 | 0.669781 | false | 3.412157 | false | false | false |
ModulousSmash/Modulous | KerbalStuff/blueprints/mods.py | 1 | 19423 | from flask import Blueprint, render_template, request, g, Response, redirect, session, abort, send_file, make_response, url_for
from flask.ext.login import current_user
from sqlalchemy import desc
from KerbalStuff.objects import User, Mod, ModVersion, DownloadEvent, FollowEvent, ReferralEvent, Featured, Media, GameVersion, Category, Report
from KerbalStuff.email import send_update_notification, send_autoupdate_notification
from KerbalStuff.database import db
from KerbalStuff.common import *
from KerbalStuff.config import _cfg
from KerbalStuff.blueprints.api import default_description
from KerbalStuff.ckan import send_to_ckan
from werkzeug.utils import secure_filename
from datetime import datetime, timedelta
from shutil import rmtree, copyfile
from urllib.parse import urlparse
import os
import zipfile
import urllib
import random
mods = Blueprint('mods', __name__, template_folder='../../templates/mods')
@mods.route("/random")
def random_mod():
mods = Mod.query.filter(Mod.published == True).all()
mod = random.choice(mods)
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route("/mod/<int:id>/<path:mod_name>/update")
def update(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
editable = False
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
return render_template("update.html", mod=mod, game_versions=GameVersion.query.order_by(desc(GameVersion.id)).all())
@mods.route("/mod/<int:id>.rss", defaults={'mod_name': None})
@mods.route("/mod/<int:id>/<path:mod_name>.rss")
def mod_rss(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
return render_template("rss-mod.xml", mod=mod)
@mods.route("/mod/<int:id>", defaults={'mod_name': None})
@mods.route("/mod/<int:id>/<path:mod_name>")
@with_session
def mod(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if not mod.published and not editable:
abort(401)
latest = mod.default_version()
referral = request.referrer
if referral:
host = urllib.parse.urlparse(referral).hostname
event = ReferralEvent.query\
.filter(ReferralEvent.mod_id == mod.id)\
.filter(ReferralEvent.host == host)\
.first()
if not event:
event = ReferralEvent()
event.mod = mod
event.events = 1
event.host = host
db.add(event)
db.flush()
db.commit()
mod.referrals.append(event)
else:
event.events += 1
download_stats = None
follower_stats = None
referrals = None
json_versions = None
thirty_days_ago = datetime.now() - timedelta(days=30)
referrals = list()
for r in ReferralEvent.query\
.filter(ReferralEvent.mod_id == mod.id)\
.order_by(desc(ReferralEvent.events)):
referrals.append( { 'host': r.host, 'count': r.events } )
download_stats = list()
for d in DownloadEvent.query\
.filter(DownloadEvent.mod_id == mod.id)\
.filter(DownloadEvent.created > thirty_days_ago)\
.order_by(DownloadEvent.created):
download_stats.append(dumb_object(d))
follower_stats = list()
for f in FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.filter(FollowEvent.created > thirty_days_ago)\
.order_by(FollowEvent.created):
follower_stats.append(dumb_object(f))
json_versions = list()
for v in mod.versions:
json_versions.append({ 'name': v.friendly_version, 'id': v.id })
    if request.args.get('noedit') is not None:
        editable = False
forumThread = False
    if mod.external_link is not None:
        try:
            u = urlparse(mod.external_link)
            if u.netloc == 'forum.kerbalspaceprogram.com':
                forumThread = True
        except Exception as e:
            # A malformed external link should not break the page; log and move on.
            print(e)
total_authors = 1
pending_invite = False
owner = editable
for a in mod.shared_authors:
if a.accepted:
total_authors += 1
if current_user:
if current_user.id == a.user_id and not a.accepted:
pending_invite = True
if current_user.id == a.user_id and a.accepted:
editable = True
game_versions = GameVersion.query.order_by(desc(GameVersion.id)).all()
outdated = False
if latest:
outdated = game_versions[0].friendly_version != latest.ksp_version
return render_template("mod.html",
**{
'mod': mod,
'latest': latest,
'safe_name': secure_filename(mod.name)[:64],
'featured': any(Featured.query.filter(Featured.mod_id == mod.id).all()),
'editable': editable,
'owner': owner,
'pending_invite': pending_invite,
'download_stats': download_stats,
'follower_stats': follower_stats,
'referrals': referrals,
'json_versions': json_versions,
'thirty_days_ago': thirty_days_ago,
'share_link': urllib.parse.quote_plus(_cfg("protocol") + "://" + _cfg("domain") + "/mod/" + str(mod.id)),
'game_versions': game_versions,
'outdated': outdated,
'forum_thread': forumThread,
            'new': request.args.get('new') is not None,
            'stupid_user': request.args.get('stupid_user') is not None,
'total_authors': total_authors
})
@mods.route("/mod/<int:id>/<path:mod_name>/edit", methods=['GET', 'POST'])
@with_session
@loginrequired
def edit_mod(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
editable = False
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
if request.method == 'GET':
return render_template("edit_mod.html", mod=mod, original=mod.user == current_user, categories = Category.query.all())
else:
short_description = request.form.get('short-description')
tags = request.form.get('tags')
        other_authors = request.form.get('other-authors')
modmm = request.form.get('modmm')
        if modmm is None:
            modmm = False
        else:
            modmm = modmm.lower() in ("true", "yes", "on")
license = request.form.get('license')
category = request.form.get('category')
donation_link = request.form.get('donation-link')
external_link = request.form.get('external-link')
source_link = request.form.get('source-link')
description = request.form.get('description')
background = request.form.get('background')
bgOffsetY = request.form.get('bg-offset-y')
if not license or license == '':
return render_template("edit_mod.html", mod=mod, error="All mods must have a license.")
if not category or category == '':
abort(401)
else:
category = Category.query.filter(Category.name == category).first()
mod.short_description = short_description
mod.license = license
mod.donation_link = donation_link
mod.external_link = external_link
mod.source_link = source_link
mod.description = description
mod.tags = tags
mod.modmm = modmm
mod.category = category
if other_authors == 'None' or other_authors == '':
mod.other_authors = None
else:
mod.other_authors = other_authors
if background and background != '':
mod.background = background
        try:
            mod.bgOffsetY = int(bgOffsetY)
        except (TypeError, ValueError):
            pass
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route("/create/mod")
@loginrequired
@with_session
def create_mod():
return render_template("create.html", **{ 'game_versions': GameVersion.query.order_by(desc(GameVersion.id)).all(), 'categories': Category.query.all()})
@mods.route("/mod/<int:mod_id>/stats/downloads", defaults={'mod_name': None})
@mods.route("/mod/<int:mod_id>/<path:mod_name>/stats/downloads")
def export_downloads(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
download_stats = DownloadEvent.query\
.filter(DownloadEvent.mod_id == mod.id)\
.order_by(DownloadEvent.created)
response = make_response(render_template("downloads.csv", stats=download_stats))
response.headers['Content-Type'] = 'text/csv'
response.headers['Content-Disposition'] = 'attachment;filename=downloads.csv'
return response
@mods.route("/mod/<int:mod_id>/stats/followers", defaults={'mod_name': None})
@mods.route("/mod/<int:mod_id>/<path:mod_name>/stats/followers")
def export_followers(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
follower_stats = FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.order_by(FollowEvent.created)
response = make_response(render_template("followers.csv", stats=follower_stats))
response.headers['Content-Type'] = 'text/csv'
response.headers['Content-Disposition'] = 'attachment;filename=followers.csv'
return response
@mods.route("/mod/<int:mod_id>/stats/referrals", defaults={'mod_name': None})
@mods.route("/mod/<mod_id>/<path:mod_name>/stats/referrals")
def export_referrals(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
referral_stats = ReferralEvent.query\
.filter(ReferralEvent.mod_id == mod.id)\
.order_by(desc(ReferralEvent.events))
response = make_response(render_template("referrals.csv", stats=referral_stats))
response.headers['Content-Type'] = 'text/csv'
response.headers['Content-Disposition'] = 'attachment;filename=referrals.csv'
return response
@mods.route("/mod/<int:mod_id>/delete", methods=['POST'])
@loginrequired
@with_session
def delete(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if not editable:
abort(401)
db.delete(mod)
for feature in Featured.query.filter(Featured.mod_id == mod.id).all():
db.delete(feature)
for media in Media.query.filter(Media.mod_id == mod.id).all():
db.delete(media)
for version in ModVersion.query.filter(ModVersion.mod_id == mod.id).all():
db.delete(version)
base_path = os.path.join(secure_filename(mod.user.username) + '_' + str(mod.user.id), secure_filename(mod.name))
full_path = os.path.join(_cfg('storage'), base_path)
db.commit()
rmtree(full_path)
return redirect("/profile/" + current_user.username)
@mods.route("/mod/<int:mod_id>/follow", methods=['POST'])
@loginrequired
@json_output
@with_session
def follow(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if any(m.id == mod.id for m in current_user.following):
abort(418)
event = FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.order_by(desc(FollowEvent.created))\
.first()
    # Events are aggregated hourly; total_seconds() counts whole days too,
    # unlike timedelta.seconds, which wraps around at 24 hours
    if not event or (datetime.now() - event.created).total_seconds() >= 60 * 60:
event = FollowEvent()
event.mod = mod
event.delta = 1
event.events = 1
db.add(event)
db.flush()
db.commit()
mod.follow_events.append(event)
else:
event.delta += 1
event.events += 1
mod.follower_count += 1
current_user.following.append(mod)
return { "success": True }
@mods.route("/mod/<int:mod_id>/unfollow", methods=['POST'])
@loginrequired
@json_output
@with_session
def unfollow(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if not any(m.id == mod.id for m in current_user.following):
abort(418)
event = FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.order_by(desc(FollowEvent.created))\
.first()
    # Events are aggregated hourly (see the note in follow() above)
    if not event or (datetime.now() - event.created).total_seconds() >= 60 * 60:
event = FollowEvent()
event.mod = mod
event.delta = -1
event.events = 1
mod.follow_events.append(event)
db.add(event)
else:
event.delta -= 1
event.events += 1
mod.follower_count -= 1
current_user.following = [m for m in current_user.following if m.id != int(mod_id)]
return { "success": True }
@mods.route('/mod/<int:mod_id>/feature', methods=['POST'])
@adminrequired
@json_output
@with_session
def feature(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if any(Featured.query.filter(Featured.mod_id == mod_id).all()):
abort(409)
feature = Featured()
feature.mod = mod
db.add(feature)
return { "success": True }
@mods.route('/mod/<int:mod_id>/unfeature', methods=['POST'])
@adminrequired
@json_output
@with_session
def unfeature(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
feature = Featured.query.filter(Featured.mod_id == mod_id).first()
if not feature:
abort(404)
db.delete(feature)
return { "success": True }
@mods.route('/mod/<int:mod_id>/<path:mod_name>/publish')
@with_session
@loginrequired
def publish(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if current_user.id != mod.user_id:
abort(401)
if mod.description == default_description:
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name, stupid_user=True))
mod.published = True
mod.updated = datetime.now()
send_to_ckan(mod)
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route('/mod/<int:mod_id>/download/<version>', defaults={ 'mod_name': None })
@mods.route('/mod/<int:mod_id>/<path:mod_name>/download/<version>')
@with_session
def download(mod_id, mod_name, version):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if not mod.published and (not current_user or current_user.id != mod.user_id):
abort(401)
version = ModVersion.query.filter(ModVersion.mod_id == mod_id, \
ModVersion.friendly_version == version).first()
if not version:
abort(404)
    # Pass both conditions as separate filter() arguments; the previous
    # Python-level "and" meant only one condition reached the SQL query.
    download = DownloadEvent.query\
            .filter(DownloadEvent.mod_id == mod.id,
                    DownloadEvent.version_id == version.id)\
            .order_by(desc(DownloadEvent.created))\
            .first()
if not os.path.isfile(os.path.join(_cfg('storage'), version.download_path)):
abort(404)
    if 'Range' not in request.headers:
        # Events are aggregated hourly
        if not download or (datetime.now() - download.created).total_seconds() >= 60 * 60:
download = DownloadEvent()
download.mod = mod
download.version = version
download.downloads = 1
db.add(download)
db.flush()
db.commit()
mod.downloads.append(download)
else:
download.downloads += 1
mod.download_count += 1
response = make_response(send_file(os.path.join(_cfg('storage'), version.download_path), as_attachment = True))
if _cfg("use-x-accel") == 'true':
response = make_response("")
response.headers['Content-Type'] = 'application/zip'
response.headers['Content-Disposition'] = 'attachment; filename=' + os.path.basename(version.download_path)
response.headers['X-Accel-Redirect'] = '/internal/' + version.download_path
return response
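# A rough sketch of the nginx configuration assumed by the X-Accel-Redirect
# branch above (the paths are illustrative and depend on the deployment):
#
#   location /internal/ {
#       internal;
#       alias /path/to/storage/;
#   }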
@mods.route('/mod/<int:mod_id>/version/<version_id>/delete', methods=['POST'])
@with_session
@loginrequired
def delete_version(mod_id, version_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
version = [v for v in mod.versions if v.id == int(version_id)]
if len(mod.versions) == 1:
abort(400)
if len(version) == 0:
abort(404)
if version[0].id == mod.default_version_id:
abort(400)
db.delete(version[0])
mod.versions = [v for v in mod.versions if v.id != int(version_id)]
db.commit()
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route('/mod/<int:mod_id>/<path:mod_name>/edit_version', methods=['POST'])
@mods.route('/mod/<int:mod_id>/edit_version', methods=['POST'], defaults={ 'mod_name': None })
@with_session
@loginrequired
def edit_version(mod_name, mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
version_id = int(request.form.get('version-id'))
changelog = request.form.get('changelog')
version = [v for v in mod.versions if v.id == version_id]
if len(version) == 0:
abort(404)
version = version[0]
version.changelog = changelog
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route('/mod/<int:mod_id>/autoupdate', methods=['POST'])
@with_session
@loginrequired
def autoupdate(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
default = mod.default_version()
default.ksp_version = GameVersion.query.order_by(desc(GameVersion.id)).first().friendly_version
send_autoupdate_notification(mod)
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
| mit | -6,892,305,928,326,822,000 | 35.855787 | 155 | 0.615713 | false | 3.514839 | false | false | false |
ratschlab/RNA-geeq | SAFT/find_optimal_param_set.py | 1 | 11493 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2009-2010 Andre Kahles
Copyright (C) 2009-2010 by Friedrich Miescher Laboratory, Tuebingen, Germany
This script finds an optimal parameter set that maximizes the performance on a
given intron feature file.
For detailed usage information type:
python find_optimal_param_set.py
"""
import sys
import cPickle
class Feature(object):
"""Is an intron feature object"""
def __init__(self, max_mm=80, feature_string=''):
if feature_string == '':
self.alignment_support = 0
self.submission_support = 1
self.mm_ex = dict()
self.max_mm = max_mm + 1
else:
self.alignment_support = int(feature_string[0])
self.submission_support = int(feature_string[1])
self.mm_ex = dict()
self.max_mm = max_mm + 1
for _sl in feature_string[2:]:
(key, value) = _sl.split(':')
self.mm_ex[key] = int(value)
def merge_features(self, feature_string):
"""Merges information in feature_string into current feature object"""
self.alignment_support += int(feature_string[0])
self.submission_support += int(feature_string[1])
for _sl in feature_string[2:]:
(key, value) = _sl.split(':')
try:
self.mm_ex[key] += int(value)
except KeyError:
self.mm_ex[key] = int(value)
def add_mm_ex(self, ex, mm):
"""Adds mm ex information"""
self.alignment_support += 1
try:
self.mm_ex[(ex*self.max_mm) + mm] += 1
except KeyError:
self.mm_ex[(ex*self.max_mm) + mm] = 1
def get_feature_string(self):
"""Returns string with mm ex elements."""
_line = (str(self.alignment_support) + '\t' + str(self.submission_support) + '\t')
for key in self.mm_ex:
_line += (str(key) + ':' + str(self.mm_ex[key]) + '\t')
return _line[:-1]
def get_submission_support(self):
"""Returns submission support"""
return int(self.submission_support)
def is_valid(self, mm, ex, mc, options):
"""Returns true, if at least one alignment fulfills the requirements with respect to mm, ex, and mc. False otherwise."""
if self.alignment_support < mc:
return False
is_valid = False
for key in self.mm_ex.keys():
_ex = int(key) / (options.max_feat_mismatches + 1)
_mm = int(key) % (options.max_feat_mismatches + 1)
if _mm <= mm and _ex >= ex:
is_valid = True
break
return is_valid
def parse_options(argv):
"""Parses options from the command line """
from optparse import OptionParser, OptionGroup
parser = OptionParser()
required = OptionGroup(parser, 'REQUIRED')
required.add_option('-b', '--best_score', dest='best_scores', metavar='FILE', help='file to store the best scoring parameters', default='-')
required.add_option('-m', '--matrix', dest='matrix', metavar='FILE', help='file to store the full performance matrix', default='-')
required.add_option('-f', '--features', dest='features', metavar='FILE', help='alignment intron features', default='-')
required.add_option('-i', '--annotation_introns', dest='anno_int', metavar='FILE', help='annotation intron list', default='-')
optional = OptionGroup(parser, 'OPTIONAL')
optional.add_option('-E', '--exclude_introns', dest='exclude_introns', metavar='STRINGLIST', help='list of comma separated intron files to exclude from submitted features', default='-')
optional.add_option('-I', '--max_intron_len', dest='max_intron_len', metavar='INT', type='int', help='maximal intron length [10000000]', default=10000000)
optional.add_option('-s', '--ignore_strand', dest='ignore_strand', action='store_true', help='ignore strand information present in annotation', default=False)
optional.add_option('-X', '--max_feat_mismatches', dest='max_feat_mismatches', metavar='INT', type='int', help='max number of mismatches for feat generation [80] (do only change, if you are absolutely sure!)', default=80)
optional.add_option('-v', '--verbose', dest='verbose', action='store_true', help='verbosity', default=False)
parser.add_option_group(required)
parser.add_option_group(optional)
(options, args) = parser.parse_args()
if len(argv) < 2:
parser.print_help()
sys.exit(2)
return options
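# Example invocation (the file names are illustrative placeholders):
#
#   python find_optimal_param_set.py -f alignment.introns.features \
#       -i annotation_introns.pickle -b best_scores.tsv -m matrix.tsv -v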
def get_performance_value(full_features, mm, ex, mc, annotation_list, options):
"""Builds up a filtered intron list from the given alignment features and compares to the annotation."""
alignment_list = dict()
for feat in full_features.keys():
chrm = feat[0]
intron = (0, int(feat[1]), int(feat[2]))
### filter step
if (intron[2] - intron[1]) > options.max_intron_len:
continue
if not full_features[feat].is_valid(mm, ex, mc, options):
continue
try:
alignment_list[chrm][intron] = 0
except KeyError:
alignment_list[chrm] = {intron:0}
### match intron lists
total_precision = float(0)
total_recall = float(0)
key_count = 0
for chrm in annotation_list.keys():
if alignment_list.has_key(chrm):
matches = len(set(annotation_list[chrm].keys()).intersection(set(alignment_list[chrm].keys())))
total_precision += (float(matches) / float(max(1, len(alignment_list[chrm].keys()))))
total_recall += (float(matches) / float(max(1, len(annotation_list[chrm].keys()))))
### do not include chromosomes with zero values into average
if matches > 0:
key_count += 1
total_precision /= max(1.0, float(key_count))
total_recall /= max(1.0, float(key_count))
return (total_precision, total_recall)
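# Worked example with illustrative numbers: if chr1 yields 8 matches out of 10
# filtered alignment introns and 20 annotated introns, its precision is 0.8 and
# its recall 0.4; a chr2 with zero matches adds nothing and does not increment
# key_count, so the returned averages stay 0.8 and 0.4.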
def main():
"""Main function extracting intron features."""
options = parse_options(sys.argv)
### get list of annotated introns
annotation_list = cPickle.load(open(options.anno_int, 'r'))
if options.ignore_strand:
for chrm in annotation_list.keys():
skiplist = set()
for intron in annotation_list[chrm].keys():
if intron[0] == 0:
continue
annotation_list[chrm][(0, intron[1], intron[2])] = annotation_list[chrm][intron]
skiplist.add(intron)
for intron in skiplist:
del annotation_list[chrm][intron]
del skiplist
### filter annotation for max intron length
print '\nFiltering intron list for max intron len'
print '-----------------------------------------'
skipped = 0
for chrm in annotation_list.keys():
skiplist = set()
for intron in annotation_list[chrm].keys():
if (intron[2] - intron[1]) > options.max_intron_len:
skiplist.add(intron)
for intron in skiplist:
del annotation_list[chrm][intron]
skipped += len(skiplist)
print '%s introns removed from annotation' % skipped
del skiplist
full_features = dict()
if options.verbose:
print 'Parsing %s' % options.features
line_counter = 0
for line in open(options.features, 'r'):
if options.verbose and line_counter % 1000 == 0:
print 'parsed %i features from %s' % (line_counter, options.features)
line_counter += 1
sl = line.strip().split('\t')
(chrm, start, stop) = sl[:3]
try:
            full_features[(chrm, start, stop)].merge_features(sl[3:])
except KeyError:
full_features[(chrm, start, stop)] = Feature(80, sl[3:])
### filter full feature list for excluded introns
if options.exclude_introns != '-':
_ex_introns = options.exclude_introns.strip().split(',')
### handle leading or trailing commas
if _ex_introns[0] == '':
_ex_introns = _ex_introns[1:]
if _ex_introns[-1] == '':
_ex_introns = _ex_introns[:-1]
for _infile in _ex_introns:
_ex_intron = cPickle.load(open(_infile, 'r'))
for chrm in _ex_intron.keys():
for _intron in _ex_intron[chrm].keys():
try:
del full_features[(chrm, str(_intron[1]), str(_intron[2]))]
except KeyError:
continue
del _ex_intron
if options.verbose:
print 'Parsing completed.'
print 'parsed %i features from %s' % (line_counter, options.features)
### SEARCH SPACE
### iterate over different filter dimensions
#ex_list = [2, 4, 6, 8, 10, 12, 15, 20, 25, 30] # 10
#ex_list = [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 ] # 15
ex_list = [1, 2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18 ] # 15
mm_list = [0, 1, 2, 3, 4, 5, 6] # 7
mc_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # 10 ==> 700 combinations
checked_combs = 0
# pre rec fsc
max_pre = (0.0, 0.0, 0.0)
max_rec = (0.0, 0.0, 0.0)
max_fsc = (0.0, 0.0, 0.0)
max_pre_idx = (0, 0, 0)
max_rec_idx = (0, 0, 0)
max_fsc_idx = (0, 0, 0)
matrix_file = open(options.matrix, 'w')
for ex in ex_list:
for mm in mm_list:
for mc in mc_list:
if options.verbose and checked_combs % 10 == 0:
print 'checked %i parameter combinations' % checked_combs
print 'best scores so far:\n \tbest fScore: %0.2f, best recall: %0.2f, best precision: %0.2f' % (max_fsc[2], max_rec[1], max_pre[0])
checked_combs += 1
(pre, rec) = get_performance_value(full_features, mm, ex, mc, annotation_list, options)
if float(rec) + float(pre) > 0:
fsc = (2 * float(rec) * float(pre)) / (float(rec) + float(pre))
else:
fsc = 0.0
if pre > max_pre[0]:
max_pre = (pre, rec, fsc)
max_pre_idx = (ex, mm, mc)
if rec > max_rec[1]:
max_rec = (pre, rec, fsc)
max_rec_idx = (ex, mm, mc)
if fsc > max_fsc[2]:
max_fsc = (pre, rec, fsc)
max_fsc_idx = (ex, mm, mc)
### store information
### ex mm mc pre rec fsc
print >> matrix_file, '%s\t%s\t%s\t%s\t%s\t%s' % (ex, mm, mc, pre, rec, fsc)
matrix_file.close()
best_file = open(options.best_scores, 'w')
# best precision
print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_pre_idx[0], max_pre_idx[1], max_pre_idx[2], max_pre[0], max_pre[1], max_pre[2])
# best recall
print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_rec_idx[0], max_rec_idx[1], max_rec_idx[2], max_rec[0], max_rec[1], max_rec[2])
# best fScore
print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_fsc_idx[0], max_fsc_idx[1], max_fsc_idx[2], max_fsc[0], max_fsc[1], max_fsc[2])
best_file.close()
if __name__ == "__main__":
main()
| mit | 5,016,354,970,529,404,000 | 37.69697 | 225 | 0.560515 | false | 3.484839 | false | false | false |
intel-ctrlsys/actsys | datastore/datastore/database_schema/schema_migration/versions/d43655797899_changing_table_name_from_group_to_.py | 1 | 2060 | """Changing table name from 'group' to 'device_group'
Revision ID: d43655797899
Revises: 38f3c80e9932
Create Date: 2017-08-24 15:17:10.671537
"""
import textwrap
from alembic import op
# revision identifiers, used by Alembic.
revision = 'd43655797899'
down_revision = '38f3c80e9932'
branch_labels = None
depends_on = None
def upgrade():
op.execute(textwrap.dedent("""ALTER TABLE public.group RENAME TO device_group;"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION public.upsert_group(p_group_name character varying, p_device_list character varying)
RETURNS integer AS
$BODY$
DECLARE num_rows integer;
BEGIN
INSERT INTO public.device_group AS gro (group_name, device_list)
VALUES (p_group_name, p_device_list)
ON CONFLICT (group_name) DO UPDATE
SET
device_list = p_device_list
WHERE gro.group_name = p_group_name;
GET DIAGNOSTICS num_rows = ROW_COUNT;
RETURN num_rows;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;"""))
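# A minimal usage sketch (the argument values are illustrative): once this
# migration has run, the upsert can be exercised from SQL as
#
#   SELECT public.upsert_group('compute', 'node01,node02');
#
# which inserts the group or updates its device_list and returns the number of
# affected rows.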
def downgrade():
op.execute(textwrap.dedent("""ALTER TABLE device_group RENAME TO "group";"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION public.upsert_group(p_group_name character varying, p_device_list character varying)
RETURNS integer AS
$BODY$
DECLARE num_rows integer;
BEGIN
INSERT INTO public.group AS gro (group_name, device_list)
VALUES (p_group_name, p_device_list)
ON CONFLICT (group_name) DO UPDATE
SET
device_list = p_device_list
WHERE gro.group_name = p_group_name;
GET DIAGNOSTICS num_rows = ROW_COUNT;
RETURN num_rows;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;"""))
| apache-2.0 | -1,239,599,158,944,896,800 | 33.333333 | 123 | 0.571845 | false | 3.814815 | false | false | false |
jorik041/stackprinter | app/lib/deliciousapi.py | 2 | 50450 | """
Unofficial Python API for retrieving data from Delicious.com.
This module provides the following features plus some more:
* retrieving a URL's full public bookmarking history including
* users who bookmarked the URL including tags used for such bookmarks
and the creation time of the bookmark (up to YYYY-MM-DD granularity)
* top tags (up to a maximum of 10) including tag count
* title as stored on Delicious.com
* total number of bookmarks/users for this URL at Delicious.com
* retrieving a user's full bookmark collection, including any private bookmarks
if you know the corresponding password
* retrieving a user's full public tagging vocabulary, i.e. tags and tag counts
* retrieving a user's network information (network members and network fans)
* HTTP proxy support
* updated to support Delicious.com "version 2" (mini-relaunch as of August 2008)
The official Delicious.com API and the JSON/RSS feeds do not provide all
the functionality mentioned above, and in such cases this module will query
the Delicious.com *website* directly and extract the required information
by parsing the HTML code of the resulting Web pages (a kind of poor man's
web mining). The module is able to detect IP throttling, which is employed
by Delicious.com to temporarily block abusive HTTP request behavior, and
will raise a custom Python error to indicate that. Please be a nice netizen
and do not stress the Delicious.com service more than necessary.
It is strongly advised that you read the Delicious.com Terms of Use
before using this Python module. In particular, read section 5
'Intellectual Property'.
The code is licensed to you under version 2 of the GNU General Public
License.
More information about this module can be found at
http://www.michael-noll.com/wiki/Del.icio.us_Python_API
Changelog is available at
http://code.michael-noll.com/?p=deliciousapi;a=log
Copyright 2006-2010 Michael G. Noll <http://www.michael-noll.com/>
"""
__author__ = "Michael G. Noll"
__copyright__ = "(c) 2006-2010 Michael G. Noll"
__description__ = "Unofficial Python API for retrieving data from Delicious.com"
__email__ = "coding[AT]michael-REMOVEME-noll[DOT]com"
__license__ = "GPLv2"
__maintainer__ = "Michael G. Noll"
__status__ = "Development"
__url__ = "http://www.michael-noll.com/"
__version__ = "1.6.3"
import base64
import cgi
import datetime
import hashlib
from operator import itemgetter
import re
import socket
import time
import urllib2
import xml.dom.minidom
try:
from BeautifulSoup import BeautifulSoup
except:
print "ERROR: could not import BeautifulSoup Python module"
print
print "You can download BeautifulSoup from the Python Cheese Shop at"
print "http://cheeseshop.python.org/pypi/BeautifulSoup/"
print "or directly from http://www.crummy.com/software/BeautifulSoup/"
print
raise
try:
from app.lib import simplejson
except:
print "ERROR: could not import simplejson module"
print
print "Since version 1.5.0, DeliciousAPI requires the simplejson module."
print "You can download simplejson from the Python Cheese Shop at"
print "http://pypi.python.org/pypi/simplejson"
print
raise
class DeliciousUser(object):
"""This class wraps all available information about a user into one object.
Variables:
bookmarks:
A list of (url, tags, title, comment, timestamp) tuples representing
a user's bookmark collection.
url is a 'unicode'
tags is a 'list' of 'unicode' ([] if no tags)
title is a 'unicode'
comment is a 'unicode' (u"" if no comment)
timestamp is a 'datetime.datetime'
tags (read-only property):
A list of (tag, tag_count) tuples, aggregated over all a user's
retrieved bookmarks. The tags represent a user's tagging vocabulary.
username:
The Delicious.com account name of the user.
"""
def __init__(self, username, bookmarks=None):
assert username
self.username = username
self.bookmarks = bookmarks or []
def __str__(self):
total_tag_count = 0
total_tags = set()
for url, tags, title, comment, timestamp in self.bookmarks:
if tags:
total_tag_count += len(tags)
for tag in tags:
total_tags.add(tag)
return "[%s] %d bookmarks, %d tags (%d unique)" % \
(self.username, len(self.bookmarks), total_tag_count, len(total_tags))
def __repr__(self):
return self.username
def get_tags(self):
"""Returns a dictionary mapping tags to their tag count.
        For example, if the tag count of tag 'foo' is 23, then
        23 bookmarks were annotated with 'foo'. A different way
        to put it is that the user applied the tag 'foo' to 23
        of his bookmarks.
"""
total_tags = {}
for url, tags, title, comment, timestamp in self.bookmarks:
for tag in tags:
total_tags[tag] = total_tags.get(tag, 0) + 1
return total_tags
tags = property(fget=get_tags, doc="Returns a dictionary mapping tags to their tag count")
class DeliciousURL(object):
"""This class wraps all available information about a web document into one object.
Variables:
bookmarks:
A list of (user, tags, comment, timestamp) tuples, representing a
document's bookmark history. Generally, this variable is populated
via get_url(), so the number of bookmarks available in this variable
depends on the parameters of get_url(). See get_url() for more
information.
user is a 'unicode'
tags is a 'list' of 'unicode's ([] if no tags)
comment is a 'unicode' (u"" if no comment)
timestamp is a 'datetime.datetime' (granularity: creation *day*,
i.e. the day but not the time of day)
tags (read-only property):
A list of (tag, tag_count) tuples, aggregated over all a document's
retrieved bookmarks.
top_tags:
A list of (tag, tag_count) tuples, representing a document's so-called
"top tags", i.e. the up to 10 most popular tags for this document.
url:
The URL of the document.
hash (read-only property):
The MD5 hash of the URL.
title:
The document's title.
total_bookmarks:
The number of total bookmarks (posts) of the document.
Note that the value of total_bookmarks can be greater than the
length of "bookmarks" depending on how much (detailed) bookmark
data could be retrieved from Delicious.com.
Here's some more background information:
The value of total_bookmarks is the "real" number of bookmarks of
URL "url" stored at Delicious.com as reported by Delicious.com
itself (so it's the "ground truth"). On the other hand, the length
of "bookmarks" depends on iteratively scraped bookmarking data.
        Since scraping Delicious.com's Web pages has its limits in practice,
this means that DeliciousAPI could most likely not retrieve all
available bookmarks. In such a case, the value reported by
total_bookmarks is greater than the length of "bookmarks".
"""
def __init__(self, url, top_tags=None, bookmarks=None, title=u"", total_bookmarks=0):
assert url
self.url = url
self.top_tags = top_tags or []
self.bookmarks = bookmarks or []
self.title = title
self.total_bookmarks = total_bookmarks
def __str__(self):
total_tag_count = 0
total_tags = set()
for user, tags, comment, timestamp in self.bookmarks:
if tags:
total_tag_count += len(tags)
for tag in tags:
total_tags.add(tag)
return "[%s] %d total bookmarks (= users), %d tags (%d unique), %d out of 10 max 'top' tags" % \
(self.url, self.total_bookmarks, total_tag_count, \
len(total_tags), len(self.top_tags))
def __repr__(self):
return self.url
def get_tags(self):
"""Returns a dictionary mapping tags to their tag count.
For example, if the tag count of tag 'foo' is 23, then
23 bookmarks were annotated with 'foo'. A different way
to put it is that 23 users used the tag 'foo' when
bookmarking the URL.
@return: Dictionary mapping tags to their tag count.
"""
total_tags = {}
for user, tags, comment, timestamp in self.bookmarks:
for tag in tags:
total_tags[tag] = total_tags.get(tag, 0) + 1
return total_tags
tags = property(fget=get_tags, doc="Returns a dictionary mapping tags to their tag count")
def get_hash(self):
m = hashlib.md5()
m.update(self.url)
return m.hexdigest()
hash = property(fget=get_hash, doc="Returns the MD5 hash of the URL of this document")
class DeliciousAPI(object):
"""
This class provides a custom, unofficial API to the Delicious.com service.
Instead of using just the functionality provided by the official
Delicious.com API (which has limited features), this class retrieves
information from the Delicious.com website directly and extracts data from
the Web pages.
Note that Delicious.com will block clients with too many queries in a
certain time frame (similar to their API throttling). So be a nice citizen
and don't stress their website.
"""
def __init__(self,
http_proxy="",
tries=3,
wait_seconds=3,
user_agent="DeliciousAPI/%s (+http://www.michael-noll.com/wiki/Del.icio.us_Python_API)" % __version__,
timeout=30,
):
"""Set up the API module.
@param http_proxy: Optional, default: "".
Use an HTTP proxy for HTTP connections. Proxy support for
HTTPS is not available yet.
Format: "hostname:port" (e.g., "localhost:8080")
@type http_proxy: str
@param tries: Optional, default: 3.
Try the specified number of times when downloading a monitored
document fails. tries must be >= 1. See also wait_seconds.
@type tries: int
@param wait_seconds: Optional, default: 3.
Wait the specified number of seconds before re-trying to
download a monitored document. wait_seconds must be >= 0.
See also tries.
@type wait_seconds: int
@param user_agent: Optional, default: "DeliciousAPI/<version>
(+http://www.michael-noll.com/wiki/Del.icio.us_Python_API)".
The User-Agent HTTP Header to use when querying Delicous.com.
@type user_agent: str
@param timeout: Optional, default: 30.
Set network timeout. timeout must be >= 0.
@type timeout: int
"""
assert tries >= 1
assert wait_seconds >= 0
assert timeout >= 0
self.http_proxy = http_proxy
self.tries = tries
self.wait_seconds = wait_seconds
self.user_agent = user_agent
self.timeout = timeout
#socket.setdefaulttimeout(self.timeout)
def _query(self, path, host="delicious.com", user=None, password=None, use_ssl=False):
"""Queries Delicious.com for information, specified by (query) path.
@param path: The HTTP query path.
@type path: str
@param host: The host to query, default: "delicious.com".
@type host: str
@param user: The Delicious.com username if any, default: None.
@type user: str
@param password: The Delicious.com password of user, default: None.
@type password: unicode/str
@param use_ssl: Whether to use SSL encryption or not, default: False.
@type use_ssl: bool
@return: None on errors (i.e. on all HTTP status other than 200).
On success, returns the content of the HTML response.
"""
opener = None
handlers = []
# add HTTP Basic authentication if available
if user and password:
pwd_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
pwd_mgr.add_password(None, host, user, password)
basic_auth_handler = urllib2.HTTPBasicAuthHandler(pwd_mgr)
handlers.append(basic_auth_handler)
# add proxy support if requested
if self.http_proxy:
proxy_handler = urllib2.ProxyHandler({'http': 'http://%s' % self.http_proxy})
handlers.append(proxy_handler)
if handlers:
opener = urllib2.build_opener(*handlers)
else:
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', self.user_agent)]
data = None
tries = self.tries
if use_ssl:
protocol = "https"
else:
protocol = "http"
url = "%s://%s%s" % (protocol, host, path)
while tries > 0:
try:
f = opener.open(url)
data = f.read()
f.close()
break
except urllib2.HTTPError, e:
if e.code == 301:
raise DeliciousMovedPermanentlyWarning, "Delicious.com status %s - url moved permanently" % e.code
if e.code == 302:
raise DeliciousMovedTemporarilyWarning, "Delicious.com status %s - url moved temporarily" % e.code
elif e.code == 401:
raise DeliciousUnauthorizedError, "Delicious.com error %s - unauthorized (authentication failed?)" % e.code
elif e.code == 403:
raise DeliciousForbiddenError, "Delicious.com error %s - forbidden" % e.code
elif e.code == 404:
raise DeliciousNotFoundError, "Delicious.com error %s - url not found" % e.code
elif e.code == 500:
raise Delicious500Error, "Delicious.com error %s - server problem" % e.code
elif e.code == 503 or e.code == 999:
raise DeliciousThrottleError, "Delicious.com error %s - unable to process request (your IP address has been throttled/blocked)" % e.code
else:
                    raise DeliciousUnknownError, "Delicious.com error %s - unknown error" % e.code
except urllib2.URLError, e:
time.sleep(self.wait_seconds)
except socket.error, msg:
# sometimes we get a "Connection Refused" error
# wait a bit and then try again
time.sleep(self.wait_seconds)
#finally:
# f.close()
tries -= 1
return data
def get_url(self, url, max_bookmarks=50, sleep_seconds=1):
"""
Returns a DeliciousURL instance representing the Delicious.com history of url.
Generally, this method is what you want for getting title, bookmark, tag,
and user information about a URL.
Delicious only returns up to 50 bookmarks per URL. This means that
we have to do subsequent queries plus parsing if we want to retrieve
more than 50. Roughly speaking, the processing time of get_url()
increases linearly with the number of 50-bookmarks-chunks; i.e.
it will take 10 times longer to retrieve 500 bookmarks than 50.
@param url: The URL of the web document to be queried for.
@type url: str
@param max_bookmarks: Optional, default: 50.
See the documentation of get_bookmarks() for more information
as get_url() uses get_bookmarks() to retrieve a url's
bookmarking history.
@type max_bookmarks: int
@param sleep_seconds: Optional, default: 1.
See the documentation of get_bookmarks() for more information
as get_url() uses get_bookmarks() to retrieve a url's
bookmarking history. sleep_seconds must be >= 1 to comply with
Delicious.com's Terms of Use.
@type sleep_seconds: int
@return: DeliciousURL instance representing the Delicious.com history
of url.
"""
# we must wait at least 1 second between subsequent queries to
# comply with Delicious.com's Terms of Use
assert sleep_seconds >= 1
document = DeliciousURL(url)
m = hashlib.md5()
m.update(url)
hash = m.hexdigest()
path = "/v2/json/urlinfo/%s" % hash
data = self._query(path, host="feeds.delicious.com")
if data:
urlinfo = {}
try:
urlinfo = simplejson.loads(data)
if urlinfo:
urlinfo = urlinfo[0]
else:
urlinfo = {}
except TypeError:
pass
try:
document.title = urlinfo['title'] or u""
except KeyError:
pass
try:
top_tags = urlinfo['top_tags'] or {}
if top_tags:
document.top_tags = sorted(top_tags.iteritems(), key=itemgetter(1), reverse=True)
else:
document.top_tags = []
except KeyError:
pass
try:
document.total_bookmarks = int(urlinfo['total_posts'])
except (KeyError, ValueError):
pass
document.bookmarks = self.get_bookmarks(url=url, max_bookmarks=max_bookmarks, sleep_seconds=sleep_seconds)
return document
def get_network(self, username):
"""
Returns the user's list of followees and followers.
Followees are users in his Delicious "network", i.e. those users whose
bookmark streams he's subscribed to. Followers are his Delicious.com
"fans", i.e. those users who have subscribed to the given user's
bookmark stream).
Example:
A --------> --------> C
D --------> B --------> E
F --------> --------> F
followers followees
of B of B
Arrows from user A to user B denote that A has subscribed to B's
bookmark stream, i.e. A is "following" or "tracking" B.
Note that user F is both a followee and a follower of B, i.e. F tracks
B and vice versa. In Delicious.com terms, F is called a "mutual fan"
of B.
Comparing this network concept to information retrieval, one could say
that followers are incoming links and followees outgoing links of B.
@param username: Delicous.com username for which network information is
retrieved.
@type username: unicode/str
@return: Tuple of two lists ([<followees>, [<followers>]), where each list
contains tuples of (username, tracking_since_timestamp).
If a network is set as private, i.e. hidden from public view,
(None, None) is returned.
If a network is public but empty, ([], []) is returned.
"""
assert username
followees = followers = None
# followees (network members)
path = "/v2/json/networkmembers/%s" % username
data = None
try:
data = self._query(path, host="feeds.delicious.com")
except DeliciousForbiddenError:
pass
if data:
followees = []
users = []
try:
users = simplejson.loads(data)
except TypeError:
pass
uname = tracking_since = None
for user in users:
# followee's username
try:
uname = user['user']
except KeyError:
pass
# try to convert uname to Unicode
if uname:
try:
# we assume UTF-8 encoding
uname = uname.decode('utf-8')
except UnicodeDecodeError:
pass
# time when the given user started tracking this user
try:
tracking_since = datetime.datetime.strptime(user['dt'], "%Y-%m-%dT%H:%M:%SZ")
except KeyError:
pass
if uname:
followees.append( (uname, tracking_since) )
# followers (network fans)
path = "/v2/json/networkfans/%s" % username
data = None
try:
data = self._query(path, host="feeds.delicious.com")
except DeliciousForbiddenError:
pass
if data:
followers = []
users = []
try:
users = simplejson.loads(data)
except TypeError:
pass
uname = tracking_since = None
for user in users:
# fan's username
try:
uname = user['user']
except KeyError:
pass
# try to convert uname to Unicode
if uname:
try:
# we assume UTF-8 encoding
uname = uname.decode('utf-8')
except UnicodeDecodeError:
pass
# time when fan started tracking the given user
try:
tracking_since = datetime.datetime.strptime(user['dt'], "%Y-%m-%dT%H:%M:%SZ")
except KeyError:
pass
if uname:
followers.append( (uname, tracking_since) )
return ( followees, followers )
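    # A minimal usage sketch (the account name is an illustrative placeholder):
    #
    #   api = DeliciousAPI()
    #   followees, followers = api.get_network("exampleuser")
    #   if followees is not None and followers is not None:
    #       mutual_fans = set(u for u, _ in followees) & \
    #                     set(u for u, _ in followers)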
def get_bookmarks(self, url=None, username=None, max_bookmarks=50, sleep_seconds=1):
"""
Returns the bookmarks of url or user, respectively.
Delicious.com only returns up to 50 bookmarks per URL on its website.
This means that we have to do subsequent queries plus parsing if
we want to retrieve more than 50. Roughly speaking, the processing
time of get_bookmarks() increases linearly with the number of
50-bookmarks-chunks; i.e. it will take 10 times longer to retrieve
500 bookmarks than 50.
@param url: The URL of the web document to be queried for.
Cannot be used together with 'username'.
@type url: str
@param username: The Delicious.com username to be queried for.
Cannot be used together with 'url'.
@type username: str
@param max_bookmarks: Optional, default: 50.
Maximum number of bookmarks to retrieve. Set to 0 to disable
this limitation/the maximum and retrieve all available
bookmarks of the given url.
Bookmarks are sorted so that newer bookmarks are first.
Setting max_bookmarks to 50 means that get_bookmarks() will retrieve
the 50 most recent bookmarks of the given url.
In the case of getting bookmarks of a URL (url is set),
get_bookmarks() will take *considerably* longer to run
for pages with lots of bookmarks when setting max_bookmarks
to a high number or when you completely disable the limit.
Delicious returns only up to 50 bookmarks per result page,
so for example retrieving 250 bookmarks requires 5 HTTP
connections and parsing 5 HTML pages plus wait time between
queries (to comply with delicious' Terms of Use; see
also parameter 'sleep_seconds').
In the case of getting bookmarks of a user (username is set),
the same restrictions as for a URL apply with the exception
that we can retrieve up to 100 bookmarks per HTTP query
(instead of only up to 50 per HTTP query for a URL).
@type max_bookmarks: int
@param sleep_seconds: Optional, default: 1.
Wait the specified number of seconds between subsequent
queries in case that there are multiple pages of bookmarks
for the given url. sleep_seconds must be >= 1 to comply with
Delicious.com's Terms of Use.
See also parameter 'max_bookmarks'.
@type sleep_seconds: int
@return: Returns the bookmarks of url or user, respectively.
For urls, it returns a list of (user, tags, comment, timestamp)
tuples.
For users, it returns a list of (url, tags, title, comment,
timestamp) tuples.
Bookmarks are sorted "descendingly" by creation time, i.e. newer
bookmarks come first.
"""
# we must wait at least 1 second between subsequent queries to
# comply with delicious' Terms of Use
assert sleep_seconds >= 1
# url XOR username
assert bool(username) is not bool(url)
# maximum number of urls/posts Delicious.com will display
# per page on its website
max_html_count = 100
# maximum number of pages that Delicious.com will display;
# currently, the maximum number of pages is 20. Delicious.com
# allows to go beyond page 20 via pagination, but page N (for
# N > 20) will always display the same content as page 20.
max_html_pages = 20
path = None
if url:
m = hashlib.md5()
m.update(url)
hash = m.hexdigest()
            # path will change later on if there are multiple pages of bookmarks
# for the given url
path = "/url/%s" % hash
elif username:
            # path will change later on if there are multiple pages of bookmarks
# for the given username
path = "/%s?setcount=%d" % (username, max_html_count)
else:
raise Exception('You must specify either url or user.')
page_index = 1
bookmarks = []
while path and page_index <= max_html_pages:
data = self._query(path)
path = None
if data:
# extract bookmarks from current page
if url:
bookmarks.extend(self._extract_bookmarks_from_url_history(data))
else:
bookmarks.extend(self._extract_bookmarks_from_user_history(data))
# stop scraping if we already have as many bookmarks as we want
if (len(bookmarks) >= max_bookmarks) and max_bookmarks != 0:
break
else:
# check if there are multiple pages of bookmarks for this
# url on Delicious.com
soup = BeautifulSoup(data)
paginations = soup.findAll("div", id="pagination")
if paginations:
# find next path
nexts = paginations[0].findAll("a", attrs={ "class": "pn next" })
if nexts and (max_bookmarks == 0 or len(bookmarks) < max_bookmarks) and len(bookmarks) > 0:
# e.g. /url/2bb293d594a93e77d45c2caaf120e1b1?show=all&page=2
path = nexts[0]['href']
if username:
path += "&setcount=%d" % max_html_count
page_index += 1
# wait one second between queries to be compliant with
# delicious' Terms of Use
time.sleep(sleep_seconds)
if max_bookmarks > 0:
return bookmarks[:max_bookmarks]
else:
return bookmarks
def _extract_bookmarks_from_url_history(self, data):
"""
Extracts user bookmarks from a URL's history page on Delicious.com.
The Python library BeautifulSoup is used to parse the HTML page.
@param data: The HTML source of a URL history Web page on Delicious.com.
@type data: str
@return: list of user bookmarks of the corresponding URL
"""
bookmarks = []
soup = BeautifulSoup(data)
bookmark_elements = soup.findAll("div", attrs={"class": re.compile("^bookmark\s*")})
timestamp = None
for bookmark_element in bookmark_elements:
# extract bookmark creation time
#
# this timestamp has to "persist" until a new timestamp is
# found (delicious only provides the creation time data for the
# first bookmark in the list of bookmarks for a given day
dategroups = bookmark_element.findAll("div", attrs={"class": "dateGroup"})
if dategroups:
spans = dategroups[0].findAll('span')
if spans:
date_str = spans[0].contents[0].strip()
timestamp = datetime.datetime.strptime(date_str, '%d %b %y')
# extract comments
comment = u""
datas = bookmark_element.findAll("div", attrs={"class": "data"})
if datas:
divs = datas[0].findAll("div", attrs={"class": "description"})
if divs:
comment = divs[0].contents[0].strip()
# extract tags
user_tags = []
tagdisplays = bookmark_element.findAll("div", attrs={"class": "tagdisplay"})
if tagdisplays:
spans = tagdisplays[0].findAll("span", attrs={"class": "tagItem"})
for span in spans:
tag = span.contents[0]
user_tags.append(tag)
# extract user information
metas = bookmark_element.findAll("div", attrs={"class": "meta"})
if metas:
links = metas[0].findAll("a", attrs={"class": "user user-tag"})
if links:
user_a = links[0]
spans = user_a.findAll('span')
if spans:
try:
user = spans[0].contents[0]
                        except IndexError:
                            # WORKAROUND: it seems there is a bug on Delicious.com where
                            # sometimes a bookmark is shown in a URL history without any
                            # associated Delicious username (username is empty); this could
                            # be caused by special characters in the username or other things
                            #
                            # this problem of Delicious is very rare, so we just skip such
                            # entries until they find a fix
                            continue
bookmarks.append( (user, user_tags, comment, timestamp) )
return bookmarks
def _extract_bookmarks_from_user_history(self, data):
"""
Extracts a user's bookmarks from his user page on Delicious.com.
The Python library BeautifulSoup is used to parse the HTML page.
@param data: The HTML source of a user page on Delicious.com.
@type data: str
@return: list of bookmarks of the corresponding user
"""
bookmarks = []
soup = BeautifulSoup(data)
ul = soup.find("ul", id="bookmarklist")
if ul:
bookmark_elements = ul.findAll("div", attrs={"class": re.compile("^bookmark\s*")})
timestamp = None
for bookmark_element in bookmark_elements:
# extract bookmark creation time
#
# this timestamp has to "persist" until a new timestamp is
# found (delicious only provides the creation time data for the
# first bookmark in the list of bookmarks for a given day
dategroups = bookmark_element.findAll("div", attrs={"class": "dateGroup"})
if dategroups:
spans = dategroups[0].findAll('span')
if spans:
date_str = spans[0].contents[0].strip()
timestamp = datetime.datetime.strptime(date_str, '%d %b %y')
# extract url, title and comments
url = u""
title = u""
comment = u""
datas = bookmark_element.findAll("div", attrs={"class": "data"})
if datas:
links = datas[0].findAll("a", attrs={"class": re.compile("^taggedlink\s*")})
if links:
title = links[0].contents[0].strip()
url = links[0]['href']
divs = datas[0].findAll("div", attrs={"class": "description"})
if divs:
comment = divs[0].contents[0].strip()
# extract tags
url_tags = []
tagdisplays = bookmark_element.findAll("div", attrs={"class": "tagdisplay"})
if tagdisplays:
spans = tagdisplays[0].findAll("span", attrs={"class": "tagItem"})
for span in spans:
tag = span.contents[0]
url_tags.append(tag)
bookmarks.append( (url, url_tags, title, comment, timestamp) )
return bookmarks
def get_user(self, username, password=None, max_bookmarks=50, sleep_seconds=1):
"""Retrieves a user's bookmarks from Delicious.com.
If a correct username AND password are supplied, a user's *full*
bookmark collection (which also includes private bookmarks) is
retrieved. Data communication is encrypted using SSL in this case.
If no password is supplied, only the *public* bookmarks of the user
are retrieved. Here, the parameter 'max_bookmarks' specifies how
many public bookmarks will be retrieved (default: 50). Set the
parameter to 0 to retrieve all public bookmarks.
This function can be used to backup all of a user's bookmarks if
called with a username and password.
@param username: The Delicious.com username.
@type username: str
@param password: Optional, default: None.
The user's Delicious.com password. If password is set,
all communication with Delicious.com is SSL-encrypted.
@type password: unicode/str
@param max_bookmarks: Optional, default: 50.
See the documentation of get_bookmarks() for more
information as get_url() uses get_bookmarks() to
retrieve a url's bookmarking history.
The parameter is NOT used when a password is specified
because in this case the *full* bookmark collection of
a user will be retrieved.
@type max_bookmarks: int
@param sleep_seconds: Optional, default: 1.
See the documentation of get_bookmarks() for more information as
get_url() uses get_bookmarks() to retrieve a url's bookmarking
history. sleep_seconds must be >= 1 to comply with Delicious.com's
Terms of Use.
@type sleep_seconds: int
@return: DeliciousUser instance
"""
assert username
user = DeliciousUser(username)
bookmarks = []
if password:
# We have username AND password, so we call
# the official Delicious.com API.
path = "/v1/posts/all"
data = self._query(path, host="api.del.icio.us", use_ssl=True, user=username, password=password)
if data:
soup = BeautifulSoup(data)
elements = soup.findAll("post")
for element in elements:
url = element["href"]
title = element["description"] or u""
comment = element["extended"] or u""
tags = []
if element["tag"]:
tags = element["tag"].split()
timestamp = datetime.datetime.strptime(element["time"], "%Y-%m-%dT%H:%M:%SZ")
bookmarks.append( (url, tags, title, comment, timestamp) )
user.bookmarks = bookmarks
else:
# We have only the username, so we extract data from
# the user's JSON feed. However, the feed is restricted
# to the most recent public bookmarks of the user, which
# is about 100 if any. So if we need more than 100, we start
# scraping the Delicious.com website directly
if max_bookmarks > 0 and max_bookmarks <= 100:
path = "/v2/json/%s/stackoverflow?count=100" % username
data = self._query(path, host="feeds.delicious.com", user=username)
if data:
posts = []
try:
posts = simplejson.loads(data)
except TypeError:
pass
url = timestamp = None
title = comment = u""
tags = []
for post in posts:
# url
try:
url = post['u']
except KeyError:
pass
# title
try:
title = post['d']
except KeyError:
pass
# tags
try:
tags = post['t']
except KeyError:
pass
if not tags:
tags = [u"system:unfiled"]
# comment / notes
try:
comment = post['n']
except KeyError:
pass
# bookmark creation time
try:
timestamp = datetime.datetime.strptime(post['dt'], "%Y-%m-%dT%H:%M:%SZ")
except KeyError:
pass
bookmarks.append( (url, tags, title, comment, timestamp) )
user.bookmarks = bookmarks[:max_bookmarks]
else:
# TODO: retrieve the first 100 bookmarks via JSON before
# falling back to scraping the delicous.com website
user.bookmarks = self.get_bookmarks(username=username, max_bookmarks=max_bookmarks, sleep_seconds=sleep_seconds)
return user
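    # A minimal backup sketch (the credentials are illustrative placeholders):
    #
    #   api = DeliciousAPI()
    #   user = api.get_user("exampleuser", password="secret")
    #   for url, tags, title, comment, timestamp in user.bookmarks:
    #       print url, tags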
def get_urls(self, tag=None, popular=True, max_urls=100, sleep_seconds=1):
"""
Returns the list of recent URLs (of web documents) tagged with a given tag.
This is very similar to parsing Delicious' RSS/JSON feeds directly,
but this function will return up to 2,000 links compared to a maximum
of 100 links when using the official feeds (with query parameter
count=100).
The return list of links will be sorted by recency in descending order,
i.e. newest items first.
Note that even when setting max_urls, get_urls() cannot guarantee that
it can retrieve *at least* this many URLs. It is really just an upper
bound.
@param tag: Retrieve links which have been tagged with the given tag.
If tag is not set (default), links will be retrieved from the
Delicious.com front page (aka "delicious hotlist").
@type tag: unicode/str
@param popular: If true (default), retrieve only popular links (i.e.
/popular/<tag>). Otherwise, the most recent links tagged with
the given tag will be retrieved (i.e. /tag/<tag>).
As of January 2009, it seems that Delicious.com modified the list
of popular tags to contain only up to a maximum of 15 URLs.
This also means that setting max_urls to values larger than 15
will not change the results of get_urls().
So if you are interested in more URLs, set the "popular" parameter
to false.
Note that if you set popular to False, the returned list of URLs
might contain duplicate items. This is due to the way Delicious.com
creates its /tag/<tag> Web pages. So if you need a certain
number of unique URLs, you have to take care of that in your
own code.
@type popular: bool
@param max_urls: Retrieve at most max_urls links. The default is 100,
which is the maximum number of links that can be retrieved by
parsing the official JSON feeds. The maximum value of max_urls
in practice is 2000 (currently). If it is set higher, Delicious
will return the same links over and over again, giving lots of
duplicate items.
@type max_urls: int
@param sleep_seconds: Optional, default: 1.
Wait the specified number of seconds between subsequent queries in
case that there are multiple pages of bookmarks for the given url.
Must be greater than or equal to 1 to comply with Delicious.com's
Terms of Use.
See also parameter 'max_urls'.
@type sleep_seconds: int
@return: The list of recent URLs (of web documents) tagged with a given tag.
"""
assert sleep_seconds >= 1
urls = []
path = None
        if tag is None or 0 < max_urls <= 100:
# use official JSON feeds
max_json_count = 100
if tag:
# tag-specific JSON feed
if popular:
path = "/v2/json/popular/%s?count=%d" % (tag, max_json_count)
else:
path = "/v2/json/tag/%s?count=%d" % (tag, max_json_count)
else:
# Delicious.com hotlist
path = "/v2/json/?count=%d" % (max_json_count)
data = self._query(path, host="feeds.delicious.com")
if data:
posts = []
try:
posts = simplejson.loads(data)
except TypeError:
pass
for post in posts:
# url
try:
url = post['u']
if url:
urls.append(url)
except KeyError:
pass
else:
# maximum number of urls/posts Delicious.com will display
# per page on its website
max_html_count = 100
# maximum number of pages that Delicious.com will display;
# currently, the maximum number of pages is 20. Delicious.com
# allows to go beyond page 20 via pagination, but page N (for
# N > 20) will always display the same content as page 20.
max_html_pages = 20
if popular:
path = "/popular/%s?setcount=%d" % (tag, max_html_count)
else:
path = "/tag/%s?setcount=%d" % (tag, max_html_count)
page_index = 1
urls = []
while path and page_index <= max_html_pages:
data = self._query(path)
path = None
if data:
# extract urls from current page
soup = BeautifulSoup(data)
                    links = soup.findAll("a", attrs={"class": re.compile(r"^taggedlink\s*")})
for link in links:
try:
url = link['href']
if url:
urls.append(url)
except KeyError:
pass
# check if there are more multiple pages of urls
soup = BeautifulSoup(data)
paginations = soup.findAll("div", id="pagination")
if paginations:
# find next path
nexts = paginations[0].findAll("a", attrs={ "class": "pn next" })
if nexts and (max_urls == 0 or len(urls) < max_urls) and len(urls) > 0:
# e.g. /url/2bb293d594a93e77d45c2caaf120e1b1?show=all&page=2
path = nexts[0]['href']
path += "&setcount=%d" % max_html_count
page_index += 1
# wait between queries to Delicious.com to be
# compliant with its Terms of Use
time.sleep(sleep_seconds)
if max_urls > 0:
return urls[:max_urls]
else:
return urls
def get_tags_of_user(self, username):
"""
Retrieves user's public tags and their tag counts from Delicious.com.
The tags represent a user's full public tagging vocabulary.
DeliciousAPI uses the official JSON feed of the user. We could use
RSS here, but the JSON feed has proven to be faster in practice.
@param username: The Delicious.com username.
@type username: str
@return: Dictionary mapping tags to their tag counts.
"""
tags = {}
path = "/v2/json/tags/%s" % username
data = self._query(path, host="feeds.delicious.com")
if data:
try:
tags = simplejson.loads(data)
except TypeError:
pass
return tags
def get_number_of_users(self, url):
"""get_number_of_users() is obsolete and has been removed. Please use get_url() instead."""
reason = "get_number_of_users() is obsolete and has been removed. Please use get_url() instead."
raise Exception(reason)
def get_common_tags_of_url(self, url):
"""get_common_tags_of_url() is obsolete and has been removed. Please use get_url() instead."""
reason = "get_common_tags_of_url() is obsolete and has been removed. Please use get_url() instead."
raise Exception(reason)
def _html_escape(self, s):
"""HTML-escape a string or object.
This converts any non-string objects passed into it to strings
(actually, using unicode()). All values returned are
non-unicode strings (using "&#num;" entities for all non-ASCII
characters).
None is treated specially, and returns the empty string.
@param s: The string that needs to be escaped.
@type s: str
@return: The escaped string.
"""
if s is None:
return ''
if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = str(s)
s = cgi.escape(s, True)
if isinstance(s, unicode):
s = s.encode('ascii', 'xmlcharrefreplace')
return s
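# Usage sketch (hypothetical; assumes network access to delicious.com and
# feeds.delicious.com). Popular links are capped at ~15 by the site, so
# larger result sets need popular=False, which falls back to page scraping
# and may contain duplicates.
def _example_api_usage():
    api = DeliciousAPI()
    popular = api.get_urls(tag="python", popular=True, max_urls=15)
    recent = api.get_urls(tag="python", popular=False, max_urls=200)
    tags = api.get_tags_of_user("example_user")
    return popular, recent, tags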
class DeliciousError(Exception):
"""Used to indicate that an error occurred when trying to access Delicious.com via its API."""
class DeliciousWarning(Exception):
"""Used to indicate a warning when trying to access Delicious.com via its API.
Warnings are raised when it is useful to alert the user of some condition
where that condition doesn't warrant raising an exception and terminating
the program. For example, we issue a warning when Delicious.com returns a
HTTP status code for redirections (3xx).
"""
class DeliciousThrottleError(DeliciousError):
"""Used to indicate that the client computer (i.e. its IP address) has been temporarily blocked by Delicious.com."""
pass
class DeliciousUnknownError(DeliciousError):
"""Used to indicate that Delicious.com returned an (HTTP) error which we don't know how to handle yet."""
pass
class DeliciousUnauthorizedError(DeliciousError):
"""Used to indicate that Delicious.com returned a 401 Unauthorized error.
Most of the time, the user credentials for accessing restricted functions
of the official Delicious.com API are incorrect.
"""
pass
class DeliciousForbiddenError(DeliciousError):
"""Used to indicate that Delicious.com returned a 403 Forbidden error.
"""
pass
class DeliciousNotFoundError(DeliciousError):
"""Used to indicate that Delicious.com returned a 404 Not Found error.
Most of the time, retrying some seconds later fixes the problem
(because we only query existing pages with this API).
"""
pass
class Delicious500Error(DeliciousError):
"""Used to indicate that Delicious.com returned a 500 error.
Most of the time, retrying some seconds later fixes the problem.
"""
pass
class DeliciousMovedPermanentlyWarning(DeliciousWarning):
"""Used to indicate that Delicious.com returned a 301 Found (Moved Permanently) redirection."""
pass
class DeliciousMovedTemporarilyWarning(DeliciousWarning):
"""Used to indicate that Delicious.com returned a 302 Found (Moved Temporarily) redirection."""
pass
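# Error-handling sketch (hypothetical): DeliciousThrottleError signals that
# the client IP has been blocked temporarily, so the only sensible reaction
# is to back off before retrying.
def _example_error_handling():
    api = DeliciousAPI()
    try:
        return api.get_tags_of_user("example_user")
    except DeliciousThrottleError:
        time.sleep(60)  # back off before retrying
        return {}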
__all__ = ['DeliciousAPI', 'DeliciousURL', 'DeliciousError', 'DeliciousWarning', 'DeliciousThrottleError', 'DeliciousUnauthorizedError', 'DeliciousForbiddenError', 'DeliciousUnknownError', 'DeliciousNotFoundError', 'Delicious500Error', 'DeliciousMovedPermanentlyWarning', 'DeliciousMovedTemporarilyWarning']
if __name__ == "__main__":
d = DeliciousAPI()
max_bookmarks = 50
url = 'http://www.michael-noll.com/wiki/Del.icio.us_Python_API'
print "Retrieving Delicious.com information about url"
print "'%s'" % url
print "Note: This might take some time..."
print "========================================================="
document = d.get_url(url, max_bookmarks=max_bookmarks)
print document
| bsd-3-clause | 3,970,781,760,770,663,400 | 39.263368 | 225 | 0.565451 | false | 4.53198 | false | false | false |
hugobranquinho/ines | ines/__init__.py | 1 | 1198 | # -*- coding: utf-8 -*-
import datetime
import errno
from os import getpid, linesep, uname
from os.path import join as os_join
import sys
from tempfile import gettempdir
from time import time as _now_time
APPLICATIONS = {}
CAMELCASE_UPPER_WORDS = {'CSV'}
MARKER = object()
API_CONFIGURATION_EXTENSIONS = {}
DEFAULT_RENDERERS = {}
DEFAULT_METHODS = ['GET', 'PUT', 'POST', 'DELETE']
IGNORE_FULL_NAME_WORDS = ['de', 'da', 'e', 'do']
PROCESS_ID = getpid()
SYSTEM_NAME, DOMAIN_NAME, SYSTEM_RELEASE, SYSTEM_VERSION, MACHINE = uname()
DEFAULT_CACHE_DIRPATH = os_join(gettempdir(), 'ines-cache')
DEFAULT_RETRY_ERRNO = {errno.ESTALE}
DEFAULT_RETRY_ERRNO.add(116) # Stale NFS file handle
OPEN_BLOCK_SIZE = 2**18
# datetime now without microseconds
_now = datetime.datetime.now
NOW = lambda: _now().replace(microsecond=0)
# timestamp without microseconds
NOW_TIME = lambda: int(_now_time())
TODAY_DATE = datetime.date.today
HTML_NEW_LINE = '<br/>'
NEW_LINE = linesep
NEW_LINE_AS_BYTES = NEW_LINE.encode()
def lazy_import_module(name):
module = sys.modules.get(name, MARKER)
if module is not MARKER:
return module
else:
__import__(name)
return sys.modules[name]
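# Minimal sketch of lazy_import_module(): the first call triggers the import,
# later calls are a plain sys.modules lookup. The joined path below is only
# illustrative.
def _example_lazy_import():
    os_path = lazy_import_module('os.path')
    return os_path.join(DEFAULT_CACHE_DIRPATH, NOW().isoformat())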
| mit | 6,989,389,053,462,082,000 | 23.958333 | 75 | 0.69616 | false | 3.152632 | false | false | false |
Dwolla/arbalest | examples/s3_json_object_to_redshift.py | 1 | 2379 | #!/usr/bin/env python
import psycopg2
from arbalest.configuration import env
from arbalest.redshift import S3CopyPipeline
from arbalest.redshift.schema import JsonObject, Property
"""
**Example: Bulk copy JSON objects from S3 bucket to Redshift table**
Arbalest orchestrates data loading using pipelines. Each `Pipeline`
can have one or many steps that are made up of three parts:
metadata: Path in an S3 bucket to store information needed for the copy process.
`s3://{BUCKET_NAME}/path_to_save_pipeline_metadata`
source: Path in an S3 bucket where data to be copied from is located.
`s3://{BUCKET_NAME}/path_of_source_data` consisting of JSON files:
```
{
"id": "66bc8153-d6d9-4351-bada-803330f22db7",
"someNumber": 1
}
```
schema: Definition of JSON objects to map into Redshift rows using a
`JsonObject` mapper which consists of one or many `Property` declarations.
By default the name of the JSON property is used as the column, but can be set
to a custom column name.
"""
if __name__ == '__main__':
pipeline = S3CopyPipeline(
aws_access_key_id=env('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=env('AWS_SECRET_ACCESS_KEY'),
bucket=env('BUCKET_NAME'),
db_connection=psycopg2.connect(env('REDSHIFT_CONNECTION')))
pipeline.bulk_copy(metadata='path_to_save_pipeline_metadata',
source='path_of_source_data',
schema=JsonObject('destination_table_name',
Property('id', 'VARCHAR(36)'),
Property('someNumber', 'INTEGER',
'custom_column_name')))
pipeline.manifest_copy(metadata='path_to_save_pipeline_metadata',
source='path_of_incremental_source_data',
schema=JsonObject('incremental_destination_table_name',
Property('id', 'VARCHAR(36)'),
Property('someNumber', 'INTEGER',
'custom_column_name')))
pipeline.sql(('SELECT someNumber + %s '
'INTO some_olap_table FROM destination_table_name', 1),
('SELECT * INTO destination_table_name_copy '
'FROM destination_table_name'))
pipeline.run()
| mit | 2,963,051,004,563,553,000 | 38.65 | 82 | 0.599412 | false | 4.12305 | false | false | false |
hlmnrmr/superdesk-core | superdesk/tests/steps.py | 1 | 91099 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import time
import shutil
from base64 import b64encode
from datetime import datetime, timedelta
from os.path import basename
from re import findall
from unittest.mock import patch
from urllib.parse import urlparse
import arrow
from behave import given, when, then # @UnresolvedImport
from bson import ObjectId
from eve.io.mongo import MongoJSONEncoder
from eve.methods.common import parse
from eve.utils import ParsedRequest, config
from flask import json
from wooper.assertions import (
assert_in, assert_equal, assertions
)
from wooper.general import (
fail_and_print_body, apply_path, parse_json_response,
WooperAssertionError
)
from wooper.expect import (
expect_status, expect_status_in,
expect_json, expect_json_length,
expect_json_contains, expect_json_not_contains,
expect_headers_contain,
)
import superdesk
from superdesk import tests
from superdesk.io import registered_feeding_services
from superdesk.io.commands.update_ingest import LAST_ITEM_UPDATE
from superdesk import default_user_preferences, get_resource_service, utc, etree
from superdesk.io.feed_parsers import XMLFeedParser, EMailRFC822FeedParser
from superdesk.utc import utcnow, get_expiry_date
from superdesk.tests import get_prefixed_url, set_placeholder
from apps.dictionaries.resource import DICTIONARY_FILE
from superdesk.filemeta import get_filemeta
external_url = 'http://thumbs.dreamstime.com/z/digital-nature-10485007.jpg'
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
ANALYTICS_DATETIME_FORMAT = "%Y-%m-%d %H:00:00"
def test_json(context):
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data, response_data), True,
msg=str(context_data) + '\n != \n' + str(response_data))
return response_data
def test_json_with_string_field_value(context, field):
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data[field], response_data[field]), True,
msg=str(context_data) + '\n != \n' + str(response_data))
return response_data
def test_key_is_present(key, context, response):
"""Test if given key is present in response.
    In case the context value is empty ("", {}, []) it checks that it's non-empty in the response.
    If it's set to false in the context, it checks that the response value is falsy/empty too.
:param key
:param context
:param response
"""
assert not isinstance(context[key], bool) or not response[key], \
'"%s" should be empty or false, but it was "%s" in (%s)' % (key, response[key], response)
def test_key_is_not_present(key, response):
"""Test if given key is not present in response.
:param key
:param response
"""
assert key not in response, \
'"%s" should not be present, but it was "%s" in (%s)' % (key, response[key], response)
def assert_is_now(val, key):
"""Assert that given datetime value is now (with 2s tolerance).
:param val: datetime
:param key: val label - used for error reporting
"""
now = arrow.get()
val = arrow.get(val)
assert val + timedelta(seconds=2) > now, '%s should be now, it is %s' % (key, val)
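# Sketch: a value produced "just now" passes the 2 second tolerance check.
def _example_assert_is_now():
    assert_is_now(utcnow(), 'versioncreated')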
def json_match(context_data, response_data):
if isinstance(context_data, dict):
if (not isinstance(response_data, dict)):
return False
for key in context_data:
if context_data[key] == "__none__":
assert response_data[key] is None
continue
if context_data[key] == "__no_value__":
test_key_is_not_present(key, response_data)
continue
if key not in response_data:
print(key, ' not in ', response_data)
return False
if context_data[key] == "__any_value__":
test_key_is_present(key, context_data, response_data)
continue
if context_data[key] == "__now__":
assert_is_now(response_data[key], key)
continue
if context_data[key] == "__empty__":
assert len(response_data[key]) == 0, '%s is not empty' % key
continue
if not json_match(context_data[key], response_data[key]):
return False
return True
elif isinstance(context_data, list):
for item_context in context_data:
found = False
for item_response in response_data:
if json_match(item_context, item_response):
found = True
break
if not found:
print(item_context, ' not in ', json.dumps(response_data, indent=2))
return False
return True
elif not isinstance(context_data, dict):
if context_data != response_data:
print('---' + str(context_data) + '---\n', ' != \n', '---' + str(response_data) + '---\n')
return context_data == response_data
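# Illustrative sketch of the placeholder semantics implemented above:
# "__any_value__" only requires the key to be present, "__empty__" requires
# an empty value, and extra keys in the response are ignored.
def _example_json_match():
    context_data = {'state': '__any_value__', 'errors': '__empty__'}
    response_data = {'state': 'published', 'errors': [], 'extra': 1}
    assert json_match(context_data, response_data)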
def get_fixture_path(context, fixture):
path = context.app.settings['BEHAVE_TESTS_FIXTURES_PATH']
return os.path.join(path, fixture)
def get_macro_path(macro):
abspath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return os.path.join(abspath, 'macros', macro)
def get_self_href(resource, context):
    assert '_links' in resource, 'expected "_links", but got only %s' % (resource)
return resource['_links']['self']['href']
def get_res(url, context):
response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
expect_status(response, 200)
return json.loads(response.get_data())
def parse_date(datestr):
return datetime.strptime(datestr, DATETIME_FORMAT)
def format_date(date_to_format):
return date_to_format.strftime(DATETIME_FORMAT)
def format_date_analytics(date_to_format):
return date_to_format.strftime(ANALYTICS_DATETIME_FORMAT)
def assert_200(response):
"""Assert we get status code 200."""
expect_status_in(response, (200, 201, 204))
def assert_404(response):
"""Assert we get status code 404."""
assert response.status_code == 404, 'Expected 404, got %d' % (response.status_code)
def assert_ok(response):
"""Assert we get ok status within api response."""
expect_status_in(response, (200, 201))
expect_json_contains(response, {'_status': 'OK'})
def get_json_data(response):
return json.loads(response.get_data())
def get_it(context):
it = context.data[0]
res = get_res('/%s/%s' % (context.resource, it['_id']), context)
return get_self_href(res, context), res.get('_etag')
def if_match(context, etag):
headers = []
if etag:
headers = [('If-Match', etag)]
headers = unique_headers(headers, context.headers)
return headers
def unique_headers(headers_to_add, old_headers):
headers = dict(old_headers)
for item in headers_to_add:
headers.update({item[0]: item[1]})
unique_headers = [(k, v) for k, v in headers.items()]
return unique_headers
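# Sketch: later header tuples win on name collisions, so a step can override
# Content-Type while keeping the rest of the session headers (values below
# are dummies).
def _example_unique_headers():
    base = [('Content-Type', 'application/json'), ('Authorization', 'basic xyz')]
    return unique_headers([('Content-Type', 'multipart/form-data')], base)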
def patch_current_user(context, data):
response = context.client.get(get_prefixed_url(context.app, '/users/%s' % context.user['_id']),
headers=context.headers)
user = json.loads(response.get_data())
headers = if_match(context, user.get('_etag'))
response = context.client.patch(get_prefixed_url(context.app, '/users/%s' % context.user['_id']),
data=data, headers=headers)
assert_ok(response)
return response
def apply_placeholders(context, text):
placeholders = getattr(context, 'placeholders', {})
for placeholder in findall('#([^#"]+)#', text):
if placeholder.startswith('DATE'):
value = utcnow()
unit = placeholder.find('+')
if unit != -1:
value += timedelta(days=int(placeholder[unit + 1]))
else:
unit = placeholder.find('-')
if unit != -1:
value -= timedelta(days=int(placeholder[unit + 1]))
if placeholder == 'ANALYTICS_DATE_FORMATTED':
value = format_date_analytics(value)
else:
value = format_date(value)
placeholders['LAST_DATE_VALUE'] = value
elif placeholder not in placeholders:
try:
resource_name, field_name = placeholder.split('.', maxsplit=1)
except Exception:
continue
resource = getattr(context, resource_name, None)
for name in field_name.split('.'):
if not resource:
break
resource = resource.get(name, None)
if not resource:
continue
if isinstance(resource, datetime):
value = format_date(resource)
else:
value = str(resource)
else:
value = placeholders[placeholder]
text = text.replace('#%s#' % placeholder, value)
return text
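# Sketch (assumes a behave context; "#archive._id#" resolves only if an
# archive item was stored on the context earlier): "#DATE+2#" expands to now
# plus two days in DATETIME_FORMAT.
def _example_apply_placeholders(context):
    text = '{"publish_schedule": "#DATE+2#", "item": "#archive._id#"}'
    return apply_placeholders(context, text)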
def get_resource_name(url):
parsed_url = urlparse(url)
return basename(parsed_url.path)
def format_items(items):
output = [''] # insert empty line
for item in items:
if item.get('formatted_item'):
item['formatted_item'] = json.loads(item['formatted_item'])
output.append(json.dumps(item, indent=4, sort_keys=True))
return ',\n'.join(output)
@given('empty "{resource}"')
def step_impl_given_empty(context, resource):
if not is_user_resource(resource):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
get_resource_service(resource).delete_action()
@given('"{resource}"')
def step_impl_given_(context, resource):
data = apply_placeholders(context, context.text)
with context.app.test_request_context(context.app.config['URL_PREFIX']):
if not is_user_resource(resource):
get_resource_service(resource).delete_action()
items = [parse(item, resource) for item in json.loads(data)]
if is_user_resource(resource):
for item in items:
item.setdefault('needs_activation', False)
get_resource_service(resource).post(items)
context.data = items
context.resource = resource
try:
setattr(context, resource, items[-1])
except KeyError:
pass
@given('"{resource}" with objectid')
def step_impl_given_with_objectid(context, resource):
data = apply_placeholders(context, context.text)
with context.app.test_request_context(context.app.config['URL_PREFIX']):
items = [parse(item, resource) for item in json.loads(data)]
for item in items:
if '_id' in item:
item['_id'] = ObjectId(item['_id'])
get_resource_service(resource).post(items)
context.data = items
context.resource = resource
setattr(context, resource, items[-1])
@given('the "{resource}"')
def step_impl_given_the(context, resource):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
if not is_user_resource(resource):
get_resource_service(resource).delete_action()
orig_items = {}
items = [parse(item, resource) for item in json.loads(context.text)]
get_resource_service(resource).post(items)
context.data = orig_items or items
context.resource = resource
@given('ingest from "{provider}"')
def step_impl_given_resource_with_provider(context, provider):
resource = 'ingest'
with context.app.test_request_context(context.app.config['URL_PREFIX']):
get_resource_service(resource).delete_action()
items = [parse(item, resource) for item in json.loads(context.text)]
ingest_provider = get_resource_service('ingest_providers').find_one(req=None,
_id=context.providers[provider])
for item in items:
item['ingest_provider'] = context.providers[provider]
item['source'] = ingest_provider.get('source')
get_resource_service(resource).post(items)
context.data = items
context.resource = resource
@given('config update')
def given_config_update(context):
diff = json.loads(context.text)
context.app.config.update(diff)
if 'AMAZON_CONTAINER_NAME' in diff:
from superdesk.storage import AmazonMediaStorage
context.app.media = AmazonMediaStorage(context.app)
m = patch.object(context.app.media, 'client')
m.start()
@given('config')
def step_impl_given_config(context):
tests.setup(context, json.loads(context.text))
tests.setup_auth_user(context)
@given('we have "{role_name}" role')
def step_impl_given_role(context, role_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
role = get_resource_service('roles').find_one(name=role_name, req=None)
data = MongoJSONEncoder().encode({'role': role.get('_id')})
response = patch_current_user(context, data)
assert_ok(response)
@given('we have "{user_type}" as type of user')
def step_impl_given_user_type(context, user_type):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
data = json.dumps({'user_type': user_type})
response = patch_current_user(context, data)
assert_ok(response)
@when('we post to auth_db')
def step_impl_when_auth(context):
data = context.text
context.response = context.client.post(
get_prefixed_url(context.app, '/auth_db'), data=data, headers=context.headers)
if context.response.status_code == 200 or context.response.status_code == 201:
item = json.loads(context.response.get_data())
if item.get('_id'):
set_placeholder(context, 'AUTH_ID', item['_id'])
context.headers.append(('Authorization', b'basic ' + b64encode(item['token'].encode('ascii') + b':')))
context.user = item['user']
@when('we sleep for {limit}s')
def when_we_sleep_for(context, limit):
time.sleep(int(limit))
@given('we create a new macro "{macro_name}"')
def step_create_new_macro(context, macro_name):
src = get_fixture_path(context, macro_name)
dst = get_macro_path(macro_name)
shutil.copyfile(src, dst)
@when('we fetch from "{provider_name}" ingest "{guid}"')
def step_impl_fetch_from_provider_ingest(context, provider_name, guid):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
fetch_from_provider(context, provider_name, guid)
def embed_routing_scheme_rules(scheme):
"""Fetch all content filters referenced by the given routing scheme and embed those into scheme.
:param dict scheme: routing scheme configuration
"""
filters_service = superdesk.get_resource_service('content_filters')
rules_filters = (
(rule, str(rule['filter']))
for rule in scheme['rules'] if rule.get('filter'))
for rule, filter_id in rules_filters:
content_filter = filters_service.find_one(_id=filter_id, req=None)
rule['filter'] = content_filter
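# Sketch (requires a test request context; the id is hypothetical): load a
# stored routing scheme, whose rules reference content filters by id, and
# embed the full filter documents before ingesting against it.
def _example_embed_routing_scheme(scheme_id):
    scheme = get_resource_service('routing_schemes').find_one(_id=scheme_id, req=None)
    embed_routing_scheme_rules(scheme)
    return scheme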
@when('we fetch from "{provider_name}" ingest "{guid}" using routing_scheme')
def step_impl_fetch_from_provider_ingest_using_routing(context, provider_name, guid):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
_id = apply_placeholders(context, context.text)
routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
embed_routing_scheme_rules(routing_scheme)
fetch_from_provider(context, provider_name, guid, routing_scheme)
@when('we ingest and fetch "{provider_name}" "{guid}" to desk "{desk}" stage "{stage}" using routing_scheme')
def step_impl_fetch_from_provider_ingest_using_routing_with_desk(context, provider_name, guid, desk, stage):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
_id = apply_placeholders(context, context.text)
desk_id = apply_placeholders(context, desk)
stage_id = apply_placeholders(context, stage)
routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
embed_routing_scheme_rules(routing_scheme)
fetch_from_provider(context, provider_name, guid, routing_scheme, desk_id, stage_id)
@when('we ingest with routing scheme "{provider_name}" "{guid}"')
def step_impl_ingest_with_routing_scheme(context, provider_name, guid):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
_id = apply_placeholders(context, context.text)
routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
embed_routing_scheme_rules(routing_scheme)
fetch_from_provider(context, provider_name, guid, routing_scheme)
def fetch_from_provider(context, provider_name, guid, routing_scheme=None, desk_id=None, stage_id=None):
ingest_provider_service = get_resource_service('ingest_providers')
provider = ingest_provider_service.find_one(name=provider_name, req=None)
provider['routing_scheme'] = routing_scheme
if 'rule_set' in provider:
rule_set = get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
else:
rule_set = None
provider_service = registered_feeding_services[provider['feeding_service']]
provider_service = provider_service.__class__()
if provider.get('name', '').lower() in ('aap', 'dpa', 'ninjs', 'email'):
file_path = os.path.join(provider.get('config', {}).get('path', ''), guid)
feeding_parser = provider_service.get_feed_parser(provider)
if isinstance(feeding_parser, XMLFeedParser):
with open(file_path, 'rb') as f:
xml_string = etree.etree.fromstring(f.read())
items = [feeding_parser.parse(xml_string, provider)]
elif isinstance(feeding_parser, EMailRFC822FeedParser):
with open(file_path, 'rb') as f:
data = f.read()
items = feeding_parser.parse([(1, data)], provider)
else:
parsed = feeding_parser.parse(file_path, provider)
items = [parsed] if not isinstance(parsed, list) else parsed
else:
provider_service.provider = provider
provider_service.URL = provider.get('config', {}).get('url')
items = provider_service.fetch_ingest(guid)
for item in items:
item['versioncreated'] = utcnow()
item['expiry'] = utcnow() + timedelta(minutes=20)
if desk_id:
from bson.objectid import ObjectId
item['task'] = {'desk': ObjectId(desk_id), 'stage': ObjectId(stage_id)}
failed = context.ingest_items(items, provider, provider_service, rule_set=rule_set,
routing_scheme=provider.get('routing_scheme'))
assert len(failed) == 0, failed
provider = ingest_provider_service.find_one(name=provider_name, req=None)
ingest_provider_service.system_update(provider['_id'], {LAST_ITEM_UPDATE: utcnow()}, provider)
for item in items:
set_placeholder(context, '{}.{}'.format(provider_name, item['guid']), item['_id'])
@when('we post to "{url}"')
def step_impl_when_post_url(context, url):
post_data(context, url)
@when('we post to "{url}" with delay')
def step_impl_when_post_url_delay(context, url):
time.sleep(1)
post_data(context, url)
def set_user_default(url, data):
if is_user_resource(url):
user = json.loads(data)
user.setdefault('needs_activation', False)
data = json.dumps(user)
def get_response_etag(response):
return json.loads(response.get_data())['_etag']
@when('we save etag')
def step_when_we_save_etag(context):
context.etag = get_response_etag(context.response)
@then('we get same etag')
def step_then_we_get_same_etag(context):
assert context.etag == get_response_etag(context.response), 'etags not matching'
def store_placeholder(context, url):
if context.response.status_code in (200, 201):
item = json.loads(context.response.get_data())
if item['_status'] == 'OK' and item.get('_id'):
try:
setattr(context, get_resource_name(url), item)
except (IndexError, KeyError):
pass
def post_data(context, url, success=False):
with context.app.mail.record_messages() as outbox:
data = apply_placeholders(context, context.text)
url = apply_placeholders(context, url)
set_user_default(url, data)
context.response = context.client.post(get_prefixed_url(context.app, url),
data=data, headers=context.headers)
if success:
assert_ok(context.response)
item = json.loads(context.response.get_data())
context.outbox = outbox
store_placeholder(context, url)
return item
@when('we post to "{url}" with "{tag}" and success')
def step_impl_when_post_url_with_tag(context, url, tag):
item = post_data(context, url, True)
if item.get('_id'):
set_placeholder(context, tag, item.get('_id'))
@given('we have "{url}" with "{tag}" and success')
def step_impl_given_post_url_with_tag(context, url, tag):
item = post_data(context, url, True)
if item.get('_id'):
set_placeholder(context, tag, item.get('_id'))
@when('we post to "{url}" with success')
def step_impl_when_post_url_with_success(context, url):
post_data(context, url, True)
@when('we put to "{url}"')
def step_impl_when_put_url(context, url):
with context.app.mail.record_messages() as outbox:
data = apply_placeholders(context, context.text)
        url = apply_placeholders(context, url)
        res = get_res(url, context)
        href = get_self_href(res, context)
        headers = if_match(context, res.get('_etag'))
        context.response = context.client.put(get_prefixed_url(context.app, href), data=data, headers=headers)
assert_ok(context.response)
context.outbox = outbox
@when('we get "{url}"')
def when_we_get_url(context, url):
url = apply_placeholders(context, url).encode('ascii').decode('unicode-escape')
headers = []
if context.text:
for line in context.text.split('\n'):
key, val = line.split(': ')
headers.append((key, val))
headers = unique_headers(headers, context.headers)
    context.response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
@when('we get dictionary "{dictionary_id}"')
def when_we_get_dictionary(context, dictionary_id):
dictionary_id = apply_placeholders(context, dictionary_id)
url = '/dictionaries/' + dictionary_id + '?projection={"content": 1}'
return when_we_get_url(context, url)
@then('we get latest')
def step_impl_we_get_latest(context):
data = get_json_data(context.response)
href = get_self_href(data, context)
headers = if_match(context, data.get('_etag'))
href = get_prefixed_url(context.app, href)
context.response = context.client.get(href, headers=headers)
assert_200(context.response)
@when('we find for "{resource}" the id as "{name}" by "{search_criteria}"')
def when_we_find_for_resource_the_id_as_name_by_search_criteria(context, resource, name, search_criteria):
url = '/' + resource + '?' + search_criteria
context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
if context.response.status_code == 200:
expect_json_length(context.response, 1, path='_items')
item = json.loads(context.response.get_data())
item = item['_items'][0]
if item.get('_id'):
set_placeholder(context, name, item['_id'])
@when('we delete "{url}"')
def step_impl_when_delete_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
href = get_prefixed_url(context.app, href)
context.response = context.client.delete(href, headers=headers)
context.outbox = outbox
@when('we delete link "{url}"')
def step_impl_when_delete_link_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
headers = context.headers
context.response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
context.outbox = outbox
@when('we delete all sessions "{url}"')
def step_impl_when_delete_all_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
headers = context.headers
href = get_prefixed_url(context.app, url)
context.response = context.client.delete(href, headers=headers)
context.outbox = outbox
@when('we delete latest')
def when_we_delete_it(context):
with context.app.mail.record_messages() as outbox:
res = get_json_data(context.response)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
href = get_prefixed_url(context.app, href)
context.response = context.client.delete(href, headers=headers)
context.email = outbox
@when('we patch "{url}"')
def step_impl_when_patch_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
data = apply_placeholders(context, context.text)
href = get_prefixed_url(context.app, href)
context.response = context.client.patch(href, data=data, headers=headers)
context.outbox = outbox
@when('we patch latest')
def step_impl_when_patch_again(context):
with context.app.mail.record_messages() as outbox:
data = get_json_data(context.response)
href = get_prefixed_url(context.app, get_self_href(data, context))
headers = if_match(context, data.get('_etag'))
data2 = apply_placeholders(context, context.text)
context.response = context.client.patch(href, data=data2, headers=headers)
if context.response.status_code in (200, 201):
item = json.loads(context.response.get_data())
if item['_status'] == 'OK' and item.get('_id'):
setattr(context, get_resource_name(href), item)
assert_ok(context.response)
context.outbox = outbox
@when('we patch latest without assert')
def step_impl_when_patch_without_assert(context):
data = get_json_data(context.response)
href = get_prefixed_url(context.app, get_self_href(data, context))
headers = if_match(context, data.get('_etag'))
data2 = apply_placeholders(context, context.text)
context.response = context.client.patch(href, data=data2, headers=headers)
@when('we patch routing scheme "{url}"')
def step_impl_when_patch_routing_scheme(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
data = json.loads(apply_placeholders(context, context.text))
        res.setdefault('rules', []).append(data)
context.response = context.client.patch(get_prefixed_url(context.app, href),
data=json.dumps({'rules': res.get('rules', [])}),
headers=headers)
context.outbox = outbox
@when('we patch given')
def step_impl_when_patch(context):
with context.app.mail.record_messages() as outbox:
href, etag = get_it(context)
headers = if_match(context, etag)
context.response = context.client.patch(get_prefixed_url(context.app, href), data=context.text, headers=headers)
assert_ok(context.response)
context.outbox = outbox
@when('we get given')
def step_impl_when_get(context):
href, _etag = get_it(context)
context.response = context.client.get(get_prefixed_url(context.app, href), headers=context.headers)
@when('we restore version {version}')
def step_impl_when_restore_version(context, version):
data = get_json_data(context.response)
href = get_self_href(data, context)
headers = if_match(context, data.get('_etag'))
text = '{"type": "text", "old_version": %s, "last_version": %s}' % (version, data.get('_current_version'))
context.response = context.client.put(get_prefixed_url(context.app, href), data=text, headers=headers)
assert_ok(context.response)
@when('we upload a file "{filename}" to "{dest}"')
def step_impl_when_upload_image(context, filename, dest):
upload_file(context, dest, filename, 'media')
@when('we upload a binary file with cropping')
def step_impl_when_upload_with_crop(context):
data = {'CropTop': '0', 'CropLeft': '0', 'CropBottom': '333', 'CropRight': '333'}
upload_file(context, '/upload', 'bike.jpg', 'media', data)
@when('upload a file "{file_name}" to "{destination}" with "{guid}"')
def step_impl_when_upload_image_with_guid(context, file_name, destination, guid):
upload_file(context, destination, file_name, 'media', {'guid': guid})
if destination == 'archive':
set_placeholder(context, 'original.href', context.archive['renditions']['original']['href'])
set_placeholder(context, 'original.media', context.archive['renditions']['original']['media'])
@when('we upload a new dictionary with success')
def when_upload_dictionary(context):
data = json.loads(apply_placeholders(context, context.text))
upload_file(context, '/dictionaries', 'test_dict.txt', DICTIONARY_FILE, data)
assert_ok(context.response)
@when('we upload to an existing dictionary with success')
def when_upload_patch_dictionary(context):
data = json.loads(apply_placeholders(context, context.text))
url = apply_placeholders(context, '/dictionaries/#dictionaries._id#')
etag = apply_placeholders(context, '#dictionaries._etag#')
upload_file(context, url, 'test_dict2.txt', DICTIONARY_FILE, data, 'patch', [('If-Match', etag)])
assert_ok(context.response)
def upload_file(context, dest, filename, file_field, extra_data=None, method='post', user_headers=[]):
with open(get_fixture_path(context, filename), 'rb') as f:
data = {file_field: f}
if extra_data:
data.update(extra_data)
headers = [('Content-Type', 'multipart/form-data')]
headers.extend(user_headers)
headers = unique_headers(headers, context.headers)
url = get_prefixed_url(context.app, dest)
context.response = getattr(context.client, method)(url, data=data, headers=headers)
assert_ok(context.response)
store_placeholder(context, url)
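# Sketch: updating an existing dictionary resource with a new file goes
# through the same helper, but as a PATCH carrying an If-Match etag (url and
# etag are assumed to come from a previously fetched resource).
def _example_upload_patch(context, url, etag):
    upload_file(context, url, 'test_dict2.txt', DICTIONARY_FILE, {}, 'patch',
                [('If-Match', etag)])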
@when('we upload a file from URL')
def step_impl_when_upload_from_url(context):
data = {'URL': external_url}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/upload'), data=data, headers=headers)
@when('we upload a file from URL with cropping')
def step_impl_when_upload_from_url_with_crop(context):
data = {'URL': external_url,
'CropTop': '0',
'CropLeft': '0',
'CropBottom': '333',
'CropRight': '333'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/upload'), data=data, headers=headers)
@when('we get user profile')
def step_impl_when_get_user(context):
profile_url = '/%s/%s' % ('users', context.user['_id'])
context.response = context.client.get(get_prefixed_url(context.app, profile_url), headers=context.headers)
@then('we get new resource')
def step_impl_then_get_new(context):
assert_ok(context.response)
expect_json_contains(context.response, 'self', path='_links')
if context.text is not None:
return test_json(context)
@then('we get error {code}')
def step_impl_then_get_error(context, code):
expect_status(context.response, int(code))
if context.text:
test_json(context)
@then('we get list with {total_count} items')
def step_impl_then_get_list(context, total_count):
assert_200(context.response)
data = get_json_data(context.response)
int_count = int(total_count.replace('+', '').replace('<', ''))
if '+' in total_count:
assert int_count <= data['_meta']['total'], '%d items is not enough' % data['_meta']['total']
elif total_count.startswith('<'):
assert int_count > data['_meta']['total'], '%d items is too much' % data['_meta']['total']
else:
assert int_count == data['_meta']['total'], 'got %d: %s' % (data['_meta']['total'],
format_items(data['_items']))
if context.text:
test_json(context)
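# The {total_count} argument above is a tiny DSL: "5" asserts exactly five
# items, "5+" asserts at least five, and "<5" asserts fewer than five, e.g.:
#
#     Then we get list with 5 items
#     Then we get list with 5+ items
#     Then we get list with <5 items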
@then('we get list ordered by {field} with {total_count} items')
def step_impl_ordered_list(context, field, total_count):
step_impl_then_get_list(context, total_count)
data = get_json_data(context.response)
fields = []
for i in data['_items']:
fields.append(i[field])
assert sorted(fields) == fields
@then('we get "{value}" in formatted output')
def step_impl_then_get_formatted_output(context, value):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if value in item['formatted_item']:
return
    assert False, '%s not found in formatted output' % value
@then('we get "{value}" in formatted output as "{group}" story for subscriber "{sub}"')
def step_impl_then_get_formatted_output_as_story(context, value, group, sub):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if item['subscriber_id'] != sub:
continue
try:
formatted_data = json.loads(item['formatted_item'])
except Exception:
continue
associations = formatted_data.get('associations', {})
for assoc_group in associations:
if assoc_group.startswith(group) and associations[assoc_group].get('guid', '') == value:
return
    assert False, '%s not found as %s story for subscriber %s' % (value, group, sub)
@then('we get "{value}" as "{group}" story for subscriber "{sub}" in package "{pck}"')
def step_impl_then_get_formatted_output_pck(context, value, group, sub, pck):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if item['item_id'] != pck:
continue
if item['subscriber_id'] != sub:
continue
try:
formatted_data = json.loads(item['formatted_item'])
except Exception:
continue
associations = formatted_data.get('associations', {})
for assoc_group in associations:
if assoc_group.startswith(group) and associations[assoc_group].get('guid', '') == value:
return
    assert False, '%s not found as %s story for subscriber %s in package %s' % (value, group, sub, pck)
@then('we get "{value}" as "{group}" story for subscriber "{sub}" not in package "{pck}" version "{v}"')
def step_impl_then_get_formatted_output_pck_version(context, value, group, sub, pck, v):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if item['item_id'] == pck:
if item['subscriber_id'] == sub and str(item['item_version']) == v:
try:
formatted_data = json.loads(item['formatted_item'])
except Exception:
continue
associations = formatted_data.get('associations', {})
for assoc_group in associations:
if assoc_group.startswith(group) \
and associations[assoc_group].get('guid', '') == value:
                        assert False, '%s found as %s story in package %s version %s' % (value, group, pck, v)
                return
    assert False, 'package %s version %s not found for subscriber %s' % (pck, v, sub)
@then('we get "{value}" in formatted output as "{group}" newsml12 story')
def step_impl_then_get_formatted_output_newsml(context, value, group):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if '<' + group + '>' + value + '</' + group + '>' in item['formatted_item']:
return
    assert False, '<%s>%s</%s> not found in formatted output' % (group, value, group)
@then('we get no "{field}"')
def step_impl_then_get_nofield(context, field):
assert_200(context.response)
expect_json_not_contains(context.response, field)
@then('expect json in "{path}"')
def step_impl_then_get_nofield_in_path(context, path):
assert_200(context.response)
expect_json(context.response, context.text, path)
@then('we get existing resource')
def step_impl_then_get_existing(context):
assert_200(context.response)
test_json(context)
@then('we get existing saved search')
def step_impl_then_get_existing_saved_search(context):
assert_200(context.response)
test_json_with_string_field_value(context, 'filter')
@then('we get OK response')
def step_impl_then_get_ok(context):
assert_200(context.response)
@then('we get response code {code}')
def step_impl_then_get_code(context, code):
expect_status(context.response, int(code))
@then('we get updated response')
def step_impl_then_get_updated(context):
assert_ok(context.response)
if context.text:
test_json(context)
@then('we get "{key}" in "{url}"')
def step_impl_then_get_key_in_url(context, key, url):
url = apply_placeholders(context, url)
res = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
assert_200(res)
expect_json_contains(res, key)
@then('we get file metadata')
def step_impl_then_get_file_meta(context):
    assert len(
        json.loads(apply_path(
            parse_json_response(context.response),
            'filemeta_json'
        )).items()
    ) > 0, 'expected non empty metadata dictionary'
@then('we get "{filename}" metadata')
def step_impl_then_get_given_file_meta(context, filename):
if filename == 'bike.jpg':
metadata = {
'ycbcrpositioning': 1,
'imagelength': 2448,
'exifimagewidth': 2448,
'meteringmode': 2,
'datetimedigitized': '2013:08:01 16:19:28',
'exposuremode': 0,
'flashpixversion': '0100',
'isospeedratings': 80,
'length': 469900,
'imageuniqueid': 'f3533c05daef2debe6257fd99e058eec',
'datetimeoriginal': '2013:08:01 16:19:28',
'whitebalance': 0,
'exposureprogram': 3,
'colorspace': 1,
'exifimageheight': 3264,
'software': 'Google',
'resolutionunit': 2,
'make': 'SAMSUNG',
'maxaperturevalue': [276, 100],
'aperturevalue': [276, 100],
'scenecapturetype': 0,
'exposuretime': [1, 2004],
'datetime': '2013:08:01 16:19:28',
'exifoffset': 216,
'yresolution': [72, 1],
'orientation': 1,
'componentsconfiguration': '0000',
'exifversion': '0220',
'focallength': [37, 10],
'flash': 0,
'model': 'GT-I9300',
'xresolution': [72, 1],
'fnumber': [26, 10],
'imagewidth': 3264,
'brightnessvalue': [2362, 256],
'exposurebiasvalue': [0, 10],
'shutterspeedvalue': [2808, 256]
}
elif filename == 'green.ogg':
metadata = {
'producer': 'Lavf54.59.103',
'music_genre': 'New Age',
'sample_rate': '44100',
'artist': 'Maxime Abbey',
'length': 368058,
'bit_rate': '160000',
'title': 'Green Hills',
'mime_type': 'audio/vorbis',
'format_version': 'Vorbis version 0',
'compression': 'Vorbis',
'duration': '0:00:20.088163',
'endian': 'Little endian',
'nb_channel': '2'
}
elif filename == 'this_week_nasa.mp4':
metadata = {
'mime_type': 'video/mp4',
'creation_date': '1904-01-01T00:00:00+00:00',
'duration': '0:00:10.224000',
'width': '480',
'length': 877869,
'comment': 'User volume: 100.0%',
'height': '270',
'endian': 'Big endian',
'last_modification': '1904-01-01T00:00:00+00:00'
}
else:
raise NotImplementedError("No metadata for file '{}'.".format(filename))
assertions.maxDiff = None
data = json.loads(context.response.get_data())
filemeta = get_filemeta(data)
    assert json_match(filemeta, metadata), str(filemeta) + '\n != \n' + str(metadata)
@then('we get "{type}" renditions')
def step_impl_then_get_renditions(context, type):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
for rend_name in context.app.config['RENDITIONS'][type]:
desc = renditions[rend_name]
assert isinstance(desc, dict), 'expected dict for rendition description'
assert 'href' in desc, 'expected href in rendition description'
assert 'media' in desc, 'expected media identifier in rendition description'
we_can_fetch_a_file(context, desc['href'], 'image/jpeg')
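# Sketch of the RENDITIONS config consulted above (names and sizes are
# illustrative, not the project defaults): every configured rendition name
# must appear in the item's renditions dict with an 'href' and a 'media' id
# pointing at a fetchable jpeg.
#
#     RENDITIONS = {
#         'picture': {
#             'thumbnail': {'width': 220, 'height': 120},
#             'viewImage': {'width': 640, 'height': 640},
#             'baseImage': {'width': 1400, 'height': 1400},
#         },
#     }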
@then('we get "{crop_name}" in renditions')
def step_impl_then_get_crop_in_renditions(context, crop_name):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
desc = renditions[crop_name]
assert isinstance(desc, dict), 'expected dict for rendition description'
assert 'href' in desc, 'expected href in rendition description'
assert 'media' in desc, 'expected media identifier in rendition description'
we_can_fetch_a_file(context, desc['href'], 'image/jpeg')
@then('we get "{crop_name}" not in renditions')
def step_impl_then_get_crop_not_in_renditions(context, crop_name):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
assert crop_name not in renditions, 'expected crop not in renditions'
@then('item "{item_id}" is unlocked')
def then_item_is_unlocked(context, item_id):
assert_200(context.response)
data = json.loads(context.response.get_data())
assert data.get('lock_user', None) is None, 'item is locked by user #{0}'.format(data.get('lock_user'))
@then('item "{item_id}" is locked')
def then_item_is_locked(context, item_id):
assert_200(context.response)
resp = parse_json_response(context.response)
assert resp['lock_user'] is not None
@then('item "{item_id}" is assigned')
def then_item_is_assigned(context, item_id):
resp = parse_json_response(context.response)
assert resp['task'].get('user', None) is not None, 'item is not assigned'
@then('we get rendition "{name}" with mimetype "{mimetype}"')
def step_impl_then_get_rendition_with_mimetype(context, name, mimetype):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
desc = renditions[name]
assert isinstance(desc, dict), 'expected dict for rendition description'
assert 'href' in desc, 'expected href in rendition description'
we_can_fetch_a_file(context, desc['href'], mimetype)
set_placeholder(context, "rendition.{}.href".format(name), desc['href'])
@when('we get updated media from archive')
def get_updated_media_from_archive(context):
url = 'archive/%s' % context._id
when_we_get_url(context, url)
assert_200(context.response)
@then('baseImage rendition is updated')
def check_base_image_rendition(context):
check_rendition(context, 'baseImage')
@then('original rendition is updated with link to file having mimetype "{mimetype}"')
def check_original_rendition(context, mimetype):
rv = parse_json_response(context.response)
link_to_file = rv['renditions']['original']['href']
assert link_to_file
we_can_fetch_a_file(context, link_to_file, mimetype)
@then('thumbnail rendition is updated')
def check_thumbnail_rendition(context):
check_rendition(context, 'thumbnail')
def check_rendition(context, rendition_name):
rv = parse_json_response(context.response)
assert rv['renditions'][rendition_name] != context.renditions[rendition_name], rv['renditions']
@then('we get "{key}"')
def step_impl_then_get_key(context, key):
assert_200(context.response)
expect_json_contains(context.response, key)
item = json.loads(context.response.get_data())
set_placeholder(context, '%s' % key, item[key])
@then('we store "{key}" with value "{value}" to context')
def step_impl_then_we_store_key_value_to_context(context, key, value):
set_placeholder(context, key, apply_placeholders(context, value))
@then('we get action in user activity')
def step_impl_then_get_action(context):
response = context.client.get(get_prefixed_url(context.app, '/activity'), headers=context.headers)
expect_json_contains(response, '_items')
@then('we get a file reference')
def step_impl_then_get_file(context):
assert_200(context.response)
expect_json_contains(context.response, 'renditions')
data = get_json_data(context.response)
url = '/upload/%s' % data['_id']
headers = [('Accept', 'application/json')]
headers = unique_headers(headers, context.headers)
response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
assert_200(response)
assert len(response.get_data()), response
assert response.mimetype == 'application/json', response.mimetype
expect_json_contains(response, 'renditions')
expect_json_contains(response, {'mimetype': 'image/jpeg'})
fetched_data = get_json_data(context.response)
context.fetched_data = fetched_data
@then('we get cropped data smaller than "{max_size}"')
def step_impl_then_get_cropped_file(context, max_size):
assert int(get_filemeta(context.fetched_data, 'length')) < int(max_size), 'was expecting smaller image'
@then('we can fetch a data_uri')
def step_impl_we_fetch_data_uri(context):
we_can_fetch_a_file(context, context.fetched_data['renditions']['original']['href'], 'image/jpeg')
@then('we fetch a file "{url}"')
def step_impl_we_cannot_fetch_file(context, url):
url = apply_placeholders(context, url)
headers = [('Accept', 'application/json')]
headers = unique_headers(headers, context.headers)
context.response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
def we_can_fetch_a_file(context, url, mimetype):
headers = [('Accept', 'application/json')]
headers = unique_headers(headers, context.headers)
response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
assert_200(response)
assert len(response.get_data()), response
assert response.mimetype == mimetype, response.mimetype
@then('we can delete that file')
def step_impl_we_delete_file(context):
url = '/upload/%s' % context.fetched_data['_id']
context.headers.append(('Accept', 'application/json'))
headers = if_match(context, context.fetched_data.get('_etag'))
response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
assert_200(response)
response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
assert_404(response)
@then('we get a picture url')
def step_impl_then_get_picture(context):
assert_ok(context.response)
expect_json_contains(context.response, 'picture_url')
@then('we get aggregations "{keys}"')
def step_impl_then_get_aggs(context, keys):
assert_200(context.response)
expect_json_contains(context.response, '_aggregations')
data = get_json_data(context.response)
aggs = data['_aggregations']
for key in keys.split(','):
assert_in(key, aggs)
@then('the file is stored localy')
def step_impl_then_file(context):
assert_200(context.response)
folder = context.app.config['UPLOAD_FOLDER']
assert os.path.exists(os.path.join(folder, context.filename))
@then('we get version {version}')
def step_impl_then_get_version(context, version):
assert_200(context.response)
expect_json_contains(context.response, {'_current_version': int(version)})
@then('the field "{field}" value is "{value}"')
def step_impl_then_get_field_value(context, field, value):
assert_200(context.response)
expect_json_contains(context.response, {field: value})
@then('we get etag matching "{url}"')
def step_impl_then_get_etag(context, url):
if context.app.config['IF_MATCH']:
assert_200(context.response)
expect_json_contains(context.response, '_etag')
etag = get_json_data(context.response).get('_etag')
response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
expect_json_contains(response, {'_etag': etag})
@then('we get not modified response')
def step_impl_then_not_modified(context):
expect_status(context.response, 304)
@then('we get "{header}" header')
def step_impl_then_get_header(context, header):
expect_headers_contain(context.response, header)
@then('we get "{header}" header with "{type}" type')
def step_impl_then_get_header_with_type(context, header, type):
expect_headers_contain(context.response, header, type)
@then('we get link to "{resource}"')
def then_we_get_link_to_resource(context, resource):
doc = get_json_data(context.response)
self_link = doc.get('_links').get('self')
assert resource in self_link['href'], 'expect link to "%s", got %s' % (resource, self_link)
@then('we get deleted response')
def then_we_get_deleted_response(context):
assert_200(context.response)
@when('we post to reset_password we get email with token')
def we_post_to_reset_password(context):
data = {'email': '[email protected]'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
with context.app.mail.record_messages() as outbox:
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (200, 201))
assert len(outbox) == 1
assert outbox[0].subject == "Reset password"
email_text = outbox[0].body
assert "24" in email_text
words = email_text.split()
url = urlparse(words[words.index("link") + 1])
token = url.fragment.split('token=')[-1]
assert token
context.token = token
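# Sketch of the token parsing above: the reset email body contains
# "... link <url>#token=<token> ...", so the token is whatever follows
# "token=" in the URL fragment.
def _example_extract_reset_token(email_text):
    words = email_text.split()
    url = urlparse(words[words.index("link") + 1])
    return url.fragment.split('token=')[-1]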
@then('we can check if token is valid')
def we_can_check_token_is_valid(context):
data = {'token': context.token}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (200, 201))
@then('we update token to be expired')
def we_update_token_to_expired(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
expiry = utc.utcnow() - timedelta(days=2)
reset_request = get_resource_service('reset_user_password').find_one(req=None, token=context.token)
reset_request['expire_time'] = expiry
id = reset_request.pop('_id')
get_resource_service('reset_user_password').patch(id, reset_request)
@then('token is invalid')
def check_token_invalid(context):
data = {'token': context.token}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (403, 401))
@when('we post to reset_password we do not get email with token')
def we_post_to_reset_password_it_fails(context):
data = {'email': '[email protected]'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
with context.app.mail.record_messages() as outbox:
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (200, 201))
assert len(outbox) == 0
def start_reset_password_for_user(context):
data = {'token': context.token, 'password': 'test_pass'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
@then('we fail to reset password for user')
def we_fail_to_reset_password_for_user(context):
start_reset_password_for_user(context)
step_impl_then_get_error(context, 403)
@then('we reset password for user')
def we_reset_password_for_user(context):
start_reset_password_for_user(context)
expect_status_in(context.response, (200, 201))
auth_data = {'username': 'foo', 'password': 'test_pass'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/auth_db'), data=auth_data, headers=headers)
expect_status_in(context.response, (200, 201))
@when('we switch user')
def when_we_switch_user(context):
user = {'username': 'test-user-2', 'password': 'pwd', 'is_active': True,
'needs_activation': False, 'sign_off': 'foo'}
tests.setup_auth_user(context, user)
set_placeholder(context, 'USERS_ID', str(context.user['_id']))
@when('we setup test user')
def when_we_setup_test_user(context):
tests.setup_auth_user(context, tests.test_user)
@when('we get my "{url}"')
def when_we_get_my_url(context, url):
user_id = str(context.user.get('_id'))
my_url = '{0}?where={1}'.format(url, json.dumps({'user': user_id}))
return when_we_get_url(context, my_url)
@when('we get user "{resource}"')
def when_we_get_user_resource(context, resource):
url = '/users/{0}/{1}'.format(str(context.user.get('_id')), resource)
return when_we_get_url(context, url)
@then('we get embedded items')
def we_get_embedded_items(context):
response_data = json.loads(context.response.get_data())
href = get_self_href(response_data, context)
url = href + '/?embedded={"items": 1}'
context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
assert_200(context.response)
context.response_data = json.loads(context.response.get_data())
assert len(context.response_data['items']['view_items']) == 2
@when('we reset notifications')
def step_when_we_reset_notifications(context):
context.app.notification_client.reset()
@then('we get notifications')
def then_we_get_notifications(context):
assert hasattr(context.app.notification_client, 'messages'), 'no messages'
notifications = context.app.notification_client.messages
notifications_data = [json.loads(notification) for notification in notifications]
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data, notifications_data), True,
msg=str(context_data) + '\n != \n' + str(notifications_data))
@then('we get default preferences')
def get_default_prefs(context):
response_data = json.loads(context.response.get_data())
assert_equal(response_data['user_preferences'], default_user_preferences)
@when('we spike "{item_id}"')
def step_impl_when_spike_url(context, item_id):
item_id = apply_placeholders(context, item_id)
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/spike/' + item_id),
data='{"state": "spiked"}', headers=headers)
@when('we spike fetched item')
def step_impl_when_spike_fetched_item(context):
data = json.loads(apply_placeholders(context, context.text))
item_id = data["_id"]
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/spike/' + item_id),
data='{"state": "spiked"}', headers=headers)
@when('we unspike "{item_id}"')
def step_impl_when_unspike_url(context, item_id):
item_id = apply_placeholders(context, item_id)
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/unspike/' + item_id),
data=apply_placeholders(context, context.text or '{}'), headers=headers)
@then('we get spiked content "{item_id}"')
def get_spiked_content(context, item_id):
item_id = apply_placeholders(context, item_id)
url = 'archive/{0}'.format(item_id)
when_we_get_url(context, url)
assert_200(context.response)
response_data = json.loads(context.response.get_data())
assert_equal(response_data['state'], 'spiked')
assert_equal(response_data['operation'], 'spike')
@then('we get unspiked content "{id}"')
def get_unspiked_content(context, id):
text = context.text
context.text = ''
url = 'archive/{0}'.format(id)
when_we_get_url(context, url)
assert_200(context.response)
response_data = json.loads(context.response.get_data())
assert_equal(response_data['state'], 'draft')
assert_equal(response_data['operation'], 'unspike')
# Tolga Akin (05/11/14)
# Expiry value doesn't get set to None properly in Elastic.
# Discussed with Petr so we'll look into this later
# assert_equal(response_data['expiry'], None)
if text:
assert json_match(json.loads(apply_placeholders(context, text)), response_data)
@then('we get global content expiry')
def get_global_content_expiry(context):
validate_expired_content(context, context.app.config['CONTENT_EXPIRY_MINUTES'], utcnow())
@then('we get content expiry {minutes}')
def get_content_expiry(context, minutes):
validate_expired_content(context, minutes, utcnow())
@then('we get expiry for schedule and embargo content {minutes} minutes after "{future_date}"')
def get_content_expiry_schedule(context, minutes, future_date):
future_date = parse_date(apply_placeholders(context, future_date))
validate_expired_content(context, minutes, future_date)
@then('we get desk spike expiry after "{test_minutes}"')
def get_desk_spike_expiry(context, test_minutes):
validate_expired_content(context, test_minutes, utcnow())
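# Shared expiry check: the expected expiry is start_datetime + <minutes>, and
# the response expiry must be at or before it (processing delay can only make
# the stored expiry earlier, hence <= rather than ==).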
def validate_expired_content(context, minutes, start_datetime):
response_data = json.loads(context.response.get_data())
assert response_data['expiry']
response_expiry = parse_date(response_data['expiry'])
expiry = start_datetime + timedelta(minutes=int(minutes))
assert response_expiry <= expiry
@when('we mention user in comment for "{url}"')
def we_mention_user_in_comment(context, url):
with context.app.mail.record_messages() as outbox:
step_impl_when_post_url(context, url)
assert len(outbox) == 1
assert_equal(outbox[0].subject, "You were mentioned in a comment by test_user")
email_text = outbox[0].body
assert email_text
@when('we change user status to "{status}" using "{url}"')
def we_change_user_status(context, status, url):
with context.app.mail.record_messages() as outbox:
step_impl_when_patch_url(context, url)
assert len(outbox) == 1
assert_equal(outbox[0].subject, "Your Superdesk account is " + status)
assert outbox[0].body
@when('we get the default incoming stage')
def we_get_default_incoming_stage(context):
data = json.loads(context.response.get_data())
incoming_stage = data['_items'][0]['incoming_stage'] if '_items' in data else data['incoming_stage']
assert incoming_stage
url = 'stages/{0}'.format(incoming_stage)
when_we_get_url(context, url)
assert_200(context.response)
data = json.loads(context.response.get_data())
assert data['default_incoming'] is True
assert data['name'] == 'Incoming Stage'
@then('we get stage filled in to default_incoming')
def we_get_stage_filled_in(context):
data = json.loads(context.response.get_data())
assert data['task']['stage']
@given('we have sessions "{url}"')
def we_have_sessions_get_id(context, url):
when_we_get_url(context, url)
item = json.loads(context.response.get_data())
context.session_id = item['_items'][0]['_id']
context.data = item
set_placeholder(context, 'SESSION_ID', item['_items'][0]['_id'])
setattr(context, 'users', item['_items'][0]['user'])
@then('we get session by id')
def we_get_session_by_id(context):
url = 'sessions/' + context.session_id
when_we_get_url(context, url)
item = json.loads(context.response.get_data())
returned_id = item["_id"]
assert context.session_id == returned_id
@then('we delete session by id')
def we_delete_session_by_id(context):
url = 'sessions/' + context.session_id
step_impl_when_delete_url(context, url)
assert_200(context.response)
@when('we create a new user')
def step_create_a_user(context):
data = apply_placeholders(context, context.text)
with context.app.mail.record_messages() as outbox:
context.response = context.client.post(get_prefixed_url(context.app, '/users'),
data=data, headers=context.headers)
expect_status_in(context.response, (200, 201))
assert len(outbox) == 1
context.email = outbox[0]
@then('we get activation email')
def step_get_activation_email(context):
assert context.email.subject == 'Superdesk account created'
email_text = context.email.body
words = email_text.split()
url = urlparse(words[words.index("to") + 1])
token = url.fragment.split('token=')[-1]
assert token
@then('we set elastic limit')
def step_set_limit(context):
context.app.settings['MAX_SEARCH_DEPTH'] = 1
@then('we get emails')
def step_we_get_email(context):
data = json.loads(context.text)
for email in data:
assert check_if_email_sent(context, email)
@then('we get {count} emails')
def step_we_get_email_count(context, count):
assert len(context.outbox) == int(count)
if context.text:
step_we_get_email(context)
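# check_if_email_sent(spec): spec maps email attributes to expected
# substrings, e.g. a hypothetical {"subject": "Reset password", "body": "24"};
# each key must match at least one message in context.outbox.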
def check_if_email_sent(context, spec):
if context.outbox:
for key in spec:
found = False
values = [getattr(email, key) for email in context.outbox]
for value in values:
if spec[key] in value:
found = True
if not found:
print('%s:%s not found in %s' % (key, spec[key], json.dumps(values, indent=2)))
return False
return True
print('no email sent')
return False
@then('we get activity')
def then_we_get_activity(context):
url = apply_placeholders(context, '/activity?where={"name": {"$in": ["notify", "user:mention" , "desk:mention"]}}')
context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
if context.response.status_code == 200:
expect_json_length(context.response, 1, path='_items')
item = json.loads(context.response.get_data())
item = item['_items'][0]
if item.get('_id'):
setattr(context, 'activity', item)
set_placeholder(context, 'USERS_ID', item['user'])
def login_as(context, username, password, user_type):
user = {'username': username, 'password': password, 'is_active': True,
'is_enabled': True, 'needs_activation': False, 'user_type': user_type}
if context.text:
user.update(json.loads(context.text))
tests.setup_auth_user(context, user)
@given('we login as user "{username}" with password "{password}" and user type "{user_type}"')
def given_we_login_as_user(context, username, password, user_type):
login_as(context, username, password, user_type)
@when('we login as user "{username}" with password "{password}" and user type "{user_type}"')
def when_we_login_as_user(context, username, password, user_type):
login_as(context, username, password, user_type)
def is_user_resource(resource):
return resource in ('users', '/users')
@then('we get {no_of_stages} invisible stages')
def when_we_get_invisible_stages(context, no_of_stages):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
stages = get_resource_service('stages').get_stages_by_visibility(is_visible=False)
assert len(stages) == int(no_of_stages)
@then('we get {no_of_stages} visible stages')
def when_we_get_visible_stages(context, no_of_stages):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
stages = get_resource_service('stages').get_stages_by_visibility(is_visible=True)
assert len(stages) == int(no_of_stages)
@then('we get {no_of_stages} invisible stages for user')
def when_we_get_invisible_stages_for_user(context, no_of_stages):
data = json.loads(apply_placeholders(context, context.text))
with context.app.test_request_context(context.app.config['URL_PREFIX']):
stages = get_resource_service('users').get_invisible_stages(data['user'])
assert len(stages) == int(no_of_stages)
@then('we get "{field_name}" populated')
def then_field_is_populated(context, field_name):
resp = parse_json_response(context.response)
assert resp[field_name].get('user', None) is not None, 'item is not populated'
@then('we get "{field_name}" not populated')
def then_field_is_not_populated(context, field_name):
resp = parse_json_response(context.response)
assert resp[field_name] is None, 'item is populated'
@then('the field "{field_name}" value is not "{field_value}"')
def then_field_value_is_not_same(context, field_name, field_value):
resp = parse_json_response(context.response)
assert resp[field_name] != field_value, 'values are the same'
@then('we get "{field_name}" not populated in results')
def then_field_is_not_populated_in_results(context, field_name):
resps = parse_json_response(context.response)
for resp in resps['_items']:
assert resp[field_name] is None, 'item is populated'
@when('we delete content filter "{name}"')
def step_delete_content_filter(context, name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
filter = get_resource_service('content_filters').find_one(req=None, name=name)
url = '/content_filters/{}'.format(filter['_id'])
headers = if_match(context, filter.get('_etag'))
context.response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
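# Rewrite step: POSTs to /archive/<id>/rewrite and, unless the API returns
# 400, stores the original id as REWRITE_OF and the new item id as REWRITE_ID
# so later steps can reference them via placeholders.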
@when('we rewrite "{item_id}"')
def step_impl_when_rewrite(context, item_id):
context_data = {}
_id = apply_placeholders(context, item_id)
if context.text:
context_data.update(json.loads(apply_placeholders(context, context.text)))
data = json.dumps(context_data)
context.response = context.client.post(
get_prefixed_url(context.app, '/archive/{}/rewrite'.format(_id)),
data=data, headers=context.headers)
if context.response.status_code == 400:
return
resp = parse_json_response(context.response)
set_placeholder(context, 'REWRITE_OF', _id)
set_placeholder(context, 'REWRITE_ID', resp['_id'])
@then('we get "{field_name}" does not exist')
def then_field_does_not_exist(context, field_name):
resps = parse_json_response(context.response)
if '_items' in resps:
for resp in resps['_items']:
assert field_name not in resp, 'field exists'
else:
assert field_name not in resps, 'field exists'
@then('we get "{field_name}" does exist')
def then_field_does_exist(context, field_name):
resps = parse_json_response(context.response)
for resp in resps['_items']:
assert field_name in resp, 'field does not exist'
@when('we publish "{item_id}" with "{pub_type}" type and "{state}" state')
def step_impl_when_publish_url(context, item_id, pub_type, state):
item_id = apply_placeholders(context, item_id)
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context_data = {"state": state}
if context.text:
data = apply_placeholders(context, context.text)
context_data.update(json.loads(data))
data = json.dumps(context_data)
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/{}/{}'.format(pub_type, item_id)),
data=data, headers=headers)
store_placeholder(context, 'archive_{}'.format(pub_type))
@then('the ingest item is routed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_routed_based_on_routing_scheme(context, rule_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
validate_routed_item(context, rule_name, True)
@then('the ingest item is routed and transformed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_routed_transformed_based_on_routing_scheme(context, rule_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
validate_routed_item(context, rule_name, True, True)
@then('the ingest item is not routed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_not_routed_based_on_routing_scheme(context, rule_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
validate_routed_item(context, rule_name, False)
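# validate_routed_item expects the scenario text to be JSON with at least
# "ingest" (the ingest id) and "routing_scheme" (the scheme _id); the named
# rule's fetch/publish destinations are then checked against the archive and
# published collections.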
def validate_routed_item(context, rule_name, is_routed, is_transformed=False):
data = json.loads(apply_placeholders(context, context.text))
def validate_rule(action, state):
for destination in rule.get('actions', {}).get(action, []):
query = {
'and': [
{'term': {'ingest_id': str(data['ingest'])}},
{'term': {'task.desk': str(destination['desk'])}},
{'term': {'task.stage': str(destination['stage'])}},
{'term': {'state': state}}
]
}
item = get_archive_items(query) + get_published_items(query)
if is_routed:
assert len(item) > 0, 'No routed items found for criteria: ' + str(query)
assert item[0]['ingest_id'] == data['ingest']
assert item[0]['task']['desk'] == str(destination['desk'])
assert item[0]['task']['stage'] == str(destination['stage'])
assert item[0]['state'] == state
if is_transformed:
assert item[0]['abstract'] == 'Abstract has been updated'
assert_items_in_package(item[0], state, str(destination['desk']), str(destination['stage']))
else:
assert len(item) == 0
scheme = get_resource_service('routing_schemes').find_one(_id=data['routing_scheme'], req=None)
rule = next((rule for rule in scheme['rules'] if rule['name'].lower() == rule_name.lower()), {})
validate_rule('fetch', 'routed')
validate_rule('publish', 'published')
@when('we schedule the routing scheme "{scheme_id}"')
def when_we_schedule_the_routing_scheme(context, scheme_id):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
scheme_id = apply_placeholders(context, scheme_id)
url = apply_placeholders(context, 'routing_schemes/%s' % scheme_id)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
rule = res.get('rules')[0]
now = utcnow()
from apps.rules.routing_rules import Weekdays
rule['schedule'] = {
'day_of_week': [
Weekdays.dayname(now + timedelta(days=1)),
Weekdays.dayname(now + timedelta(days=2))
],
'hour_of_day_from': '16:00:00',
'hour_of_day_to': '20:00:00'
}
if len(res.get('rules')) > 1:
rule = res.get('rules')[1]
rule['schedule'] = {
'day_of_week': [Weekdays.dayname(now)]
}
context.response = context.client.patch(get_prefixed_url(context.app, href),
data=json.dumps({'rules': res.get('rules', [])}),
headers=headers)
assert_200(context.response)
def get_archive_items(query):
req = ParsedRequest()
req.max_results = 100
req.args = {'filter': json.dumps(query)}
return list(get_resource_service('archive').get(lookup=None, req=req))
def get_published_items(query):
req = ParsedRequest()
req.max_results = 100
req.args = {'filter': json.dumps(query)}
return list(get_resource_service('published').get(lookup=None, req=req))
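# Packages reference their contents via groups[].refs[].residRef; every
# referenced item must exist with the expected state, desk and stage.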
def assert_items_in_package(item, state, desk, stage):
if item.get('groups'):
terms = [{'term': {'_id': ref.get('residRef')}}
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]]
query = {'or': terms}
items = get_archive_items(query)
assert len(items) == len(terms)
for item in items:
assert item.get('state') == state
assert item.get('task', {}).get('desk') == desk
assert item.get('task', {}).get('stage') == stage
@given('I logout')
def logout(context):
we_have_sessions_get_id(context, '/sessions')
step_impl_when_delete_url(context, '/auth_db/{}'.format(context.session_id))
assert_200(context.response)
@then('we get "{url}" and match')
def we_get_and_match(context, url):
url = apply_placeholders(context, url)
response_data = get_res(url, context)
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data, response_data), True,
msg=str(context_data) + '\n != \n' + str(response_data))
@then('there is no "{key}" in response')
def there_is_no_key_in_response(context, key):
data = get_json_data(context.response)
assert key not in data, 'key "%s" is in %s' % (key, data)
@then('there is no "{key}" in task')
def there_is_no_key_in_preferences(context, key):
data = get_json_data(context.response)['task']
assert key not in data, 'key "%s" is in task' % key
@then('there is no "{key}" in data')
def there_is_no_profile_in_data(context, key):
data = get_json_data(context.response)['_items'][0]['data']
assert key not in data, 'key "%s" is in data' % key
@then('broadcast "{key}" has value "{value}"')
def broadcast_key_has_value(context, key, value):
data = get_json_data(context.response).get('broadcast', {})
value = apply_placeholders(context, value)
if value.lower() == 'none':
assert data[key] is None, 'key "%s" is not none and has value "%s"' % (key, data[key])
else:
assert data[key] == value, 'key "%s" does not have valid value "%s"' % (key, data[key])
@then('there is no "{key}" preference')
def there_is_no_preference(context, key):
data = get_json_data(context.response)
assert key not in data['user_preferences'], '%s is in %s' % (key, data['user_preferences'].keys())
@then('there is no "{key}" in "{namespace}" preferences')
def there_is_no_key_in_namespace_preferences(context, key, namespace):
data = get_json_data(context.response)['user_preferences']
assert key not in data[namespace], 'key "%s" is in %s' % (key, data[namespace])
@then('we check if article has Embargo')
def step_impl_then_check_embargo(context):
assert_200(context.response)
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
if response_data.get('_meta') and response_data.get('_items'):
for item in response_data.get('_items'):
assert_embargo(context, item)
else:
assert_embargo(context, response_data)
def assert_embargo(context, item):
if not item.get('embargo'):
fail_and_print_body(context.response, 'Embargo not found')
@when('embargo lapses for "{item_id}"')
def embargo_lapses(context, item_id):
item_id = apply_placeholders(context, item_id)
item = get_res("/archive/%s" % item_id, context)
updates = {'embargo': (utcnow() - timedelta(minutes=10)),
'schedule_settings': {'utc_embargo': (utcnow() - timedelta(minutes=10))}}
with context.app.test_request_context(context.app.config['URL_PREFIX']):
get_resource_service('archive').system_update(id=item['_id'], original=item, updates=updates)
@then('we validate the published item expiry to be after publish expiry set in desk settings {publish_expiry_in_desk}')
def validate_published_item_expiry(context, publish_expiry_in_desk):
assert_200(context.response)
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
if response_data.get('_meta') and response_data.get('_items'):
for item in response_data.get('_items'):
assert_expiry(item, publish_expiry_in_desk)
else:
assert_expiry(response_data, publish_expiry_in_desk)
@then('we get updated timestamp "{field}"')
def step_we_get_updated_timestamp(context, field):
data = get_json_data(context.response)
timestamp = arrow.get(data[field])
now = utcnow()
assert timestamp + timedelta(seconds=5) > now, 'timestamp < now (%s, %s)' % (timestamp, now) # 5s tolerance
def assert_expiry(item, publish_expiry_in_desk):
embargo = item.get('embargo')
actual = parse_date(item.get('expiry'))
error_message = 'Published Item Expiry validation fails'
publish_expiry_in_desk = int(publish_expiry_in_desk)
if embargo:
expected = get_expiry_date(minutes=publish_expiry_in_desk,
offset=datetime.strptime(embargo, '%Y-%m-%dT%H:%M:%S%z'))
if actual != expected:
raise WooperAssertionError("{}. Expected: {}, Actual: {}".format(error_message, expected, actual))
else:
expected = get_expiry_date(minutes=publish_expiry_in_desk)
if expected < actual:
raise WooperAssertionError("{}. Expected: {}, Actual: {}".format(error_message, expected, actual))
@when('run import legal publish queue')
def run_import_legal_publish_queue(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
from apps.legal_archive import ImportLegalPublishQueueCommand
ImportLegalPublishQueueCommand().run()
@when('we expire items')
def expire_content(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
ids = json.loads(apply_placeholders(context, context.text))
expiry = utcnow() - timedelta(minutes=5)
for item_id in ids:
original = get_resource_service('archive').find_one(req=None, _id=item_id)
get_resource_service('archive').system_update(item_id, {'expiry': expiry}, original)
get_resource_service('published').update_published_items(item_id, 'expiry', expiry)
from apps.archive.commands import RemoveExpiredContent
RemoveExpiredContent().run()
@when('the publish schedule lapses')
def run_overdue_schedule_jobs(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
ids = json.loads(apply_placeholders(context, context.text))
lapse_time = utcnow() - timedelta(minutes=5)
updates = {
'publish_schedule': lapse_time,
'schedule_settings': {
'utc_publish_schedule': lapse_time,
'time_zone': None
}
}
for item_id in ids:
original = get_resource_service('archive').find_one(req=None, _id=item_id)
get_resource_service('archive').system_update(item_id, updates, original)
get_resource_service('published').update_published_items(item_id, 'publish_schedule', lapse_time)
get_resource_service('published').update_published_items(item_id, 'schedule_settings.utc_publish_schedule',
lapse_time)
@when('we transmit items')
def transmit_items(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
from superdesk.publish.publish_content import PublishContent
PublishContent().run()
@when('we remove item "{_id}" from mongo')
def remove_item_from_mongo(context, _id):
with context.app.app_context():
context.app.data.mongo.remove('archive', {'_id': _id})
@then('we get text "{text}" in response field "{field}"')
def we_get_text_in_field(context, text, field):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
resp = parse_json_response(context.response)
assert field in resp, 'Field {} not found in response.'.format(field)
assert isinstance(resp.get(field), str), 'field {} is not a string'.format(field)
assert text in resp.get(field, ''), 'field {} does not contain "{}"; got: {}'.format(field, text, resp.get(field, ''))
@then('we reset priority flag for updated articles')
def we_get_reset_default_priority_for_updated_articles(context):
context.app.config['RESET_PRIORITY_VALUE_FOR_UPDATE_ARTICLES'] = True
@then('we mark the items not moved to legal')
def we_mark_the_items_not_moved_to_legal(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
ids = json.loads(apply_placeholders(context, context.text))
for item_id in ids:
get_resource_service('published').update_published_items(item_id, 'moved_to_legal', False)
@when('we run import legal archive command')
def we_run_import_legal_archive_command(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
from apps.legal_archive.commands import ImportLegalArchiveCommand
ImportLegalArchiveCommand().run()
@then('we find no reference of package "{reference}" in item')
def we_find_no_reference_of_package_in_item(context, reference):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
reference = apply_placeholders(context, reference)
resp = parse_json_response(context.response)
linked_in_packages = resp.get('linked_in_packages', [])
assert reference not in [p.get('package') for p in linked_in_packages], \
'Package reference {} found in item'.format(reference)
@then('we set spike exipry "{expiry}"')
def we_set_spike_exipry(context, expiry):
context.app.settings['SPIKE_EXPIRY_MINUTES'] = int(expiry)
@then('we set published item expiry {expiry}')
def we_set_published_item_expiry(context, expiry):
context.app.settings['PUBLISHED_CONTENT_EXPIRY_MINUTES'] = int(expiry)
@then('we set copy metadata from parent flag')
def we_set_copy_metadata_from_parent(context):
context.app.settings['COPY_METADATA_FROM_PARENT'] = True
@then('we assert the content api item "{item_id}" is published to subscriber "{subscriber}"')
def we_assert_content_api_item_is_published_to_subscriber(context, item_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
req = ParsedRequest()
req.projection = json.dumps({'subscribers': 1})
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert len(item.get('subscribers', [])) > 0, 'No subscribers found.'
assert subscriber in item.get('subscribers', []), 'Subscriber with Id: {} not found.'.format(subscriber)
@then('we assert the content api item "{item_id}" is not published to subscriber "{subscriber}"')
def we_assert_content_api_item_is_not_published_to_subscriber(context, item_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
req = ParsedRequest()
req.projection = json.dumps({'subscribers': 1})
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert subscriber not in item.get('subscribers', []), \
'Subscriber with Id: {} found for the item. '.format(subscriber)
@then('we assert the content api item "{item_id}" is not published to any subscribers')
def we_assert_content_api_item_is_not_published(context, item_id):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
req = ParsedRequest()
req.projection = json.dumps({'subscribers': 1})
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert len(item.get('subscribers', [])) == 0, \
'Item published to subscribers {}.'.format(item.get('subscribers', []))
@then('we ensure that archived schema extra fields are not present in duplicated item')
def we_ensure_that_archived_schema_extra_fields_are_not_present(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
eve_keys = set([config.ID_FIELD, config.LAST_UPDATED, config.DATE_CREATED, config.VERSION, config.ETAG])
archived_schema_keys = set(context.app.config['DOMAIN']['archived']['schema'].keys())
archived_schema_keys |= eve_keys  # set.union() returns a new set; update in place to include Eve's metadata keys
archive_schema_keys = set(context.app.config['DOMAIN']['archive']['schema'].keys())
archive_schema_keys |= eve_keys
extra_fields = [key for key in archived_schema_keys if key not in archive_schema_keys]
duplicate_item = json.loads(context.response.get_data())
for field in extra_fields:
assert field not in duplicate_item, 'Field {} found the duplicate item'.format(field)
@then('we assert content api item "{item_id}" with associated item "{embedded_id}" is published to "{subscriber}"')
def we_assert_that_associated_item_for_subscriber(context, item_id, embedded_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
embedded_id = apply_placeholders(context, embedded_id)
req = ParsedRequest()
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert embedded_id in (item.get('associations') or {}), '{} association not found.'.format(embedded_id)
assert subscriber in (item['associations'][embedded_id] or {}).get('subscribers', []), \
'{} subscriber not found in associations {}'.format(subscriber, embedded_id)
@then('we assert content api item "{item_id}" with associated item "{embedded_id}" is not published to "{subscriber}"')
def we_assert_that_associated_item_not_for_subscriber(context, item_id, embedded_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
embedded_id = apply_placeholders(context, embedded_id)
req = ParsedRequest()
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert embedded_id in (item.get('associations') or {}), '{} association not found.'.format(embedded_id)
assert subscriber not in (item['associations'][embedded_id] or {}).get('subscribers', []), \
'{} subscriber found in associations {}'.format(subscriber, embedded_id)
@then('file exists "{path}"')
def then_file_exists(context, path):
assert os.path.isfile(path), '{} is not a file'.format(path)
| agpl-3.0 | -117,397,680,062,153,810 | 38.317652 | 120 | 0.64768 | false | 3.620787 | true | false | false |
ESS-LLP/erpnext-healthcare | erpnext/hr/doctype/payroll_entry/payroll_entry.py | 1 | 20575 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from dateutil.relativedelta import relativedelta
from frappe.utils import cint, flt, nowdate, add_days, getdate, fmt_money, add_to_date, DATE_FORMAT, date_diff
from frappe import _
from erpnext.accounts.utils import get_fiscal_year
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
class PayrollEntry(Document):
def on_submit(self):
self.create_salary_slips()
def before_submit(self):
if self.validate_attendance:
if self.validate_employee_attendance():
frappe.throw(_("Cannot Submit, Employees left to mark attendance"))
def get_emp_list(self):
"""
Returns list of active employees based on selected criteria
and for which salary structure exists
"""
cond = self.get_filter_condition()
cond += self.get_joining_relieving_condition()
condition = ''
if self.payroll_frequency:
condition = """and payroll_frequency = '%(payroll_frequency)s'"""% {"payroll_frequency": self.payroll_frequency}
sal_struct = frappe.db.sql_list("""
select
name from `tabSalary Structure`
where
docstatus = 1 and
is_active = 'Yes'
and company = %(company)s and
ifnull(salary_slip_based_on_timesheet,0) = %(salary_slip_based_on_timesheet)s
{condition}""".format(condition=condition),
{"company": self.company, "salary_slip_based_on_timesheet":self.salary_slip_based_on_timesheet})
if sal_struct:
cond += "and t2.salary_structure IN %(sal_struct)s "
cond += "and %(from_date)s >= t2.from_date"
emp_list = frappe.db.sql("""
select
distinct t1.name as employee, t1.employee_name, t1.department, t1.designation
from
`tabEmployee` t1, `tabSalary Structure Assignment` t2
where
t1.name = t2.employee
and t2.docstatus = 1
%s order by t2.from_date desc
""" % cond, {"sal_struct": tuple(sal_struct), "from_date": self.end_date}, as_dict=True)
return emp_list
def fill_employee_details(self):
self.set('employees', [])
employees = self.get_emp_list()
if not employees:
frappe.throw(_("No employees for the mentioned criteria"))
for d in employees:
self.append('employees', d)
self.number_of_employees = len(employees)
if self.validate_attendance:
return self.validate_employee_attendance()
def get_filter_condition(self):
self.check_mandatory()
cond = ''
for f in ['company', 'branch', 'department', 'designation']:
if self.get(f):
cond += " and t1." + f + " = '" + self.get(f).replace("'", "\'") + "'"
return cond
def get_joining_relieving_condition(self):
cond = """
and ifnull(t1.date_of_joining, '0000-00-00') <= '%(end_date)s'
and ifnull(t1.relieving_date, '2199-12-31') >= '%(start_date)s'
""" % {"start_date": self.start_date, "end_date": self.end_date}
return cond
def check_mandatory(self):
for fieldname in ['company', 'start_date', 'end_date']:
if not self.get(fieldname):
frappe.throw(_("Please set {0}").format(self.meta.get_label(fieldname)))
def create_salary_slips(self):
"""
Creates salary slip for selected employees if already not created
"""
self.check_permission('write')
self.created = 1
emp_list = [d.employee for d in self.get_emp_list()]
if emp_list:
args = frappe._dict({
"salary_slip_based_on_timesheet": self.salary_slip_based_on_timesheet,
"payroll_frequency": self.payroll_frequency,
"start_date": self.start_date,
"end_date": self.end_date,
"company": self.company,
"posting_date": self.posting_date,
"deduct_tax_for_unclaimed_employee_benefits": self.deduct_tax_for_unclaimed_employee_benefits,
"deduct_tax_for_unsubmitted_tax_exemption_proof": self.deduct_tax_for_unsubmitted_tax_exemption_proof,
"payroll_entry": self.name
})
if len(emp_list) > 30:
frappe.enqueue(create_salary_slips_for_employees, timeout=600, employees=emp_list, args=args)
else:
create_salary_slips_for_employees(emp_list, args, publish_progress=False)
def get_sal_slip_list(self, ss_status, as_dict=False):
"""
Returns list of salary slips based on selected criteria
"""
cond = self.get_filter_condition()
ss_list = frappe.db.sql("""
select t1.name, t1.salary_structure from `tabSalary Slip` t1
where t1.docstatus = %s and t1.start_date >= %s and t1.end_date <= %s
and (t1.journal_entry is null or t1.journal_entry = "") and ifnull(salary_slip_based_on_timesheet,0) = %s %s
""" % ('%s', '%s', '%s','%s', cond), (ss_status, self.start_date, self.end_date, self.salary_slip_based_on_timesheet), as_dict=as_dict)
return ss_list
def submit_salary_slips(self):
self.check_permission('write')
ss_list = self.get_sal_slip_list(ss_status=0)
if len(ss_list) > 30:
frappe.enqueue(submit_salary_slips_for_employees, timeout=600, payroll_entry=self, salary_slips=ss_list)
else:
submit_salary_slips_for_employees(self, ss_list, publish_progress=False)
def email_salary_slip(self, submitted_ss):
if frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee"):
for ss in submitted_ss:
ss.email_salary_slip()
def get_loan_details(self):
"""
Get loan details from submitted salary slip based on selected criteria
"""
cond = self.get_filter_condition()
return frappe.db.sql(""" select eld.loan_account, eld.loan,
eld.interest_income_account, eld.principal_amount, eld.interest_amount, eld.total_payment
from
`tabSalary Slip` t1, `tabSalary Slip Loan` eld
where
t1.docstatus = 1 and t1.name = eld.parent and start_date >= %s and end_date <= %s %s
""" % ('%s', '%s', cond), (self.start_date, self.end_date), as_dict=True) or []
def get_salary_component_account(self, salary_component):
account = frappe.db.get_value("Salary Component Account",
{"parent": salary_component, "company": self.company}, "default_account")
if not account:
frappe.throw(_("Please set default account in Salary Component {0}")
.format(salary_component))
return account
def get_salary_components(self, component_type):
salary_slips = self.get_sal_slip_list(ss_status = 1, as_dict = True)
if salary_slips:
salary_components = frappe.db.sql("""select salary_component, amount, parentfield
from `tabSalary Detail` where parentfield = '%s' and parent in (%s)""" %
(component_type, ', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=True)
return salary_components
def get_salary_component_total(self, component_type = None):
salary_components = self.get_salary_components(component_type)
if salary_components:
component_dict = {}
for item in salary_components:
add_component_to_accrual_jv_entry = True
if component_type == "earnings":
is_flexible_benefit, only_tax_impact = frappe.db.get_value("Salary Component", item['salary_component'], ['is_flexible_benefit', 'only_tax_impact'])
if is_flexible_benefit == 1 and only_tax_impact ==1:
add_component_to_accrual_jv_entry = False
if add_component_to_accrual_jv_entry:
component_dict[item['salary_component']] = component_dict.get(item['salary_component'], 0) + item['amount']
account_details = self.get_account(component_dict = component_dict)
return account_details
def get_account(self, component_dict = None):
account_dict = {}
for s, a in component_dict.items():
account = self.get_salary_component_account(s)
account_dict[account] = account_dict.get(account, 0) + a
return account_dict
def get_default_payroll_payable_account(self):
payroll_payable_account = frappe.get_cached_value('Company',
{"company_name": self.company}, "default_payroll_payable_account")
if not payroll_payable_account:
frappe.throw(_("Please set Default Payroll Payable Account in Company {0}")
.format(self.company))
return payroll_payable_account
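# Accrual voucher shape, as built below: debit each earnings component
# account, credit each deduction and loan account, and credit the payroll
# payable account with the remaining net amount so the entry balances.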
def make_accrual_jv_entry(self):
self.check_permission('write')
earnings = self.get_salary_component_total(component_type = "earnings") or {}
deductions = self.get_salary_component_total(component_type = "deductions") or {}
default_payroll_payable_account = self.get_default_payroll_payable_account()
loan_details = self.get_loan_details()
jv_name = ""
precision = frappe.get_precision("Journal Entry Account", "debit_in_account_currency")
if earnings or deductions:
journal_entry = frappe.new_doc('Journal Entry')
journal_entry.voucher_type = 'Journal Entry'
journal_entry.user_remark = _('Accrual Journal Entry for salaries from {0} to {1}')\
.format(self.start_date, self.end_date)
journal_entry.company = self.company
journal_entry.posting_date = self.posting_date
accounts = []
payable_amount = 0
# Earnings
for acc, amount in earnings.items():
payable_amount += flt(amount, precision)
accounts.append({
"account": acc,
"debit_in_account_currency": flt(amount, precision),
"cost_center": self.cost_center,
"project": self.project
})
# Deductions
for acc, amount in deductions.items():
payable_amount -= flt(amount, precision)
accounts.append({
"account": acc,
"credit_in_account_currency": flt(amount, precision),
"cost_center": self.cost_center,
"project": self.project
})
# Loan
for data in loan_details:
accounts.append({
"account": data.loan_account,
"credit_in_account_currency": data.principal_amount
})
if data.interest_amount and not data.interest_income_account:
frappe.throw(_("Select interest income account in loan {0}").format(data.loan))
if data.interest_income_account and data.interest_amount:
accounts.append({
"account": data.interest_income_account,
"credit_in_account_currency": data.interest_amount,
"cost_center": self.cost_center,
"project": self.project
})
payable_amount -= flt(data.total_payment, precision)
# Payable amount
accounts.append({
"account": default_payroll_payable_account,
"credit_in_account_currency": flt(payable_amount, precision)
})
journal_entry.set("accounts", accounts)
journal_entry.title = default_payroll_payable_account
journal_entry.save()
try:
journal_entry.submit()
jv_name = journal_entry.name
self.update_salary_slip_status(jv_name = jv_name)
except Exception as e:
frappe.msgprint(str(e))
return jv_name
def make_payment_entry(self):
self.check_permission('write')
cond = self.get_filter_condition()
salary_slip_name_list = frappe.db.sql(""" select t1.name from `tabSalary Slip` t1
where t1.docstatus = 1 and start_date >= %s and end_date <= %s %s
""" % ('%s', '%s', cond), (self.start_date, self.end_date), as_list = True)
if salary_slip_name_list and len(salary_slip_name_list) > 0:
salary_slip_total = 0
for salary_slip_name in salary_slip_name_list:
salary_slip = frappe.get_doc("Salary Slip", salary_slip_name[0])
for sal_detail in salary_slip.earnings:
is_flexible_benefit, only_tax_impact, creat_separate_je, statistical_component = frappe.db.get_value("Salary Component", sal_detail.salary_component,
['is_flexible_benefit', 'only_tax_impact', 'create_separate_payment_entry_against_benefit_claim', 'statistical_component'])
if only_tax_impact != 1 and statistical_component != 1:
if is_flexible_benefit == 1 and creat_separate_je == 1:
self.create_journal_entry(sal_detail.amount, sal_detail.salary_component)
else:
salary_slip_total += sal_detail.amount
for sal_detail in salary_slip.deductions:
statistical_component = frappe.db.get_value("Salary Component", sal_detail.salary_component, 'statistical_component')
if statistical_component != 1:
salary_slip_total -= sal_detail.amount
if salary_slip_total > 0:
self.create_journal_entry(salary_slip_total, "salary")
def create_journal_entry(self, je_payment_amount, user_remark):
default_payroll_payable_account = self.get_default_payroll_payable_account()
precision = frappe.get_precision("Journal Entry Account", "debit_in_account_currency")
journal_entry = frappe.new_doc('Journal Entry')
journal_entry.voucher_type = 'Bank Entry'
journal_entry.user_remark = _('Payment of {0} from {1} to {2}')\
.format(user_remark, self.start_date, self.end_date)
journal_entry.company = self.company
journal_entry.posting_date = self.posting_date
payment_amount = flt(je_payment_amount, precision)
journal_entry.set("accounts", [
{
"account": self.payment_account,
"credit_in_account_currency": payment_amount
},
{
"account": default_payroll_payable_account,
"debit_in_account_currency": payment_amount,
"reference_type": self.doctype,
"reference_name": self.name
}
])
journal_entry.save(ignore_permissions = True)
def update_salary_slip_status(self, jv_name = None):
ss_list = self.get_sal_slip_list(ss_status=1)
for ss in ss_list:
ss_obj = frappe.get_doc("Salary Slip",ss[0])
frappe.db.set_value("Salary Slip", ss_obj.name, "journal_entry", jv_name)
def set_start_end_dates(self):
self.update(get_start_end_dates(self.payroll_frequency,
self.start_date or self.posting_date, self.company))
def validate_employee_attendance(self):
employees_to_mark_attendance = []
days_in_payroll, days_holiday, days_attendance_marked = 0, 0, 0
for employee_detail in self.employees:
days_holiday = self.get_count_holidays_of_employee(employee_detail.employee)
days_attendance_marked = self.get_count_employee_attendance(employee_detail.employee)
days_in_payroll = date_diff(self.end_date, self.start_date) + 1
if days_in_payroll > days_holiday + days_attendance_marked:
employees_to_mark_attendance.append({
"employee": employee_detail.employee,
"employee_name": employee_detail.employee_name
})
return employees_to_mark_attendance
def get_count_holidays_of_employee(self, employee):
holiday_list = get_holiday_list_for_employee(employee)
holidays = 0
if holiday_list:
days = frappe.db.sql("""select count(*) from tabHoliday where
parent=%s and holiday_date between %s and %s""", (holiday_list,
self.start_date, self.end_date))
if days and days[0][0]:
holidays = days[0][0]
return holidays
def get_count_employee_attendance(self, employee):
marked_days = 0
attendances = frappe.db.sql("""select count(*) from tabAttendance where
employee=%s and docstatus=1 and attendance_date between %s and %s""",
(employee, self.start_date, self.end_date))
if attendances and attendances[0][0]:
marked_days = attendances[0][0]
return marked_days
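# Hypothetical usage of the helper below:
#   get_start_end_dates("Monthly", "2019-01-10", "Acme Ltd")
# returns frappe._dict(start_date=<month start>, end_date=<month end>);
# "Bimonthly" splits the month at the 15th/16th instead.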
@frappe.whitelist()
def get_start_end_dates(payroll_frequency, start_date=None, company=None):
'''Returns dict of start and end dates for given payroll frequency based on start_date'''
if payroll_frequency == "Monthly" or payroll_frequency == "Bimonthly" or payroll_frequency == "":
fiscal_year = get_fiscal_year(start_date, company=company)[0]
month = "%02d" % getdate(start_date).month
m = get_month_details(fiscal_year, month)
if payroll_frequency == "Bimonthly":
if getdate(start_date).day <= 15:
start_date = m['month_start_date']
end_date = m['month_mid_end_date']
else:
start_date = m['month_mid_start_date']
end_date = m['month_end_date']
else:
start_date = m['month_start_date']
end_date = m['month_end_date']
if payroll_frequency == "Weekly":
end_date = add_days(start_date, 6)
if payroll_frequency == "Fortnightly":
end_date = add_days(start_date, 13)
if payroll_frequency == "Daily":
end_date = start_date
return frappe._dict({
'start_date': start_date, 'end_date': end_date
})
def get_frequency_kwargs(frequency_name):
frequency_dict = {
'monthly': {'months': 1},
'fortnightly': {'days': 14},
'weekly': {'days': 7},
'daily': {'days': 1}
}
return frequency_dict.get(frequency_name)
@frappe.whitelist()
def get_end_date(start_date, frequency):
start_date = getdate(start_date)
frequency = frequency.lower() if frequency else 'monthly'
kwargs = get_frequency_kwargs(frequency) if frequency != 'bimonthly' else get_frequency_kwargs('monthly')
# weekly, fortnightly and daily intervals have fixed days so no problems
end_date = add_to_date(start_date, **kwargs) - relativedelta(days=1)
if frequency != 'bimonthly':
return dict(end_date=end_date.strftime(DATE_FORMAT))
else:
return dict(end_date='')
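# get_month_details anchors on the fiscal year's start date so payroll months
# follow the fiscal calendar; month_mid_start/month_mid_end mark the 16th/15th
# split used by the bimonthly frequency.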
def get_month_details(year, month):
ysd = frappe.db.get_value("Fiscal Year", year, "year_start_date")
if ysd:
import calendar, datetime
diff_mnt = cint(month)-cint(ysd.month)
if diff_mnt<0:
diff_mnt = 12-int(ysd.month)+cint(month)
msd = ysd + relativedelta(months=diff_mnt) # month start date
month_days = cint(calendar.monthrange(cint(msd.year) ,cint(month))[1]) # days in month
mid_start = datetime.date(msd.year, cint(month), 16) # month mid start date
mid_end = datetime.date(msd.year, cint(month), 15) # month mid end date
med = datetime.date(msd.year, cint(month), month_days) # month end date
return frappe._dict({
'year': msd.year,
'month_start_date': msd,
'month_end_date': med,
'month_mid_start_date': mid_start,
'month_mid_end_date': mid_end,
'month_days': month_days
})
else:
frappe.throw(_("Fiscal Year {0} not found").format(year))
def get_payroll_entry_bank_entries(payroll_entry_name):
journal_entries = frappe.db.sql(
'select name from `tabJournal Entry Account` '
'where reference_type="Payroll Entry" '
'and reference_name=%s and docstatus=1',
payroll_entry_name,
as_dict=1
)
return journal_entries
@frappe.whitelist()
def payroll_entry_has_bank_entries(name):
response = {}
bank_entries = get_payroll_entry_bank_entries(name)
response['submitted'] = 1 if bank_entries else 0
return response
def create_salary_slips_for_employees(employees, args, publish_progress=True):
salary_slips_exists_for = get_existing_salary_slips(employees, args)
count=0
for emp in employees:
if emp not in salary_slips_exists_for:
args.update({
"doctype": "Salary Slip",
"employee": emp
})
ss = frappe.get_doc(args)
ss.insert()
count+=1
if publish_progress:
frappe.publish_progress(count*100/len(set(employees) - set(salary_slips_exists_for)),
title = _("Creating Salary Slips..."))
payroll_entry = frappe.get_doc("Payroll Entry", args.payroll_entry)
payroll_entry.db_set("salary_slips_created", 1)
payroll_entry.notify_update()
def get_existing_salary_slips(employees, args):
return frappe.db.sql_list("""
select distinct employee from `tabSalary Slip`
where docstatus!= 2 and company = %s
and start_date >= %s and end_date <= %s
and employee in (%s)
""" % ('%s', '%s', '%s', ', '.join(['%s']*len(employees))),
[args.company, args.start_date, args.end_date] + employees)
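# Slips with negative net pay are never submitted; they are collected in
# not_submitted_ss and only reported via msgprint at the end.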
def submit_salary_slips_for_employees(payroll_entry, salary_slips, publish_progress=True):
submitted_ss = []
not_submitted_ss = []
frappe.flags.via_payroll_entry = True
count = 0
for ss in salary_slips:
ss_obj = frappe.get_doc("Salary Slip",ss[0])
if ss_obj.net_pay<0:
not_submitted_ss.append(ss[0])
else:
try:
ss_obj.submit()
submitted_ss.append(ss_obj)
except frappe.ValidationError:
not_submitted_ss.append(ss[0])
count += 1
if publish_progress:
frappe.publish_progress(count*100/len(salary_slips), title = _("Submitting Salary Slips..."))
if submitted_ss:
payroll_entry.make_accrual_jv_entry()
frappe.msgprint(_("Salary Slip submitted for period from {0} to {1}")
.format(ss_obj.start_date, ss_obj.end_date))
payroll_entry.email_salary_slip(submitted_ss)
payroll_entry.db_set("salary_slips_submitted", 1)
payroll_entry.notify_update()
if not submitted_ss and not not_submitted_ss:
frappe.msgprint(_("No salary slip found to submit for the above selected criteria OR salary slip already submitted"))
if not_submitted_ss:
frappe.msgprint(_("Could not submit some Salary Slips"))
def get_payroll_entries_for_jv(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""
select name from `tabPayroll Entry`
where `{key}` LIKE %(txt)s
and name not in
(select reference_name from `tabJournal Entry Account`
where reference_type="Payroll Entry")
order by name limit %(start)s, %(page_len)s"""
.format(key=searchfield), {
'txt': "%%%s%%" % frappe.db.escape(txt),
'start': start, 'page_len': page_len
})
| gpl-3.0 | 4,264,990,217,364,493,000 | 35.675579 | 154 | 0.693026 | false | 2.978 | false | false | false |
alvin777/excelsior | sort/benchmark.py | 1 | 2909 | #!/usr/bin/python
import time
from simple_sorts import *
from shell_sort import *
from quick_sort import *
from external_merge_sort import *
from radix_sort import *
from merge_sort import *
from heap_sort import *
from intro_sort import *
from timsort import *
from list_generators import *
result = {}
def run_until(sort_func, max_duration = 1.0, generator = random_generator):
print sort_func
duration = 0
list_size = 100
while duration < max_duration:
randomList = [x for x in generator(list_size)]
time_start = time.time()
try:
sort_func(randomList)
except RuntimeError:
print 'failed on list size: %5d' % list_size
return
duration = time.time() - time_start
print 'list size: %7d, duration: %0.3f' % (list_size, duration)
if not generator in result:
result[generator] = {}
if not list_size in result[generator]:
result[generator][list_size] = {}
result[generator][list_size][sort_func] = duration
list_size *= 2
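# run_until keeps doubling the list size until a single sort at that size
# exceeds max_duration; timings are stored as
# result[generator][list_size][sort_func] = seconds and printed later as a
# tab-separated table.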
def test_run_benchmarks():
generators_list = [random_generator, almost_sorted_generator, reverse_sorted_generator, few_uniq_generator]
# generators_list = [random_generator, reverse_sorted_generator]
# generators_list = [few_uniq_generator]
# sort_func_list = [bubble_sort, insertion_sort, insertion_sort2]
sort_func_list = [bubble_sort, insertion_sort, insertion_sort2, selection_sort, shell_sort, \
merge_sort, quick_sort, lambda x: quick_sort(x, splitByMedian), heap_sort,
lambda x: radix_sort(x, 1000), intro_sort, timsort]
# sort_func_list = [quick_sort, \
# lambda x: quick_sort(x, partition_func=splitByMiddleElement), \
# lambda x: quick_sort(x, partition_func=splitByMedian), \
# lambda x: quick_sort(x, leaf_sort_func=leaf_insertion_sort)]
# sort_func_list = [radix_sort, \
# lambda x: radix_sort(x, 2), \
# lambda x: radix_sort(x, 100),
# lambda x: radix_sort(x, 1000),
# lambda x: radix_sort(x, 10000)
# ]
for generator in generators_list:
print generator
for sort_func in sort_func_list:
run_until(sort_func, 0.5, generator)
for generator in generators_list:
print generator
for list_size in sorted(result[generator]):
sys.stdout.write(str(list_size) + "\t")
for sort_func in sort_func_list:
if sort_func in result[generator][list_size]:
sys.stdout.write("{:.3f}\t".format(result[generator][list_size][sort_func]))
else:
sys.stdout.write("\t")
sys.stdout.write("\n")
test_run_benchmarks() | gpl-2.0 | -2,014,888,064,060,402,700 | 34.487805 | 111 | 0.584393 | false | 3.691624 | false | false | false |
amitdhiman000/MyOffers | myadmin/views.py | 1 | 7396 | from myadmin.backenddb import (insert_default_areas, insert_custom_areas, insert_default_categories)
from offer.models import CategoryModel
from locus.models import (CountryModel ,StateModel, CityModel, AreaModel)
from mail.models import (PublicMessageModel)
from myadmin.preload_data import (gCountries, gCategories)
from base.apputil import (App_AdminRequired, App_Render)
# Create your views here.
@App_AdminRequired
def home(request):
data = {'title': 'MyAdmin'}
return App_Render(request, 'admin/admin_home_1.html', data)
@App_AdminRequired
def locus_area_view(request, country, state, city, area):
print(area)
areas = AreaModel.fetch_by_name(area, city, state, country)
data = {'title': 'MyAdmin', 'country': country, 'state': state, 'city': city, 'area': area, 'areas': areas}
return App_Render(request, 'admin/admin_locus_area_1.html', data)
@App_AdminRequired
def locus_city_view(request, country, state, city):
print(city)
filter = {'fk_city__name': city, 'fk_state__name': state, 'fk_country__name': country}
areas = AreaModel.fetch(filter)
data = {'title': 'MyAdmin', 'country': country, 'state': state, 'city': city, 'areas': areas}
return App_Render(request, 'admin/admin_locus_city_1.html', data)
@App_AdminRequired
def locus_state_view(request, country, state):
print(state)
filter = {'fk_state__name': state, 'fk_country__name': country}
cities = CityModel.fetch(filter)
data = {'title': 'MyAdmin', 'country': country, 'state': state, 'cities': cities}
return App_Render(request, 'admin/admin_locus_state_1.html', data)
@App_AdminRequired
def locus_country_view(request, country):
print(country)
states = StateModel.fetch({'fk_country__name': country})
data = {'title': 'MyAdmin', 'country': country, 'states': states}
return App_Render(request, 'admin/admin_locus_country_1.html', data)
@App_AdminRequired
def locus_view0(request):
countries = CountryModel.fetch_all()
states = StateModel.fetch({'fk_country__name': 'India'})
data = {'title': 'MyAdmin', 'countries': countries, 'states': states}
return App_Render(request, 'admin/admin_locus_view_1.html', data)
@App_AdminRequired
def locus_view(request, query=''):
print('query : '+query)
params = query.rstrip('/').split('/')
length = len(params)
print(params)
print('length : '+str(length))
if length == 1 and params[0] != '':
return locus_country_view(request, params[0])
elif length == 2:
return locus_state_view(request, params[0], params[1])
elif length == 3:
return locus_city_view(request, params[0], params[1], params[2])
elif length == 4:
return locus_area_view(request, params[0], params[1], params[2], params[3])
return locus_view0(request)
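# locus_view dispatches on path depth, e.g. (hypothetical URLs)
# /myadmin/locus/India -> country view, /myadmin/locus/India/Karnataka ->
# state view, down to the area view when four segments are present.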
@App_AdminRequired
def locus_country_add_view(request, country):
states = {}
if country in gCountries:
states = gCountries[country]
data = {'title': 'MyAdmin', 'country': country, 'states': states}
return App_Render(request, 'admin/admin_locus_country_add_1.html', data)
@App_AdminRequired
def locus_add_view0(request):
countries = list(gCountries.keys())
data = {'title': 'MyAdmin', 'countries': countries}
return App_Render(request, 'admin/admin_locus_add_1.html', data)
@App_AdminRequired
def locus_add_view(request, query=''):
print('query : '+query)
params = query.rstrip('/').split('/')
length = len(params)
print(params)
print('length : '+str(length))
if length == 1 and params[0] != '':
return locus_country_add_view(request, params[0])
elif length == 2:
return locus_state_add_view(request, params[0], params[1])
elif length == 3:
return locus_city_add_view(request, params[0], params[1], params[2])
elif length == 4:
return locus_area_add_view(request, params[0], params[1], params[2], params[3])
return locus_add_view0(request)
@App_AdminRequired
def locus_auth(request, query=''):
print('query : '+query)
params = query.rstrip('/').split('/')
length = len(params)
print(params)
print('length : '+str(length))
if length < 3:
return None
country = params[0]
state = params[1]
city = params[2]
print(country, state, city)
if CityModel.fetch_by_name(city_name=city, state_name=state, country_name=country) is None:
insert_custom_areas(city, state, country)
areas = AreaModel.fetch_by_city(city)
data = {'title': 'Location', 'country': country, 'state': state, 'city': city, 'areas': areas}
return App_Render(request, 'admin/admin_locus_added_1.html', data)
@App_AdminRequired
def category_view(request, query=''):
print('query : '+query)
params = query.rstrip('/').split('/')
length = len(params)
print(params)
print('length : '+str(length))
name = "All"
if length > 0 and params[0] != '':
name = params[length - 1]
categories = CategoryModel.fetch_children(name)
data = {'title': 'MyAdmin', 'categories': categories}
return App_Render(request, 'admin/admin_category_1.html', data)
@App_AdminRequired
def category_add_view0(request):
base_cat = gCategories[0]['sub']
print(len(base_cat))
data = {'title': 'MyAdmin', 'categories': base_cat}
return App_Render(request, 'admin/admin_category_add_1.html', data)
@App_AdminRequired
def category_add_view1(request, params, length):
print(request)
index = 0
cat_list = gCategories
while index < length:
for cat in cat_list:
if cat['name'] == params[index]:
if 'sub' in cat:
cat_list = cat['sub']
else:
print('No more subcategories, jump to root')
cat_list = cat
index = length
break
index = index + 1
nav_links = []
url = '/myadmin/category-add/'
for param in params:
print('param : '+param)
url += param + "/"
nav_links.append({'text': param, 'href': url})
data = {}
if type(cat_list) is list:
categories = []
desired_attrs = ['name', 'desc']
for cat in cat_list:
categories.append({ key: value for key,value in cat.items() if key in desired_attrs })
print(len(categories))
print(categories)
data.update({'categories': categories})
else:
data.update({'category': cat_list})
data.update({'title': 'Add Category | MyAdmin', 'nav_links': nav_links, })
return App_Render(request, 'admin/admin_category_add_1.html', data)
@App_AdminRequired
def category_add(request, params):
insert_default_categories()
@App_AdminRequired
def category_add_view(request, query):
print('query : '+query)
params = query.rstrip('/').split('/')
length = len(params)
print(params)
print('length : '+str(length))
command = request.GET.get('command', '')
if command == 'Add':
category_add(request, params)
if params[0] == '':
params[0] = 'All';
return category_add_view1(request, params, length)
@App_AdminRequired
def messages_view(request):
print('chaum executing this')
messages = PublicMessageModel.fetch_all()
data = {'title': 'Messages', 'messages': messages}
return App_Render(request, 'admin/admin_message_1.html', data)
| apache-2.0 | -4,578,698,669,415,442,000 | 32.165919 | 111 | 0.636966 | false | 3.44 | false | false | false |
uqyge/combustionML | FPV_ANN_pureResNet/data_reader_2.py | 1 | 5981 | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
class data_scaler(object):
def __init__(self):
self.norm = None
self.norm_1 = None
self.std = None
self.case = None
self.scale = 1
self.bias = 1e-20
# self.bias = 1
self.switcher = {
'min_std': 'min_std',
'std2': 'std2',
'std_min': 'std_min',
'min': 'min',
'no': 'no',
'log': 'log',
'log_min': 'log_min',
'log2': 'log2',
'tan': 'tan'
}
def fit_transform(self, input_data, case):
self.case = case
if self.switcher.get(self.case) == 'min_std':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.norm.fit_transform(input_data)
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'std2':
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
if self.switcher.get(self.case) == 'std_min':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
out = self.norm.fit_transform(out)
if self.switcher.get(self.case) == 'min':
self.norm = MinMaxScaler()
out = self.norm.fit_transform(input_data)
if self.switcher.get(self.case) == 'no':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = input_data
if self.switcher.get(self.case) == 'log':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
self.std = StandardScaler()
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'log_min':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
self.norm = MinMaxScaler()
out = self.norm.fit_transform(out)
if self.switcher.get(self.case) == 'log2':
self.norm = MinMaxScaler()
self.norm_1 = MinMaxScaler()
out = self.norm.fit_transform(input_data)
out = np.log(np.asarray(out) + self.bias)
out = self.norm_1.fit_transform(out)
if self.switcher.get(self.case) == 'tan':
self.norm = MaxAbsScaler()
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
out = self.norm.fit_transform(out)
out = np.tan(out / (2 * np.pi + self.bias))
return out
def transform(self, input_data):
if self.switcher.get(self.case) == 'min_std':
out = self.norm.transform(input_data)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'std2':
out = self.std.transform(input_data)
if self.switcher.get(self.case) == 'std_min':
out = self.std.transform(input_data)
out = self.norm.transform(out)
if self.switcher.get(self.case) == 'min':
out = self.norm.transform(input_data)
if self.switcher.get(self.case) == 'no':
out = input_data
if self.switcher.get(self.case) == 'log':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'log_min':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
out = self.norm.transform(out)
if self.switcher.get(self.case) == 'log2':
out = self.norm.transform(input_data)
out = np.log(np.asarray(out) + self.bias)
out = self.norm_1.transform(out)
if self.switcher.get(self.case) == 'tan':
out = self.std.transform(input_data)
out = self.norm.transform(out)
out = np.tan(out / (2 * np.pi + self.bias))
return out
def inverse_transform(self, input_data):
if self.switcher.get(self.case) == 'min_std':
out = self.std.inverse_transform(input_data)
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'std2':
out = self.std.inverse_transform(input_data)
if self.switcher.get(self.case) == 'std_min':
out = self.norm.inverse_transform(input_data)
out = self.std.inverse_transform(out)
if self.switcher.get(self.case) == 'min':
out = self.norm.inverse_transform(input_data)
if self.switcher.get(self.case) == 'no':
out = input_data
if self.switcher.get(self.case) == 'log':
out = self.std.inverse_transform(input_data)
out = (np.exp(-out) - self.bias) * self.scale
if self.switcher.get(self.case) == 'log_min':
out = self.norm.inverse_transform(input_data)
out = (np.exp(-out) - self.bias) * self.scale
if self.switcher.get(self.case) == 'log2':
out = self.norm_1.inverse_transform(input_data)
out = np.exp(out) - self.bias
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'tan':
out = (2 * np.pi + self.bias) * np.arctan(input_data)
out = self.norm.inverse_transform(out)
out = self.std.inverse_transform(out)
return out
def read_h5_data(fileName, input_features, labels):
df = pd.read_hdf(fileName)
df = df[df['f'] < 0.45]
input_df = df[input_features]
in_scaler = data_scaler()
input_np = in_scaler.fit_transform(input_df.values, 'no')
label_df = df[labels].clip(0)
# if 'PVs' in labels:
# label_df['PVs']=np.log(label_df['PVs']+1)
out_scaler = data_scaler()
label_np = out_scaler.fit_transform(label_df.values, 'std2')
return input_np, label_np, df, in_scaler, out_scaler | mit | 5,411,136,813,735,696,000 | 33.578035 | 75 | 0.546397 | false | 3.381006 | false | false | false |
UITools/saleor | saleor/shipping/migrations/0013_auto_20180822_0721.py | 1 | 4293 | # Generated by Django 2.0.3 on 2018-08-22 12:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import django_measurement.models
import django_prices.models
import saleor.core.weight
class Migration(migrations.Migration):
dependencies = [
('checkout', '0010_auto_20180822_0720'),
('order', '0052_auto_20180822_0720'),
('shipping', '0012_remove_legacy_shipping_methods'),
]
operations = [
migrations.CreateModel(
name='ShippingMethodTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(max_length=10)),
('name', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='ShippingZone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('countries', django_countries.fields.CountryField(max_length=749, multiple=True)),
],
options={
'permissions': (('manage_shipping', 'Manage shipping.'),),
},
),
migrations.AlterUniqueTogether(
name='shippingmethodcountry',
unique_together=set(),
),
migrations.RemoveField(
model_name='shippingmethodcountry',
name='shipping_method',
),
migrations.AlterModelOptions(
name='shippingmethod',
options={},
),
migrations.RemoveField(
model_name='shippingmethod',
name='description',
),
migrations.AddField(
model_name='shippingmethod',
name='maximum_order_price',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='maximum_order_weight',
field=django_measurement.models.MeasurementField(blank=True, measurement_class='Mass', null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='minimum_order_price',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, default=0, max_digits=12, null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='minimum_order_weight',
field=django_measurement.models.MeasurementField(blank=True, default=saleor.core.weight.zero_weight, measurement_class='Mass', null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='price',
field=django_prices.models.MoneyField(currency=settings.DEFAULT_CURRENCY, decimal_places=2, default=0, max_digits=12),
),
migrations.AddField(
model_name='shippingmethod',
name='type',
field=models.CharField(choices=[('price', 'Price based shipping'), ('weight', 'Weight based shipping')], default=None, max_length=30),
preserve_default=False,
),
migrations.DeleteModel(
name='ShippingMethodCountry',
),
migrations.AddField(
model_name='shippingmethodtranslation',
name='shipping_method',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='shipping.ShippingMethod'),
),
migrations.AddField(
model_name='shippingmethod',
name='shipping_zone',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_methods', to='shipping.ShippingZone'),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='shippingmethodtranslation',
unique_together={('language_code', 'shipping_method')},
),
]
| bsd-3-clause | 1,969,370,652,778,655,500 | 39.885714 | 156 | 0.603075 | false | 4.444099 | false | false | false |
jakerockland/find-s | find-s.py | 1 | 3211 | # This program is an machine learning experiment with the FindS concept learning algorithm
# Based on an exercise from Machine Learning by Thomas Mitchell (1997)
# By: Jacob Rockland
#
# The attribute EnjoySport indicates whether or not Aldo enjoys his favorite
# water sport on this day
#
# For all possible days with the following attributes:
# Sky: Sunny/Rainy
# AirTemp: Warm/Cold
# Humidity: Normal/High
# Wind: Strong/Weak
# Water: Warm/Cool
# Forecast: Same/Change
#
# Let us represent the hypothesis with the vector:
# [Sky, AirTemp, Humidity, Wind, Water, Forecast]
#
# Where each constraint may be '?' to represent that any value is acceptable,
# '0' to represent that no value is acceptable, or a specific value (from above)
#
# A training example for the hypothesis is True if it correctly predicts that
# Aldo will enjoy his water sport on this day, and False otherwise
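#
# Worked example (illustrative; the particular training examples below are
# made up): with target concept ['Sunny', 'Warm', '?', '?', '?', '?'], FindS
# generalizes its hypothesis h like this:
#   h = ['0','0','0','0','0','0']                                  (start)
#   positive ['Sunny','Warm','Normal','Strong','Warm','Same']
#       -> h = ['Sunny','Warm','Normal','Strong','Warm','Same']
#   positive ['Sunny','Warm','High','Strong','Warm','Change']
#       -> h = ['Sunny','Warm','?','Strong','Warm','?']
# Attributes that disagree between positive examples become '?'; negative
# examples are ignored by FindS.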
import random
attributes = [['Sunny','Rainy'],
['Warm','Cold'],
['Normal','High'],
['Strong','Weak'],
['Warm','Cool'],
['Same','Change']]
num_attributes = len(attributes)
def getRandomTrainingExample(target_concept = ['?'] * num_attributes):
training_example = []
classification = True
for i in range(num_attributes):
training_example.append(attributes[i][random.randint(0,1)])
if target_concept[i] != '?' and target_concept[i] != training_example[i]:
classification = False
return training_example, classification
def findS(training_examples = []):
hypothesis = ['0'] * num_attributes
for example in training_examples:
if example[1]:
for i in range(num_attributes):
example_attribute = example[0][i]
hypothesis_attribute = hypothesis[i]
if example_attribute == attributes[i][0]:
if hypothesis_attribute == '0':
hypothesis_attribute = attributes[i][0]
elif hypothesis_attribute == attributes[i][1]:
hypothesis_attribute = '?'
elif example_attribute == attributes[i][1]:
if hypothesis_attribute == '0':
hypothesis_attribute = attributes[i][1]
elif hypothesis_attribute == attributes[i][0]:
hypothesis_attribute = '?'
hypothesis[i] = hypothesis_attribute
return hypothesis
def experiment(target_concept = ['?'] * num_attributes):
training_examples = []
while findS(training_examples) != target_concept:
training_examples.append(getRandomTrainingExample(target_concept))
return len(training_examples)
def main():
target_concept = ['Sunny','Warm','?','?','?','?']
num_experiments = 1000
experiment_results = []
for i in range(num_experiments):
experiment_results.append(experiment(target_concept))
average_result = sum(experiment_results) / num_experiments
print(str(len(experiment_results)) + ' Experiments Ran')
print('Average # Examples Required: ' + str(average_result))
    print('Target Concept: ' + str(target_concept))
if __name__ == "__main__":
main()
| mit | 5,064,658,467,091,919,000 | 37.22619 | 90 | 0.629088 | false | 4.095663 | false | false | false |
souzabrizolara/py-home-shell | src/dao/appliancedao.py | 1 | 1109 | __author__ = 'alisonbento'
import basedao
from src.entities.hsappliance import HomeShellAppliance
import datetime
import configs
class ApplianceDAO(basedao.BaseDAO):
def __init__(self, connection):
basedao.BaseDAO.__init__(self, connection, 'hs_appliances', 'appliance_id')
def convert_row_to_object(self, entity_row):
appliance = HomeShellAppliance()
appliance.id = entity_row['appliance_id']
appliance.package = entity_row['package']
appliance.type = entity_row['type']
appliance.name = entity_row['type']
appliance.key = None
appliance.address = entity_row['address']
appliance.hash = entity_row['appliance_hash']
appliance.modified = entity_row['modified']
appliance.modified_datetime = datetime.datetime.strptime(appliance.modified, configs.DATABASE_DATE_FORMAT)
return appliance
def update(self, entity):
cursor = self.connection.cursor()
sql = "UPDATE " + self.table + " SET modified = ? WHERE appliance_id = ?"
cursor.execute(sql, (entity.modified, entity.id))
| apache-2.0 | 3,405,373,118,082,334,000 | 33.65625 | 114 | 0.66817 | false | 4.018116 | false | false | false |
Flutras/techstitution | app/mod_main/views.py | 1 | 15521 | from flask import Blueprint, render_template, request, redirect, url_for, Response, jsonify, flash
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, DateTimeField, TextField, SubmitField, TextAreaField, RadioField
from wtforms import validators, ValidationError
from wtforms.validators import InputRequired
from bson import ObjectId
from app import mongo
from bson import json_util
import json
mod_main = Blueprint('main', __name__)
@mod_main.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] == 'admin' and request.form['password'] == 'admin':
return redirect(url_for('main.index'))
else:
error = 'Invalid Credentials. Please try again.'
return render_template('mod_main/login.html', error=error)
else:
return render_template('mod_main/login.html', error=error)
class AddPeopleForm(FlaskForm):
firstname = StringField('Firstname', validators=[InputRequired("Please fill out firstname")])
lastname = StringField('Lastname', validators=[InputRequired()])
# submit = SubmitField("Submit")
@mod_main.route('/index', methods=['GET', 'POST'])
def indexpage():
    form = AddPeopleForm()
if request.method == 'GET':
reports = mongo.db.reports.find()
return render_template('mod_main/index.html', reports=reports, form=form)
@mod_main.route('/', methods=['GET', 'POST'])
def index():
form = AddPeopleForm()
if request.method == 'GET':
reports = mongo.db.reports.find()
audits = mongo.db.audits.find()
return render_template('mod_main/dashboard.html', reports=reports, audits=audits, form=form)
elif request.method == 'POST' and form.validate_on_submit():
mongo.db.reports.insert({
"firstname": request.form['firstname'],
"lastname": request.form['lastname']
})
return redirect(url_for('main.index'))
@mod_main.route('/audit_list', methods=['GET', 'POST'])
def audit_list():
audits = mongo.db.audits.find()
return render_template('mod_main/audit_list.html', audits=audits)
# New Audit Form
class AddAuditForm(FlaskForm):
audit_ref_num = IntegerField('Reference number', validators=[InputRequired("Please enter audit reference number!")])
audit_title = StringField('Title', validators=[InputRequired("Please enter audit title!")])
audit_type = StringField('Audit type', validators=[InputRequired("Please enter audit type!")])
audit_organization = StringField('Organization', validators=[InputRequired("Please enter organization!")])
audit_start_date = DateTimeField('Audit Start Date', validators=[InputRequired("Please enter start date!")])
audit_end_date = DateTimeField('Audit End Date', validators=[InputRequired("Please enter end date!")])
audit_auditee = StringField('Auditee', validators=[InputRequired("Please enter auditee!")])
audit_place = StringField('Place', validators=[InputRequired("Please enter place!")])
audit_frefnum = IntegerField('Follow-up reference number', validators=[InputRequired("Please enter follow-up reference number!")])
audit_chrefnum = IntegerField('Change reference number', validators=[InputRequired("Please enter changed reference number!")])
audit_tl = StringField('Audit Team Leader', validators=[InputRequired("Please enter team leader name!")])
audit_tm = StringField('Audit Team Members', validators=[InputRequired("Please enter team members!")])
audit_ap = StringField('Auditee Participants', validators=[InputRequired("Please enter auditee participants!")])
submit = SubmitField("Submit")
@mod_main.route('/add_audit_form', methods=['GET', 'POST'])
def add_audit_form():
form = AddAuditForm()
if request.method == 'GET':
audits = mongo.db.audits.find()
return render_template('mod_main/add_audit_form.html', audits=audits, form=form)
elif request.method == 'POST':
data = request.form
new_inputs = ({})
counter = 1
while counter < 5:
if 'input'+str(counter) in data:
new_inputs.update({
'input_'+str(counter): data['input'+str(counter)]
})
counter += 1
print new_inputs
mongo.db.audits.insert({
"new_inputs": new_inputs,
"audit_ref_num": request.form['audit_ref_num'],
"audit_title": request.form['audit_title'],
"audit_type": request.form['audit_type'],
"audit_organization": request.form['audit_organization'],
"audit_start_date": request.form['audit_start_date'],
"audit_end_date": request.form['audit_end_date'],
"audit_auditee": request.form['audit_auditee'],
"audit_place": request.form['audit_place'],
"audit_frefnum": request.form['audit_frefnum'],
"audit_chrefnum": request.form['audit_chrefnum'],
"audit_tl": request.form['audit_tl'],
"audit_tm": request.form['audit_tm'],
"audit_ap": request.form['audit_ap']
})
return redirect(url_for('main.index'))
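# Worked example of the dynamic-input loop above (the values are made up):
# POSTing {'input1': 'scope note', 'input3': 'follow-up'} alongside the audit
# fields yields new_inputs == {'input_1': 'scope note', 'input_3': 'follow-up'},
# stored on the audit document under "new_inputs". Only fields named
# input1..input4 are collected.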
# New NC Form
class AddNCForm(FlaskForm):
nc_title = StringField('Title', validators=[InputRequired("Please enter NC title!")])
nc_operator_auditee = StringField('Operator Auditee', validators=[InputRequired("Please enter operator auditee!")])
nc_number = IntegerField('Number', validators=[InputRequired("Please enter number!")])
nc_date = DateTimeField('Date', validators=[InputRequired("Please enter date!")])
nc_status = StringField('Status', validators=[InputRequired("Please enter status!")])
nc_agreed_date_for_CAP = DateTimeField('Agreed date for CAP', validators=[InputRequired("Please enter agreed date for CAP!")])
nc_level = StringField('Level', validators=[InputRequired("Please enter level!")])
nc_due_date = DateTimeField('Due Date', validators=[InputRequired("Please enter due date!")])
nc_closure_date = DateTimeField('Closure Date', validators=[InputRequired("Please enter closure date!")])
    nc_requirement_references = StringField('Requirement References', validators=[InputRequired("Please enter requirement references!")])
nc_further_references = StringField('Further References', validators=[InputRequired("Please enter further references!")])
nc_auditor_ofcaa = StringField('Auditor of CAA', validators=[InputRequired("Please enter auditor of CAA!")])
nc_auditee_rfCAP = StringField('Auditee responsible for CAP', validators=[InputRequired("Please enter auditee!")])
requirement_references = TextAreaField('', validators=[InputRequired("Please enter requirement references!")])
nc_details = TextAreaField('Non Conformity Details', validators=[InputRequired("Please enter details!")])
submit = SubmitField("Submit")
@mod_main.route('/<string:audit_id>/add_nc_form', methods=['GET', 'POST'])
def add_nc_form(audit_id):
form = AddNCForm()
if request.method == 'GET':
audit = mongo.db.audits.find({"_id": ObjectId(audit_id)})
return render_template('mod_main/add_nc_form.html', audit=audit, form=form)
elif request.method == 'POST':
# print "post request"
        mongo.db.audits.update({"_id": ObjectId(audit_id)}, {"$set": {"nonconformities": {
"nc_title": request.form['nc_title'],
"nc_operator_auditee": request.form['nc_operator_auditee'],
"nc_number": request.form['nc_number'],
"nc_date": request.form['nc_date'],
"nc_status": request.form['nc_status'],
"nc_agreed_date_for_CAP": request.form['nc_agreed_date_for_CAP'],
"nc_level": request.form['nc_level'],
"nc_due_date": request.form['nc_due_date'],
"nc_closure_date": request.form['nc_closure_date'],
"nc_requirement_references": request.form['nc_requirement_references'],
"nc_further_references": request.form['nc_further_references'],
"nc_auditor_ofcaa": request.form['nc_auditor_ofcaa'],
"nc_auditee_rfCAP": request.form['nc_auditee_rfCAP'],
"requirement_references": request.form['requirement_references'],
"nc_details": request.form['nc_details']
}}})
return redirect(url_for('main.show_audit', audit_id=audit_id))
# New NC Form
class AddCAForm(FlaskForm):
ca_description = StringField('Corrective Action Description', validators=[InputRequired("Please enter description!")])
ca_date_of_capapproval = DateTimeField('Date of CAP approval', validators=[InputRequired("Please enter date!")])
ca_due_date = DateTimeField('Due Date', validators=[InputRequired("Please enter due date!")])
ca_contact_person = StringField('Contact Person', validators=[InputRequired("Please enter contact!")])
    ca_closure_date = DateTimeField('Closure Date', validators=[InputRequired("Please enter closure date!")])
    ca_due_date_history = TextAreaField('Due Date History', validators=[InputRequired("Please enter due date history!")])
submit = SubmitField("Submit")
@mod_main.route('/<string:audit_id>/add_ca_form', methods=['GET', 'POST'])
def add_ca_form(audit_id):
form = AddCAForm()
if request.method == 'GET':
audit = mongo.db.audits.find({"_id": ObjectId(audit_id)})
return render_template('mod_main/add_ca_form.html', audit=audit, form=form)
elif request.method == 'POST':
# print "post request"
mongo.db.correctiveactions.update({"_id": ObjectId(audit_id)}, {"$set": {
"ca_description": request.form['ca_description'],
"ca_date_of_capapproval": request.form['ca_date_of_capapproval'],
"ca_due_date": request.form['ca_due_date'],
"ca_contact_person": request.form['ca_contact_person'],
"ca_closure_date": request.form['ca_closure_date'],
"ca_due_date_history": request.form['ca_due_date_history']
}})
return redirect(url_for('main.show_nc', audit_id=audit_id))
@mod_main.route('/add_people_form', methods=['GET', 'POST'])
def add_people_form():
form = AddPeopleForm()
if request.method == 'GET':
reports = mongo.db.reports.find()
return render_template('mod_main/add_people_form.html', reports=reports, form=form)
elif request.method == 'POST' and form.validate_on_submit():
# print "post request"
mongo.db.reports.insert({
"firstname": request.form['firstname'],
"lastname": request.form['lastname']
})
# return "Form successfully submitted!"
return redirect(url_for('main.indexpage'))
# behet post request ne kete url
@mod_main.route('/remove/audit', methods=['POST'])
def remove_audit():
if request.method == 'POST':
audit_id = request.form['id']
mongo.db.audits.remove({"_id": ObjectId(audit_id)})
return Response(json.dumps({"removed": True}), mimetype='application/json')
@mod_main.route('/remove/report', methods=['POST'])
def remove_report():
if request.method == 'POST':
report_id = request.form['id']
mongo.db.reports.remove({"_id": ObjectId(report_id)})
return Response(json.dumps({"removed": True}), mimetype='application/json')
@mod_main.route('/show_audit/<string:audit_id>', methods=['GET', 'POST'])
def show_audit(audit_id):
form = AddAuditForm()
if request.method == 'GET':
audit = mongo.db.audits.find_one({"_id": ObjectId(audit_id)})
return render_template('mod_main/audit_details.html', audit=audit, form=form)
@mod_main.route('/edit/<string:audit_id>', methods=['GET', 'POST'])
def edit_audit(audit_id):
form = AddAuditForm()
if request.method == 'GET':
audit = mongo.db.audits.find_one({"_id": ObjectId(audit_id)})
return render_template('mod_main/audit_edit.html', audit=audit, form=form)
elif request.method == 'POST':
audit = mongo.db.audits.find_one({"_id": ObjectId(audit_id)})
mongo.db.audits.update({"_id": ObjectId(audit_id)}, {"$set": {
"audit_ref_num": request.form['audit_ref_num'],
"audit_title": request.form['audit_title'],
"audit_type": request.form['audit_type'],
"audit_organization": request.form['audit_organization'],
"audit_start_date": request.form['audit_start_date'],
"audit_end_date": request.form['audit_end_date'],
"audit_auditee": request.form['audit_auditee'],
"audit_place": request.form['audit_place'],
"audit_frefnum": request.form['audit_frefnum'],
"audit_chrefnum": request.form['audit_chrefnum'],
"audit_tl": request.form['audit_tl'],
"audit_tm": request.form['audit_tm'],
"audit_ap": request.form['audit_ap']
}})
return redirect(url_for('main.show_audit', audit_id= audit_id))
# return 'Showing result ' + str(result)
@mod_main.route('/show_report/<string:report_id>', methods=['GET'])
def show_report(report_id):
result = mongo.db.reports.find_one({"_id": ObjectId(report_id)})
return 'Showing result ' + str(result)
@mod_main.route('/add-people', methods=['GET', 'POST'])
def add_people():
# TODO: Implement POST REQUEST
# if success:
form = AddPeopleForm()
    reports = mongo.db.reports.find()
if request.method == 'GET':
return render_template('mod_main/index.html', form=form, reports=reports)
elif request.method == 'POST':
# Get form
form = AddPeopleForm()
# Get form data
data = form.data
# Add document to the database
added_report_id = mongo.db.reports.insert(data)
# Get the added document
report_doc = mongo.db.reports.find_one({"_id": ObjectId(added_report_id)})
# Return a json response
return Response(json_util.dumps(report_doc),mimetype="application/json")
else:
return Response(json_util.dumps({"error":"Something went wrong!"}),mimetype="application/json")
@mod_main.route('/add-audit', methods=['GET', 'POST'])
def add_audit():
# TODO: Implement POST REQUEST
# if success:
form = AddAuditForm()
if request.method == 'POST':
        if not form.validate():
# flash('All fields are required!')
audits = mongo.db.audits.find()
return render_template('mod_main/add_audit.html', audits=audits, form=form)
else:
mongo.db.audits.insert({
"audit_title": request.form['audit_title'],
"audit_ref_num": request.form['audit_ref_num'],
"audit_start_date": request.form['audit_start_date']
})
return redirect(url_for('main.audit_list'))
elif request.method == 'GET':
return render_template('mod_main/add_audit.html', form=form)
# views for new bootstrap admin dashboard theme template
@mod_main.route('/corrective_actions', methods=['GET', 'POST'])
def corrective_actions():
# audits = mongo.db.audits.find()
return render_template('mod_main/corrective_actions.html')
@mod_main.route('/forms', methods=['GET', 'POST'])
def forms():
# audits = mongo.db.audits.find()
return render_template('mod_main/forms.html')
@mod_main.route('/blank-page', methods=['GET', 'POST'])
def blank_page():
# audits = mongo.db.audits.find()
return render_template('mod_main/blank-page.html')
| cc0-1.0 | -1,095,109,134,127,927,400 | 42.844633 | 136 | 0.644611 | false | 3.768148 | false | false | false |
theDarkForce/websearch | webseach_book.py | 1 | 2428 | # -*- coding: UTF-8 -*-
# webseach
# create at 2015/10/30
# author: qianqians
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append('../')
from webget import gethtml
import pymongo
from doclex import doclex
import time
collection_key = None
def seach(urllist):
def process_keyurl(keyurl):
if keyurl is not None:
for key, urllist in keyurl.iteritems():
for url in urllist:
urlinfo = gethtml.process_url(url)
if urlinfo is None:
continue
list, keyurl1 = urlinfo
if list is not None:
gethtml.collection.insert({'key':key, 'url':url, 'timetmp':time.time()})
if keyurl1 is not None:
process_keyurl(keyurl1)
def process_urllist(url_list):
for url in url_list:
#print url,"sub url"
urlinfo = gethtml.process_url(url)
if urlinfo is None:
continue
list, keyurl = urlinfo
if list is not None:
process_urllist(list)
if keyurl is not None:
process_keyurl(keyurl)
time.sleep(0.1)
suburl = []
subkeyurl = {}
for url in urllist:
print url, "root url"
urlinfo = gethtml.process_url(url)
if urlinfo is None:
continue
list, keyurl = urlinfo
suburl.extend(list)
subkeyurl.update(keyurl)
try:
process_urllist(suburl)
process_keyurl(subkeyurl)
except:
import traceback
traceback.print_exc()
urllist = ["http://www.qidian.com/Default.aspx",
"http://www.zongheng.com/",
"http://chuangshi.qq.com/"
]
def refkeywords():
c = collection_key.find()
keywords = []
for it in c:
keywords.append(it["key"])
doclex.keykorks = keywords
if __name__ == '__main__':
conn = pymongo.Connection('localhost',27017)
db = conn.webseach
gethtml.collection = db.webpage
gethtml.collection_url_profile = db.urlprofile
gethtml.collection_url_title = db.urltitle
collection_key = db.keys
t = 0
while True:
timetmp = time.time()-t
if timetmp > 86400:
refkeywords()
t = time.time()
#urllist = seach(urllist)
seach(urllist) | bsd-2-clause | 5,170,723,839,560,860,000 | 21.700935 | 96 | 0.543657 | false | 3.68997 | false | false | false |
mmw125/MuDimA | server/database_reader.py | 1 | 7747 | """Functions for reading from the database."""
import constants
import database_utils
import models
def get_urls():
"""Get all of the urls in articles in the database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT link FROM article;")
urls = set(item[0] for item in cursor.fetchall())
cursor.execute("SELECT link FROM bad_article;")
return urls.union(item[0] for item in cursor.fetchall())
def get_number_topics(category=None):
"""Get just the number of topics from the database."""
with database_utils.DatabaseConnection() as (connection, cursor):
if category is None:
cursor.execute("SELECT 1 FROM article, topic WHERE article.topic_id = topic.id AND "
"article.topic_id IS NOT NULL GROUP BY topic.id ORDER BY count(*) DESC;")
else:
cursor.execute("SELECT 1 FROM article, topic WHERE article.topic_id = topic.id AND article.category = ? AND"
" article.topic_id IS NOT NULL GROUP BY topic.id ORDER BY count(*) DESC;", (category,))
return len(cursor.fetchall())
def get_topics(category=None, page_number=0, articles_per_page=constants.ARTICLES_PER_PAGE):
"""Get the topics for the given page."""
with database_utils.DatabaseConnection() as (connection, cursor):
start = page_number * articles_per_page
end = (page_number + 1) * articles_per_page
total_items = get_number_topics()
if category is None:
cursor.execute("SELECT topic.name, topic.id, topic.image_url, topic.category, count(*) FROM article, topic "
"WHERE article.topic_id = topic.id AND article.topic_id IS NOT NULL "
"GROUP BY topic.id ORDER BY count(*) DESC;")
else:
cursor.execute("SELECT topic.name, topic.id, topic.image_url, topic.category, count(*) FROM article, topic "
"WHERE article.topic_id = topic.id AND topic.category = ? AND article.topic_id IS NOT NULL "
"GROUP BY topic.id ORDER BY count(*) DESC;", (category,))
return sorted([{"total_items": total_items, "title": item[0], "id": item[1],
"image": item[2], "category": item[3], "count": item[4]}
for item in cursor.fetchall()[start:end]], key=lambda x: -x["count"])
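# Worked example of the paging above (numbers are illustrative): with
# articles_per_page == 10 and page_number == 2, start == 20 and end == 30, so
# the third page of popularity-ordered topics is returned.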
def get_sources():
"""Get all of the stories for the topic with the given topic id. Returns empty dict if topic not in database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT source, count(1) FROM article GROUP BY source")
return cursor.fetchall()
def get_stories_for_topic(topic_id):
"""Get all of the stories for the topic with the given topic id. Returns empty dict if topic not in database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name FROM topic WHERE id=?", (topic_id,))
db_item = cursor.fetchone()
if db_item is not None:
title = db_item[0]
cursor.execute("SELECT name, link, image_url, group_fit_x, group_fit_y, popularity, source, favicon "
"FROM article WHERE topic_id=?",
(topic_id,))
items = cursor.fetchall()
else:
title, items = None, []
return {"title": title, "articles": [{"name": item[0], "link": item[1], "image": item[2], "x": item[3],
"y": item[4], "popularity": item[5], "source": item[6], "favicon": item[7]
} for item in items]}
def get_ungrouped_articles():
"""Get the items in the database and puts them into Article and Grouping objects."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name, link, article_text FROM article "
"WHERE article_text != '' AND topic_id IS NULL;")
articles = []
for item in cursor.fetchall():
name, url, article_text = item
articles.append(models.Article(url=url, title=name, text=article_text, in_database=True,
keywords=_get_article_keywords(url, cursor)))
return articles
def get_top_keywords(num=constants.DEFAULT_NUM_KEYWORDS):
"""Get the top keywords used in the database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT keyword, COUNT(1) AS c FROM keyword GROUP BY keyword ORDER BY c DESC LIMIT ?;", (num,))
return [item[0] for item in cursor.fetchall()]
def get_groups_with_unfit_articles():
"""Get the ids of the groups in the database that have articles that are not fit."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT topic_id FROM article WHERE group_fit_x IS NULL AND topic_id IS NOT NULL "
"GROUP BY topic_id;")
return [i[0] for i in cursor.fetchall()]
def get_number_articles_without_overall_fit():
"""Get the number of articles in the database without an overall fit."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT topic_id FROM article WHERE group_fit_x IS NULL AND topic_id IS NOT NULL;")
return len(cursor.fetchall())
def _get_article_keywords(article_url, cursor):
"""Get the keywords for the given article."""
cursor.execute("SELECT keyword FROM keyword WHERE article_link = ?;", (article_url,))
return set(item[0] for item in cursor.fetchall())
def get_grouped_articles():
"""Get the items in the database and puts them into Article and Grouping objects."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name, topic_id, link, article_text, image_url FROM article "
"WHERE article_text != '' AND topic_id IS NOT NULL;")
groups = {}
for item in cursor.fetchall():
name, id, url, article_text, image_url = item
article = models.Article(url=url, title=name, text=article_text, urlToImage=image_url, in_database=True)
article.set_keywords(_get_article_keywords(url, cursor))
if id in groups:
groups.get(id).add_article(article, new_article=False)
else:
groups[id] = models.Grouping(article, uuid=id, in_database=True, has_new_articles=False)
return list(groups.values())
def get_articles(keyword, page=0, limit=10, order_by=None, descending=True):
"""Get the items in the database and puts them into Article and Grouping objects."""
order_by = "date" if order_by is None else order_by
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name, link, image_url, fit_x, fit_y, popularity, source, favicon "
"FROM keyword JOIN article ON keyword.article_link = article.link "
"WHERE keyword = ? OR ? GROUP BY article_link ORDER BY ? DESC;",
(keyword, keyword is None, order_by))
items = [item for item in cursor.fetchall()]
num_items = len(items)
if not descending:
items.reverse()
start = limit * page
items = items[start:start + limit]
return {"num": num_items, "articles": [{
"name": item[0], "link": item[1], "image": item[2], "x": item[3], "y": item[4],
"popularity": item[5], "source": item[6], "favicon": item[7]} for item in items]}
| gpl-3.0 | 4,023,521,665,624,149,500 | 51.70068 | 120 | 0.61469 | false | 4.136145 | false | false | false |
andrew-rogers/DSP | GPS/file_reader.py | 1 | 1929 | #!/usr/bin/env python3
"""Global Position System (GPS) file reader for captured IQ signal
The Standard Positioning Service (SPS) spec can be found at
https://www.navcen.uscg.gov/pubs/gps/sigspec/gpssps1.pdf
"""
# Copyright (c) 2021 Andrew Rogers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
class FileReader :
# Data file available from https://sourceforge.net/projects/gnss-sdr/files/data/
def __init__( self, filename='2013_04_04_GNSS_SIGNAL_at_CTTC_SPAIN/2013_04_04_GNSS_SIGNAL_at_CTTC_SPAIN.dat') :
self.offset = 0
self.filename = filename
def read( self, num_samples ) :
data=np.fromfile(self.filename, dtype=np.int16, offset=self.offset, count=num_samples*2)
self.offset = self.offset + 2 * len(data)
# Convert values to complex
data=data.reshape(num_samples,2)
data=np.matmul(data,[1,1j])
return data
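# Minimal usage sketch (assumes the CTTC capture referenced above is on disk;
# nothing below is part of the original module):
#
#   reader = FileReader()
#   iq = reader.read(4096)        # next 4096 complex baseband samples
#   print(iq.dtype, iq.shape)     # complex128 (4096,)
#
# Because self.offset advances by the bytes consumed, repeated read() calls
# stream the capture in fixed-size blocks.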
| gpl-3.0 | -1,833,778,985,490,122,800 | 39.1875 | 115 | 0.733022 | false | 3.827381 | false | false | false |
gurneyalex/odoo | addons/auth_signup/models/res_partner.py | 4 | 7625 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
import werkzeug.urls
from collections import defaultdict
from datetime import datetime, timedelta
from odoo import api, exceptions, fields, models, _
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.SystemRandom().choice(chars) for _ in range(20))
def now(**kwargs):
return datetime.now() + timedelta(**kwargs)
class ResPartner(models.Model):
_inherit = 'res.partner'
signup_token = fields.Char(copy=False, groups="base.group_erp_manager")
signup_type = fields.Char(string='Signup Token Type', copy=False, groups="base.group_erp_manager")
signup_expiration = fields.Datetime(copy=False, groups="base.group_erp_manager")
signup_valid = fields.Boolean(compute='_compute_signup_valid', string='Signup Token is Valid')
signup_url = fields.Char(compute='_compute_signup_url', string='Signup URL')
@api.depends('signup_token', 'signup_expiration')
def _compute_signup_valid(self):
dt = now()
for partner, partner_sudo in zip(self, self.sudo()):
partner.signup_valid = bool(partner_sudo.signup_token) and \
(not partner_sudo.signup_expiration or dt <= partner_sudo.signup_expiration)
def _compute_signup_url(self):
""" proxy for function field towards actual implementation """
result = self.sudo()._get_signup_url_for_action()
for partner in self:
if any(u.has_group('base.group_user') for u in partner.user_ids if u != self.env.user):
self.env['res.users'].check_access_rights('write')
partner.signup_url = result.get(partner.id, False)
def _get_signup_url_for_action(self, url=None, action=None, view_type=None, menu_id=None, res_id=None, model=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
res = dict.fromkeys(self.ids, False)
for partner in self:
base_url = partner.get_base_url()
# when required, make sure the partner has a valid signup token
if self.env.context.get('signup_valid') and not partner.user_ids:
partner.sudo().signup_prepare()
route = 'login'
# the parameters to encode for the query
query = dict(db=self.env.cr.dbname)
signup_type = self.env.context.get('signup_force_type_in_url', partner.sudo().signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.sudo().signup_token and signup_type:
query['token'] = partner.sudo().signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
if url:
query['redirect'] = url
else:
fragment = dict()
base = '/web#'
if action == '/mail/view':
base = '/mail/view?'
elif action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['res_id'] = res_id
if fragment:
query['redirect'] = base + werkzeug.urls.url_encode(fragment)
url = "/web/%s?%s" % (route, werkzeug.urls.url_encode(query))
if not self.env.context.get('relative_url'):
url = werkzeug.urls.url_join(base_url, url)
res[partner.id] = url
return res
def action_signup_prepare(self):
return self.signup_prepare()
def signup_get_auth_param(self):
""" Get a signup token related to the partner if signup is enabled.
If the partner already has a user, get the login parameter.
"""
if not self.env.user.has_group('base.group_user') and not self.env.is_admin():
raise exceptions.AccessDenied()
res = defaultdict(dict)
allow_signup = self.env['res.users']._get_signup_invitation_scope() == 'b2c'
for partner in self:
partner = partner.sudo()
if allow_signup and not partner.user_ids:
partner.signup_prepare()
res[partner.id]['auth_signup_token'] = partner.signup_token
elif partner.user_ids:
res[partner.id]['auth_login'] = partner.user_ids[0].login
return res
def signup_cancel(self):
return self.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
def signup_prepare(self, signup_type="signup", expiration=False):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self:
if expiration or not partner.signup_valid:
token = random_token()
while self._signup_retrieve_partner(token):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
@api.model
def _signup_retrieve_partner(self, token, check_validity=False, raise_exception=False):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner = self.search([('signup_token', '=', token)], limit=1)
if not partner:
if raise_exception:
raise exceptions.UserError(_("Signup token '%s' is not valid") % token)
return False
if check_validity and not partner.signup_valid:
if raise_exception:
raise exceptions.UserError(_("Signup token '%s' is no longer valid") % token)
return False
return partner
@api.model
def signup_retrieve_info(self, token):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(token, raise_exception=True)
res = {'db': self.env.cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = res['login'] = partner.email or ''
return res
| agpl-3.0 | 5,222,862,158,435,340,000 | 42.323864 | 119 | 0.590557 | false | 4.205736 | false | false | false |
importre/kotlin-unwrap | utils/gen.py | 1 | 1264 | #! /usr/bin/env python3
import os
impl = '''
class Unwrap(private var valid: Boolean) {
infix fun <R> nah(f: () -> R) {
if (!valid) f()
}
}
'''
template = '''
inline fun <{0}, R> unwrap(
{1},
block: ({0}) -> R): Unwrap {{
val valid = null !in arrayOf{4}({2})
if (valid) block({3})
return Unwrap(valid = valid)
}}
'''
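# For reference, with iter == 2 the template above expands to the following
# Kotlin source (shown purely as an illustration of the generator's output):
#
#   inline fun <T1, T2, R> unwrap(
#           t1: T1?, t2: T2?,
#           block: (T1, T2) -> R): Unwrap {
#       val valid = null !in arrayOf(t1, t2)
#       if (valid) block(t1!!, t2!!)
#       return Unwrap(valid = valid)
#   }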
if __name__ == '__main__':
max = 5
root = os.path.join('src', 'main', 'kotlin', '')
path = [i[0] for i in os.walk(root)
if i[0].endswith(os.sep + 'unwrap')][0].replace(root, '')
codes = ['package {}\n'.format(path.replace(os.sep, '.')), impl]
for iter in range(1, max + 1):
types = ', '.join(['T{}'.format(i + 1) for i in range(iter)])
params = ', '.join(['t{0}: T{0}?'.format(i + 1) for i in range(iter)])
args1 = ', '.join(['t{}'.format(i + 1) for i in range(iter)])
args2 = ', '.join(['t{}!!'.format(i + 1) for i in range(iter)])
arrayType = '<Any?>' if (iter == 1) else ''
code = template.format(types, params, args1, args2, arrayType)
codes.append(code)
filename = os.path.join(root, path, 'Unwrap.kt')
with open(filename, 'w') as fout:
fout.write(''.join(codes).strip() + '\n')
pass
| apache-2.0 | 3,446,911,921,353,584,000 | 29.095238 | 78 | 0.508703 | false | 2.939535 | false | false | false |
josiah-wolf-oberholtzer/supriya | supriya/ugens/dynamics.py | 1 | 3847 | import collections
from supriya import CalculationRate
from supriya.synthdefs import PseudoUGen, UGen
from .delay import DelayN
class Amplitude(UGen):
"""
An amplitude follower.
::
>>> source = supriya.ugens.In.ar(0)
>>> amplitude = supriya.ugens.Amplitude.kr(
... attack_time=0.01, release_time=0.01, source=source,
... )
>>> amplitude
Amplitude.kr()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("attack_time", 0.01), ("release_time", 0.01)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Compander(UGen):
"""
A general purpose hard-knee dynamics processor.
"""
_ordered_input_names = collections.OrderedDict(
[
("source", None),
("control", 0.0),
("threshold", 0.5),
("slope_below", 1.0),
("slope_above", 1.0),
("clamp_time", 0.01),
("relax_time", 0.1),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class CompanderD(PseudoUGen):
"""
A convenience constructor for Compander.
"""
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
source=None,
threshold=0.5,
clamp_time=0.01,
relax_time=0.1,
slope_above=1.0,
slope_below=1.0,
):
"""
Constructs an audio-rate dynamics processor.
.. container:: example
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> compander_d = supriya.ugens.CompanderD.ar(source=source,)
>>> supriya.graph(compander_d) # doctest: +SKIP
::
>>> print(compander_d)
synthdef:
name: d4e7b88df56af5070a88f09b0f8c633e
ugens:
- In.ar:
bus: 0.0
- DelayN.ar:
delay_time: 0.01
maximum_delay_time: 0.01
source: In.ar[0]
- Compander.ar:
clamp_time: 0.01
control: DelayN.ar[0]
relax_time: 0.1
slope_above: 1.0
slope_below: 1.0
source: In.ar[0]
threshold: 0.5
Returns ugen graph.
"""
control = DelayN.ar(
source=source, maximum_delay_time=clamp_time, delay_time=clamp_time
)
return Compander._new_expanded(
clamp_time=clamp_time,
calculation_rate=CalculationRate.AUDIO,
relax_time=relax_time,
slope_above=slope_above,
slope_below=slope_below,
source=source,
control=control,
threshold=threshold,
)
class Limiter(UGen):
"""
A peak limiter.
::
>>> source = supriya.ugens.In.ar(0)
>>> limiter = supriya.ugens.Limiter.ar(duration=0.01, level=1, source=source,)
>>> limiter
Limiter.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("level", 1), ("duration", 0.01)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class Normalizer(UGen):
"""
A dynamics flattener.
::
>>> source = supriya.ugens.In.ar(0)
>>> normalizer = supriya.ugens.Normalizer.ar(duration=0.01, level=1, source=source,)
>>> normalizer
Normalizer.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("level", 1), ("duration", 0.01)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
| mit | 7,030,170,808,679,921,000 | 24.476821 | 92 | 0.494671 | false | 3.778978 | false | false | false |
CDE-UNIBE/qcat | apps/search/search.py | 1 | 8779 | from functools import lru_cache
from django.conf import settings
from elasticsearch import TransportError
from questionnaire.models import Questionnaire
from .index import get_elasticsearch
from .utils import get_alias, ElasticsearchAlias
es = get_elasticsearch()
def get_es_query(
filter_params: list=None, query_string: str='',
match_all: bool=True) -> dict:
"""
Kwargs:
``filter_params`` (list): A list of filter parameters. Each
parameter is a tuple consisting of the following elements:
[0]: questiongroup
[1]: key
[2]: values (list)
[3]: operator
[4]: type (eg. checkbox / text)
``query_string`` (str): A query string for the full text search.
``match_all`` (bool): Whether the query MUST match all filters or not.
If not all filters must be matched, the results are ordered by relevance
        to show hits matching more filters at the top. Defaults to True.
Returns:
``dict``. A dictionary containing the query to be passed to ES.
"""
if filter_params is None:
filter_params = []
es_queries = []
def _get_terms(qg, k, v):
return {
'terms': {
f'filter_data.{qg}__{k}': [v.lower()]
}
}
# Filter parameters: Nested subqueries to access the correct
# questiongroup.
for filter_param in list(filter_params):
if filter_param.type in [
'checkbox', 'image_checkbox', 'select_type', 'select_model',
'radio', 'bool']:
            # So far, range operators only work with one filter value. Does it
# even make sense to have multiple of these joined by OR with the
# same operator?
if filter_param.operator in ['gt', 'gte', 'lt', 'lte']:
raise NotImplementedError(
'Filtering by range is not yet implemented.')
else:
if len(filter_param.values) > 1:
matches = [
_get_terms(filter_param.questiongroup,
filter_param.key, v) for v in
filter_param.values]
query = {
'bool': {
'should': matches
}
}
else:
query = _get_terms(
filter_param.questiongroup, filter_param.key,
filter_param.values[0])
es_queries.append(query)
elif filter_param.type in ['text', 'char']:
raise NotImplementedError(
'Filtering by text or char is not yet implemented/supported.')
elif filter_param.type in ['_date']:
raise NotImplementedError('Not yet implemented.')
elif filter_param.type in ['_flag']:
raise NotImplementedError('Not yet implemented.')
elif filter_param.type in ['_lang']:
es_queries.append({
'terms': {
'translations': [filter_param.values]
}
})
elif filter_param.type == '_edition':
es_queries.append({
'terms': {
'serializer_edition': [filter_param.values]
}
})
if query_string:
es_queries.append({
'multi_match': {
'query': get_escaped_string(query_string),
'fields': [
'list_data.name.*^4',
'list_data.definition.*',
'list_data.country'
],
'type': 'cross_fields',
'operator': 'and',
}
})
es_bool = 'must' if match_all is True else 'should'
if query_string == '':
# Default sort: By country, then by score.
sort = [
{
'list_data.country.keyword': {
'order': 'asc'
}
},
'_score',
]
else:
# If a phrase search is done, then only use the score to sort.
sort = ['_score']
return {
'query': {
'bool': {
es_bool: es_queries
}
},
'sort': sort,
}
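# Illustrative call (the questiongroup/key/values below are placeholders, and
# the filter entries are assumed to behave like a namedtuple exposing the
# attributes read above):
#
#   from collections import namedtuple
#   FilterParam = namedtuple('FilterParam',
#                            'questiongroup key values operator type')
#   query = get_es_query(
#       filter_params=[FilterParam('qg_location', 'country', ['kenya'],
#                                  'eq', 'checkbox')],
#       query_string='soil erosion')
#   # 'query' is a plain dict, ready to be passed as body= to es.search().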
def advanced_search(
filter_params: list=None, query_string: str='',
configuration_codes: list=None, limit: int=10,
offset: int=0, match_all: bool=True) -> dict:
"""
Kwargs:
``filter_params`` (list): A list of filter parameters. Each
parameter is a tuple consisting of the following elements:
[0]: questiongroup
[1]: key
[2]: values (list)
[3]: operator
[4]: type (eg. checkbox / text)
``query_string`` (str): A query string for the full text search.
``configuration_codes`` (list): An optional list of
configuration codes to limit the search to certain indices.
``limit`` (int): A limit of query results to return.
``offset`` (int): The number of query results to skip.
``match_all`` (bool): Whether the query MUST match all filters or not.
If not all filters must be matched, the results are ordered by relevance
        to show hits matching more filters at the top. Defaults to True.
Returns:
``dict``. The search results as returned by
``elasticsearch.Elasticsearch.search``.
"""
query = get_es_query(
filter_params=filter_params, query_string=query_string,
match_all=match_all)
if configuration_codes is None:
configuration_codes = []
alias = get_alias(*ElasticsearchAlias.from_code_list(*configuration_codes))
return es.search(index=alias, body=query, size=limit, from_=offset)
def get_aggregated_values(
questiongroup, key, filter_type, filter_params: list=None,
query_string: str='', configuration_codes: list=None,
match_all: bool=True) -> dict:
if filter_params is None:
filter_params = []
# Remove the filter_param with the current questiongroup and key from the
# list of filter_params
relevant_filter_params = [
f for f in filter_params if
f.questiongroup != questiongroup and f.key != key]
query = get_es_query(
filter_params=relevant_filter_params, query_string=query_string,
match_all=match_all)
# For text values, use the keyword. This does not work for integer values
# (the way boolean values are stored).
# https://www.elastic.co/guide/en/elasticsearch/reference/current/fielddata.html
if filter_type == 'bool':
field = f'filter_data.{questiongroup}__{key}'
else:
field = f'filter_data.{questiongroup}__{key}.keyword'
query.update({
'aggs': {
'values': {
'terms': {
'field': field,
# Limit needs to be high enough to include all values.
'size': 1000,
}
}
},
'size': 0, # Do not include the actual hits
})
alias = get_alias(*ElasticsearchAlias.from_code_list(*configuration_codes))
es_query = es.search(index=alias, body=query)
buckets = es_query.get('aggregations', {}).get('values', {}).get('buckets', [])
return {b.get('key'): b.get('doc_count') for b in buckets}
def get_element(questionnaire: Questionnaire) -> dict:
"""
Get a single element from elasticsearch.
"""
alias = get_alias(
ElasticsearchAlias.from_configuration(configuration=questionnaire.configuration_object)
)
try:
return es.get_source(index=alias, id=questionnaire.pk, doc_type='questionnaire')
except TransportError:
return {}
def get_escaped_string(query_string: str) -> str:
"""
Replace all reserved characters when searching the ES index.
"""
for char in settings.ES_QUERY_RESERVED_CHARS:
query_string = query_string.replace(char, '\\{}'.format(char))
return query_string
@lru_cache(maxsize=1)
def get_indices_alias() -> list:
"""
Return a list of all elasticsearch index aliases. Only ES indices which
start with the QCAT prefix are respected. Editions are stripped away, only the 'type' of the
index / configuration is relevant.
"""
indices = []
for aliases in es.indices.get_alias('*').values():
for alias in aliases.get('aliases', {}).keys():
if settings.ES_INDEX_PREFIX not in alias:
continue
indices.append(alias.replace(settings.ES_INDEX_PREFIX, '').rsplit('_', 1)[0])
return indices
| apache-2.0 | 638,632,226,290,109,800 | 30.579137 | 96 | 0.552569 | false | 4.331031 | true | false | false |
Lindy21/CSE498-LRS | oauth_provider/views.py | 1 | 8387 | from oauth.oauth import OAuthError
from django.conf import settings
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, HttpResponseForbidden)
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import get_callable
from django.template import RequestContext
from utils import initialize_server_request, send_oauth_error
from decorators import oauth_required
from stores import check_valid_callback
from consts import OUT_OF_BAND
from django.utils.decorators import decorator_from_middleware
from django.shortcuts import render_to_response
from lrs.forms import AuthClientForm
from lrs.models import Token
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
INVALID_PARAMS_RESPONSE = send_oauth_error(OAuthError(
_('Invalid request parameters.')))
def oauth_home(request):
rsp = """
<html><head></head><body><h1>Oauth Authorize</h1></body></html>"""
return HttpResponse(rsp)
def request_token(request):
"""
The Consumer obtains an unauthorized Request Token by asking the Service
Provider to issue a Token. The Request Token's sole purpose is to receive
User approval and can only be used to obtain an Access Token.
"""
# If oauth is not enabled, don't initiate the handshake
if settings.OAUTH_ENABLED:
oauth_server, oauth_request = initialize_server_request(request)
if oauth_server is None:
return INVALID_PARAMS_RESPONSE
try:
# create a request token
token = oauth_server.fetch_request_token(oauth_request)
# return the token
response = HttpResponse(token.to_string(), mimetype="text/plain")
except OAuthError, err:
response = send_oauth_error(err)
return response
else:
return HttpResponseBadRequest("OAuth is not enabled. To enable, set the OAUTH_ENABLED flag to true in settings")
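# Client-side sketch of this step (illustrative, not part of this module),
# using the python-oauth2 library; key, secret and URL are placeholders:
#   import oauth2
#   consumer = oauth2.Consumer('CONSUMER_KEY', 'CONSUMER_SECRET')
#   client = oauth2.Client(consumer)
#   resp, content = client.request('https://lrs.example.com/OAuth/initiate', 'GET')
#   # on success, content holds oauth_token and oauth_token_secret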
# tom c added login_url
@login_required(login_url="/XAPI/accounts/login")
def user_authorization(request):
"""
The Consumer cannot use the Request Token until it has been authorized by
the User.
"""
oauth_server, oauth_request = initialize_server_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
try:
# get the request token
token = oauth_server.fetch_request_token(oauth_request)
# tom c .. we know user.. save it
token.user = request.user
token.save()
except OAuthError, err:
return send_oauth_error(err)
try:
# get the request callback, though there might not be one
callback = oauth_server.get_callback(oauth_request)
# OAuth 1.0a: this parameter should not be present on this version
if token.callback_confirmed:
return HttpResponseBadRequest("Cannot specify oauth_callback at authorization step for 1.0a protocol")
if not check_valid_callback(callback):
return HttpResponseBadRequest("Invalid callback URL")
except OAuthError:
callback = None
# OAuth 1.0a: use the token's callback if confirmed
if token.callback_confirmed:
callback = token.callback
if callback == OUT_OF_BAND:
callback = None
# entry point for the user
if request.method == 'GET':
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
authorize_view = get_callable(authorize_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % authorize_view_str
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = token.key
return authorize_view(request, token, callback, params)
# user grant access to the service
if request.method == 'POST':
# verify the oauth flag set in previous GET
if request.session.get('oauth', '') == token.key:
request.session['oauth'] = ''
try:
form = AuthClientForm(request.POST)
if form.is_valid():
if int(form.cleaned_data.get('authorize_access', 0)):
# authorize the token
token = oauth_server.authorize_token(token, request.user)
# return the token key
s = form.cleaned_data.get('scopes', '')
if isinstance(s, (list, tuple)):
s = ",".join([v.strip() for v in s])
# changed scope, gotta save
if s:
token.scope = s
token.save()
args = { 'token': token }
else:
args = { 'error': _('Access not granted by user.') }
else:
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
authorize_view = get_callable(authorize_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % authorize_view_str
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = token.key
return authorize_view(request, token, callback, params, form)
except OAuthError, err:
response = send_oauth_error(err)
if callback:
if "?" in callback:
url_delimiter = "&"
else:
url_delimiter = "?"
if 'token' in args:
query_args = args['token'].to_string(only_key=True)
else: # access is not authorized i.e. error
query_args = 'error=%s' % args['error']
response = HttpResponseRedirect('%s%s%s' % (callback, url_delimiter, query_args))
else:
# try to get custom callback view
callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
'oauth_provider.views.fake_callback_view')
try:
callback_view = get_callable(callback_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % callback_view_str
response = callback_view(request, **args)
else:
response = send_oauth_error(OAuthError(_('Action not allowed.')))
return response
def access_token(request):
"""
The Consumer exchanges the Request Token for an Access Token capable of
accessing the Protected Resources.
"""
oauth_server, oauth_request = initialize_server_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
try:
# get the request token
token = oauth_server.fetch_access_token(oauth_request)
# return the token
response = HttpResponse(token.to_string(), mimetype="text/plain")
except OAuthError, err:
response = send_oauth_error(err)
return response
def authorize_client(request, token=None, callback=None, params=None, form=None):
if not form:
form = AuthClientForm(initial={'scopes': token.scope_to_list(),
'obj_id': token.pk})
d = {}
d['form'] = form
d['name'] = token.consumer.name
d['description'] = token.consumer.description
d['params'] = params
return render_to_response('oauth_authorize_client.html', d, context_instance=RequestContext(request))
def callback_view(request, **args):
    d = {}
    if 'error' in args:
        d['error'] = args['error']
    else:
        d['verifier'] = args['token'].verifier
    return render_to_response('oauth_verifier_pin.html', d, context_instance=RequestContext(request))
| apache-2.0 | 868,409,682,363,586,200 | 41.573604 | 120 | 0.595088 | false | 4.585566 | false | false | false |
joshuaunderwood7/HaskeLinGeom | pysrc/LG/Board.py | 1 | 2993 | def indexToLocation(x):
return ( (8-(x%8)) , (int(x/8)+1) )
class Location:
def __init__(self, x=1, y=1, z=1):
self.x = x
self.y = y
self.z = z
def parseStr(self, inStr):
inStr = inStr[1:-1]
inStr = inStr.split(',')
self.x = int(inStr[0])
self.y = int(inStr[1])
self.z = int(inStr[2])
def arrayShift(self):
self.x-=1
self.y-=1
self.z-=1
return self
def shiftBack(self):
self.x+=1
self.y+=1
self.z+=1
return self
def __repr__(self):
return '(' + str(self.x) + ', ' + \
str(self.y) + ', ' + \
str(self.z) + ')'
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
class Board:
def __init__(self, minx=1, maxx=1, miny=1, maxy=1, minz=1, maxz=1):
"""Default board is 1x1x1 and filled with #'s"""
self.minX = minx
self.maxX = maxx
self.minY = miny
self.maxY = maxy
self.minZ = minz
self.maxZ = maxz
self.locations = set()
for loc in [ (x,y,z) for z in range(minz, maxz+1) for y in range(miny, maxy+1) for x in range(minx, maxx+1)]:
self.locations.add(loc)
    def fill(self, locations):
        """Add the given locations to this board's set of accessible squares."""
        self.locations.update(locations)
        return self
def canAccess(self, location):
return (location in self.locations)
def get(self, location):
if self.canAccess(location):
return '#'
return ''
def set(self, location):
self.locations.add(location)
return self
def rangeOfX(self):
"""Return an eager list of X values"""
return range(self.minX, self.maxX+1)
def rangeOfY(self):
"""Return an eager list of Y values"""
return range(self.minY, self.maxY+1)
def rangeOfZ(self):
"""Return an eager list of Z values"""
return range(self.minZ, self.maxZ+1)
    def __repr__(self):
        returnString = "locations = set("
        for loc in self.locations:
            returnString += str(loc) + ", "
        returnString += ")"
        return returnString
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def getDistanceboard(self):
return Board(maxx=((self.maxX*2)-1), maxy=((self.maxY*2)-1), maxz=((self.maxZ*2)-1))
def middle(self):
"""Only returns approximate middle of the distance Board"""
return Location(self.maxX, self.maxY, self.maxZ)
chessboard = Board(maxx= 8, maxy=8)
distanceboard = chessboard.getDistanceboard()
chessboard3D = Board(maxx= 8, maxy=8, maxz=8)
| gpl-3.0 | -2,676,868,010,432,471,000 | 26.712963 | 117 | 0.539926 | false | 3.289011 | false | false | false |
i19870503/i19870503 | Python/eggnog2go_anno.py | 1 | 2591 | import os
import re
import pandas as pd
import string
import itertools
import numpy as np
import sys
import argparse
from collections import OrderedDict
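# Example invocation (file names are placeholders for the eggNOG-mapper
# output table and a tab-separated GO database):
#   python eggnog2go_anno.py -i eggnog.annotations.tsv -o sample -db go_db.tsv
# which writes sample_anno.xls and sample_enrich.xls.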
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create GO annotation and enrichment file')
parser.add_argument('-i',type=str,dest='infile',required=True,help="Input file")
parser.add_argument('-o',type=str,dest='out',required=True,help="Ouput file")
parser.add_argument('-db',type=str,dest='db',required=True,help="GO Database file")
args = parser.parse_args()
print (args)
def sort_uniq(sequence):
return (x[0] for x in itertools.groupby(sorted(sequence)))
path = "/home/zluna/Work/GO"
fout = open(args.out+"_anno.xls", 'w')
print("Gene_id", "GO_annotation", sep = '\t', file = fout)
go_db = pd.read_table(os.path.join(path, args.db), header = None)
eggout = pd.read_table(os.path.join(path, args.infile), header = None)
#pd.DataFrame.head(eggout)
#eggout.head(100)
dict = OrderedDict()
first_flag = 1
a = list(go_db[0])
for i in range(len(eggout)):
gene_id = eggout[0][i]
go_id = eggout[5][i]
if pd.isnull(eggout[5][i]):
go_id = ''
#print(gene_id, kegg_id, type(kegg_id), sep ='\t')
go_id = go_id.split(',')
if len(go_id) == 0:
continue
go_term = '; '.join(list(go_db[go_db[2].isin(go_id)][0]))
#print(gene_id, go_id, go_term, sep ='\t')
go_sum = []
sel_go_table = go_db[go_db[2].isin(go_id)]
for j in range(len(sel_go_table)):
go_sum.append(''.join(( list(sel_go_table[2])[j], "~", list(sel_go_table[0])[j])))
print(gene_id, str(go_sum).strip('[]').replace(']','').replace("'","").replace(", ","; "), sep = '\t', file = fout)
a = list(go_db[2])
### Use dictionary
for k in range(len(a)):
if str(go_sum).find(a[k]) != -1 :
if a[k] not in dict.keys():
### The value must be list type, if just give the 'gene_id' as the value of key, it can not use 'append' method to add the new 'gene_id' to the existing key.
dict[a[k]] = []
dict[a[k]].append(gene_id)
else:
dict[a[k]].append(gene_id)
#dict[a[j]] = [dict[a[j]], gene_id]
fout.close()
fout2 = open(args.out+"_enrich.xls", 'w')
print('GOID', 'Term', 'Genes', 'Gene_count', sep = '\t', file = fout2)
for key,values in dict.items():
print(key, list(go_db[go_db[2] == key][0]), str(values).strip('[]').replace(']','').replace("'",""), len(values), sep ='\t', file = fout2)
    fout2.close()
| gpl-2.0 | -6,269,192,895,930,068,000 | 37.102941 | 157 | 0.580085 | false | 2.914511 | false | false | false |
rcwoolley/device-cloud-python | device_cloud/osal.py | 1 | 3073 | '''
Copyright (c) 2016-2017 Wind River Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied.
'''
"""
Operating System Abstraction Layer (OSAL). This module provides abstractions of
functions that are different on different operating systems.
"""
import os
import platform
import subprocess
import sys
# Constants
NOT_SUPPORTED = -20
EXECUTION_FAILURE = -21
BAD_PARAMETER = -22
# Setup platform info statics
WIN32 = sys.platform.startswith('win32')
LINUX = sys.platform.startswith('linux')
MACOS = sys.platform.startswith('darwin')
POSIX = LINUX or MACOS
OTHER = not POSIX and not WIN32
# Define Functions
def execl(*args):
"""
Replaces the current process with a new instance of the specified
executable. This function will only return if there is an issue starting the
new instance, in which case it will return false. Otherwise, it will not
return.
"""
retval = EXECUTION_FAILURE
if POSIX:
os.execvp(args[0], args)
elif WIN32:
os.execvp(sys.executable, args)
else:
retval = NOT_SUPPORTED
return retval
def os_kernel():
"""
Get the operating system's kernel version
"""
ker = "Unknown"
if LINUX:
ker = platform.release()
elif WIN32 or MACOS:
ker = platform.version()
return ker
def os_name():
"""
Get the operating system name
"""
name = "Unknown"
if LINUX:
distro = platform.linux_distribution()
plat = subprocess.check_output(["uname", "-o"])[:-1].decode()
name = "{} ({})".format(distro[0], plat)
elif WIN32:
name = platform.system()
elif MACOS:
name = "macOS"
return name
def os_version():
"""
Get the operating system version
"""
ver = "Unknown"
if LINUX:
distro = platform.linux_distribution()
ver = "{}-{}".format(distro[1], distro[2])
elif WIN32:
ver = platform.release()
elif MACOS:
ver = platform.mac_ver()[0]
return ver
def system_reboot(delay=0, force=True):
"""
Reboot the system.
"""
return system_shutdown(delay=delay, reboot=True, force=force)
def system_shutdown(delay=0, reboot=False, force=True):
"""
Run the system shutdown command. Can be used to reboot the system.
"""
command = "shutdown "
if POSIX:
command += "-r " if reboot else "-h "
command += "now " if delay == 0 else "+{} ".format(delay)
elif WIN32:
command += "/r " if reboot else "/s "
command += "/t {} ".format(delay*60)
command += "/f" if force else ""
else:
return NOT_SUPPORTED
return os.system(command)
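# Illustrative, read-only demo of the helpers above (safe on any platform):
if __name__ == "__main__":
    print("%s %s (kernel %s)" % (os_name(), os_version(), os_kernel()))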
| apache-2.0 | 6,464,276,360,981,115,000 | 25.491379 | 84 | 0.633257 | false | 3.909669 | false | false | false |
dnarvaez/virtualenv-bootstrap | bootstrap.py | 1 | 4429 | #!/usr/bin/env python3
# Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is from https://github.com/dnarvaez/virtualenv-bootstrap
import hashlib
import json
import os
import shutil
import subprocess
import sys
import tarfile
import urllib.request
base_dir = os.path.dirname(os.path.abspath(__file__))
environ_namespace = "TEST"
start_message = "Installing virtualenv"
end_message = "\n"
packages = ["osourcer"]
submodules = []
virtualenv_version = "1.8.4"
virtualenv_dir = "sandbox"
cache_dir = "cache"
run_module = "osourcer.tool"
etag = "1"
def get_cache_dir():
return os.path.join(base_dir, cache_dir)
def get_virtualenv_dir():
return os.path.join(base_dir, virtualenv_dir)
def get_stamp_path():
return get_virtualenv_dir() + ".stamp"
def get_bin_path(name):
return os.path.join(get_virtualenv_dir(), "bin", name)
def create_virtualenv():
source_dir = os.path.join(get_cache_dir(),
"virtualenv-%s" % virtualenv_version)
if not os.path.exists(source_dir):
url = "https://pypi.python.org/packages/source/v/" \
"virtualenv/virtualenv-%s.tar.gz" % virtualenv_version
f = urllib.request.urlopen(url)
with tarfile.open(fileobj=f, mode="r:gz") as tar:
tar.extractall(get_cache_dir())
subprocess.check_call(["python3",
os.path.join(source_dir, "virtualenv.py"),
"-q", get_virtualenv_dir()])
def get_submodule_dirs():
return [os.path.join(base_dir, submodule) for submodule in submodules]
def install_packages():
args = [get_bin_path("pip"), "-q", "install"]
args.extend(packages)
args.extend(get_submodule_dirs())
subprocess.check_call(args)
def upgrade_submodules():
args = [get_bin_path("pip"), "-q", "install", "--no-deps", "--upgrade"]
args.extend(get_submodule_dirs())
subprocess.check_call(args)
def compute_submodules_hash():
data = ""
for submodule in submodules:
for root, dirs, files in os.walk(os.path.join(base_dir, submodule)):
for name in files:
path = os.path.join(root, name)
mtime = os.lstat(path).st_mtime
data = "%s%s %s\n" % (data, mtime, path)
return hashlib.sha256(data.encode("utf-8")).hexdigest()
def check_stamp():
try:
with open(get_stamp_path()) as f:
stamp = json.load(f)
except (IOError, ValueError):
return True, True
return (stamp["etag"] != etag,
stamp["submodules_hash"] != compute_submodules_hash())
def write_stamp():
stamp = {"etag": etag,
"submodules_hash": compute_submodules_hash()}
with open(get_stamp_path(), "w") as f:
json.dump(stamp, f)
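# The stamp file written above is plain JSON, e.g. (hash value illustrative):
#   {"etag": "1", "submodules_hash": "9f86d08..."}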
def update_submodules():
update = os.environ.get(environ_namespace + "_UPDATE_SUBMODULES", "yes")
if update != "yes":
return
os.chdir(base_dir)
for module in submodules:
subprocess.check_call(["git", "submodule", "update", "--init",
module])
def main():
os.environ["PIP_DOWNLOAD_CACHE"] = get_cache_dir()
os.environ[environ_namespace + "_BASE_DIR"] = base_dir
os.environ[environ_namespace + "_VIRTUALENV"] = get_virtualenv_dir()
etag_changed, submodules_changed = check_stamp()
if etag_changed:
print(start_message)
update_submodules()
try:
shutil.rmtree(get_virtualenv_dir())
except OSError:
pass
create_virtualenv()
install_packages()
write_stamp()
print(end_message)
elif submodules_changed:
upgrade_submodules()
write_stamp()
args = [get_bin_path("python3"), "-m", run_module]
if len(sys.argv) > 1:
args.extend(sys.argv[1:])
os.execl(args[0], *args)
if __name__ == "__main__":
main()
| apache-2.0 | 4,880,690,577,747,265,000 | 24.601156 | 76 | 0.621585 | false | 3.592052 | false | false | false |
CCI-Tools/cate-core | cate/ops/index.py | 1 | 8641 |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName, ValidationError
from cate.util.monitor import Monitor
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index'])
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    Calculate nino34 index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in the Nino3.4 region:
    lon_min=-170, lat_min=-5, lon_max=-120, lat_max=5.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
    :param var: Dataset variable (geophysical quantity) to use for index
        calculation.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset according to the given
    threshold. An anomaly larger than the positive value of the threshold
indicates El Nino and anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
    :return: A pandas DataFrame that contains the index timeseries.
"""
n34 = '-170, -5, -120, 5'
name = 'ENSO N3.4 Index'
return _generic_index_calculation(ds, var, n34, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
         region: str = 'N34',
custom_region: PolygonLike.TYPE = None,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate ENSO index, which is defined as a five month running mean of
anomalies of monthly means of SST data in the given region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param region: Region for index calculation, the default is Nino3.4
:param custom_region: If 'custom' is chosen as the 'region', this parameter
has to be provided to set the desired region.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset, according to the given
    threshold. An anomaly larger than the positive value of the threshold
indicates El Nino and anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
    :return: A pandas DataFrame that contains the index timeseries.
"""
regions = {'N1+2': '-90, -10, -80, 0',
'N3': '-150, -5, -90, 5',
               'N34': '-170, -5, -120, 5',
'N4': '160, -5, -150, 5',
'custom': custom_region}
converted_region = PolygonLike.convert(regions[region])
if not converted_region:
raise ValidationError('No region has been provided to ENSO index calculation')
name = 'ENSO ' + region + ' Index'
if 'custom' == region:
name = 'ENSO Index over ' + PolygonLike.format(converted_region)
return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate ONI index, which is defined as a three month running mean of
anomalies of monthly means of SST data in the Nino3.4 region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset, according to the given
    threshold. An anomaly larger than the positive value of the threshold
indicates El Nino and anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
    :return: A pandas DataFrame that contains the index timeseries.
"""
n34 = '-170, -5, -120, 5'
name = 'ONI Index'
return _generic_index_calculation(ds, var, n34, 3, file, name, threshold, monitor)
def _generic_index_calculation(ds: xr.Dataset,
var: VarName.TYPE,
region: PolygonLike.TYPE,
window: int,
file: str,
name: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    A generic index calculation, where an index is defined as an anomaly,
    relative to the given reference, of a moving average (of the given window
    size) over the given region of the given variable of the given dataset.
:param ds: Dataset from which to calculate the index
:param var: Variable from which to calculate index
:param region: Spatial subset from which to calculate the index
:param window: Window size for the moving average
:param file: Path to the reference file
:param threshold: Absolute threshold that indicates an ENSO event
:param name: Name of the index
:param monitor: a progress monitor.
    :return: A pandas DataFrame that contains the index timeseries
"""
var = VarName.convert(var)
region = PolygonLike.convert(region)
with monitor.starting("Calculate the index", total_work=2):
ds = select_var(ds, var)
ds_subset = subset_spatial(ds, region)
anom = anomaly_external(ds_subset, file, monitor=monitor.child(1))
with monitor.child(1).observing("Calculate mean"):
ts = anom.mean(dim=['lat', 'lon'])
df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time.values)
retval = df.rolling(window=window, center=True).mean().dropna()
if threshold is None:
return retval
retval['El Nino'] = pd.Series((retval[name] > threshold),
index=retval.index)
retval['La Nina'] = pd.Series((retval[name] < -threshold),
index=retval.index)
return retval
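# Minimal usage sketch (paths and variable names are placeholders, not part
# of this module):
#   import xarray as xr
#   sst = xr.open_dataset('sst_monthly.nc')
#   nino34 = enso_nino34(sst, var='sst', file='sst_climatology.nc', threshold=0.5)
#   # -> pandas DataFrame with the index plus boolean El Nino / La Nina columns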
| mit | 3,199,853,828,415,307,300 | 43.312821 | 110 | 0.671566 | false | 3.967401 | false | false | false |
los-cocos/etc_code | cocos#248--RectMapCollider, player sometimes stuck/start.py | 1 | 6979 | """
A script to demo a defect in RectMapCollider, initial report by Netanel at
https://groups.google.com/forum/#!topic/cocos-discuss/a494vcH-u3I
The defect is that the player gets stuck at some positions, and it was confirmed
for cocos master Aug 1, 2015 (292ae676) and cocos-0.6.3-release, see cocos #248
The package 'blinker' (available from pipy) is needed to run this script
Further investigation shows that this happens when both of this concur
1. the player actively pushes against a blocking surface
2. player rect alligns with the grid tile.
changes from the OP bugdemo code:
lines irrelevant to the bug removed
changed player controls
added a view to show the potentially colliding cells that RectMapCollider
will consider (shown as red rectangles overlapping the player)
player pic edited to make visible the actual player boundary
Controlling the player:
use left-right for horizontal move, must keep pressing to move
    use up-down to move vertically; each press adds to / subtracts from the y-velocity
Demoing the bug:
1. move to touch the left wall.
2. release 'left' key
3. move up and down, this works
    4. keep the 'left' key pressed, and try to move down: the player gets stuck
       at some alignments
scene
background
scroller=ScrollingManager
tilemap <- load(...)['map0']
layer=Game (a ScrollableLayer)
sprite
particles
potential collisions view, ShowCollision
"""
from __future__ import division, print_function
from cocos.particle_systems import *
from cocos.particle import Color
from cocos.text import Label
from cocos.tiles import load, RectMapLayer
from cocos.mapcolliders import RectMapWithPropsCollider
from cocos.layer import Layer, ColorLayer, ScrollingManager, ScrollableLayer
from cocos.sprite import Sprite
from cocos.actions import *
from cocos.scene import Scene
from cocos.director import director
from pyglet.window import key
from pyglet.window.key import symbol_string, KeyStateHandler
from menu import GameMenu
import blinker
director.init(width=1920, height=480, autoscale = True, resizable = True)
Map = load("mapmaking.tmx")
scroller = ScrollingManager()
tilemap = Map['map0']
assert tilemap.origin_x == 0
assert tilemap.origin_y == 0
class Background(ColorLayer):
def __init__(self):
super(Background, self).__init__(65,120,255,255)
class ShowCollision(ScrollableLayer):
"""
A layer to show the cells a RectMapCollider considers potentially
colliding with the 'new' rect.
Use with CustomRectMapCollider so the event of interest is published
"""
def __init__(self):
super(ShowCollision, self).__init__()
self.collision_view = []
for i in range(10):
self.collision_view.append(ColorLayer(255, 0, 0, 255, width=64, height=64))
for e in self.collision_view:
self.add(e)
signal = blinker.signal("collider cells")
signal.connect(self.on_collision_changed)
def on_collision_changed(self, sender, payload=None):
for cell, view in zip(payload, self.collision_view):
view.position = (cell.i * 64, cell.j * 64)
view.opacity = 140
for i in range(len(payload), len(self.collision_view)):
self.collision_view[i].opacity = 0
class Game(ScrollableLayer):
is_event_handler = True
def __init__(self):
super(Game, self).__init__()
self.score = 0
# Add player
self.sprite = Sprite('magic.png')
self.sprite.position = 320, 240
self.sprite.direction = "right"
self.sprite.dx = 0
self.sprite.dy = 0
self.add(self.sprite, z=1)
# A list of balls
self.balls = set()
# Teleportation counter
self.teleportation = 0
self.sprite.jump = 0
def on_key_press(self, inp, modifers):
if symbol_string(inp) == "LEFT":
self.sprite.dx -= 3
print("press left, dx:", self.sprite.dx)
if symbol_string(inp) == "RIGHT":
self.sprite.dx += 3
print("press right, dx:", self.sprite.dx)
if symbol_string(inp) == "UP":
self.sprite.dy += 3
if self.sprite.dy > 6:
self.sprite.dy = 6
print("press up, dy:", self.sprite.dy)
if symbol_string(inp) == "DOWN":
self.sprite.dy -= 3
if self.sprite.dy < -6:
self.sprite.dy = -6
print("press down, dy:", self.sprite.dy)
def on_key_release(self, inp, modifers):
if symbol_string(inp) == "LEFT":
self.sprite.dx = 0
print("release left, dx:", self.sprite.dx)
if symbol_string(inp) == "RIGHT":
self.sprite.dx = 0
print("release right, dx:", self.sprite.dx)
class SpyCollider(RectMapWithPropsCollider):
"""
Same as RectMapWithPropsCollider, except it publishes which cells will be considered
for collision.
Usage:
# istantiate
a = SpyCollider()
# set the behavior for velocity change on collision with
# a.on_bump_handler = a.on_bump_slide
# add the signal we want to emit
a.signal = blinker.signal("collider cells")
# use as stock RectMapCollider
# catch the signal with something like ShowCollision
"""
def collide_map(self, maplayer, last, new, vx, vy):
"""collide_map en dos pasadas; """
objects = maplayer.get_in_region(*(new.bottomleft + new.topright))
self.signal.send(payload=objects)
return super(SpyCollider, self).collide_map(maplayer, last, new, vx, vy)
layer = Game()
collider = SpyCollider()
collider.on_bump_handler = collider.on_bump_slide
collider.signal = blinker.signal("collider cells")
#collider = RectMapCollider()
# WARN: this was hacked for bugdemo purposes only; don't use in real code:
# lots of globals
# position delta must use dt, else unpredictable view velocity
def update(dt):
""" Update game"""
last = layer.sprite.get_rect()
new = last.copy()
new.x += layer.sprite.dx
new.y += layer.sprite.dy
# dont care about velocity, pass 0, 0
collider.collide_map(tilemap, last, new, 0.0, 0.0)
layer.sprite.position = new.center
scroller.set_focus(*new.center)
# Schedule Updates
layer.schedule(update)
# Add map to scroller
scroller.add(tilemap)
#Create Scene
scene = Scene()
# Create and add background
background = Background()
scene.add(background)
#Add main layer to scroller
scroller.add(layer)
scroller.add(ShowCollision())
# Add scroller to scene
scene.add(scroller)
# Game menu configuration
menu = GameMenu(scene)
menuScene = Scene()
menuScene.add(menu)
director.run(menuScene)
| mit | 2,921,396,511,402,230,300 | 29.609649 | 91 | 0.640923 | false | 3.638686 | false | false | false |
mozman/ezdxf | tests/test_01_dxf_entities/test_131_field_list.py | 1 | 2369 | # Copyright (c) 2019 Manfred Moitzi
# License: MIT License
from typing import cast
import pytest
import ezdxf
from ezdxf.entities.idbuffer import FieldList
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
FIELDLIST = """0
FIELDLIST
5
0
102
{ACAD_REACTORS
330
0
102
}
330
0
100
AcDbIdSet
90
12
100
AcDbFieldList
"""
@pytest.fixture
def entity():
return FieldList.from_text(FIELDLIST)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'FIELDLIST' in ENTITY_CLASSES
def test_default_init():
entity = FieldList()
assert entity.dxftype() == 'FIELDLIST'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = FieldList.new(handle='ABBA', owner='0', dxfattribs={
})
assert entity.dxf.flags == 0
assert len(entity.handles) == 0
def test_load_from_text(entity):
assert entity.dxf.flags == 12
assert len(entity.handles) == 0
def test_write_dxf():
entity = FieldList.from_text(FIELDLIST)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(FIELDLIST)
assert result == expected
@pytest.fixture(scope='module')
def doc():
return ezdxf.new('R2007')
def test_generic_field_list(doc):
field_list = doc.objects.new_entity('FIELDLIST', {})
assert field_list.dxftype() == 'FIELDLIST'
assert len(field_list.handles) == 0
def test_set_get_field_list(doc):
field_list = doc.objects.new_entity('FIELDLIST', {})
assert field_list.dxftype() == 'FIELDLIST'
field_list.handles = ['FF', 'EE', 'DD']
handles = field_list.handles
assert len(handles) == 3
assert handles == ['FF', 'EE', 'DD']
handles.append('FFFF')
assert handles[-1] == 'FFFF'
def test_dxf_tags(doc):
buffer = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
tags = TagCollector.dxftags(buffer)[-4:]
assert len(tags) == 4
assert tags[0] == (330, 'FF')
assert tags[-1] == (330, 'CC')
def test_clone(doc):
buffer = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
buffer2 = cast(FieldList, buffer.copy())
buffer2.handles[-1] = 'ABCD'
assert buffer.handles[:-1] == buffer2.handles[:-1]
assert buffer.handles[-1] != buffer2.handles[-1]
| mit | -7,176,212,482,512,180,000 | 21.561905 | 69 | 0.662727 | false | 3.133598 | true | false | false |
krautradio/PyRfK | lib/rfk/database/base.py | 1 | 14479 | import time
import hashlib
from datetime import timedelta
from passlib.hash import bcrypt
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref, exc
from sqlalchemy.dialects.mysql import INTEGER as Integer
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql.expression import case
import re
import os
from flask.ext.login import AnonymousUserMixin
import rfk.database
from rfk.types import SET, ENUM
from rfk import exc as rexc
from rfk import CONFIG
from rfk.database import Base, UTCDateTime
from rfk.database.show import UserShow, Show
from rfk.helper import now, get_path
class Anonymous(AnonymousUserMixin):
def __init__(self):
AnonymousUserMixin.__init__(self)
self.locale = 'de'
self.timezone = 'Europe/Berlin'
def get_locale(self):
return self.locale
def get_timezone(self):
return self.timezone
def has_permission(self, code=None, permission=None):
return False
class User(Base):
__tablename__ = 'users'
user = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
username = Column(String(50), unique=True)
password = Column(String(64))
mail = Column(String(255))
country = Column(String(3))
register_date = Column(UTCDateTime, default=now)
last_login = Column(UTCDateTime, default=None)
def get_id(self):
return unicode(self.user)
def is_anonymous(self):
return False
def is_active(self):
return True
def is_authenticated(self):
return True
def get_locale(self):
return self.get_setting(code='locale')
def get_timezone(self):
return self.get_setting(code='timezone')
@staticmethod
def authenticate(username, password):
"""shorthand function for authentication a user
returns the user object
Keyword arguments:
username -- username
password -- unencrypted password
"""
user = User.get_user(username=username)
if user.check_password(password):
return user
else:
raise rexc.base.InvalidPasswordException()
@staticmethod
def get_user(id=None, username=None):
assert id or username
try:
if username is None:
return User.query.filter(User.user == id).one()
else:
return User.query.filter(User.username == username).one()
except exc.NoResultFound:
raise rexc.base.UserNotFoundException
@staticmethod
def check_username(username):
if re.match('^[0-9a-zA-Z_-]{3,}$', username) is None:
return False
else:
return True
@staticmethod
def make_password(password):
return bcrypt.encrypt(password)
@staticmethod
def add_user(username, password):
if not User.check_username(username):
raise rexc.base.InvalidUsernameException
try:
User.query.filter(User.username == username).one()
raise rexc.base.UserNameTakenException()
except exc.NoResultFound:
user = User(username=username, password=User.make_password(password))
rfk.database.session.add(user)
rfk.database.session.flush()
return user
def check_password(self, password):
try:
return bcrypt.verify(password, self.password)
except ValueError:
if hashlib.sha1(password).hexdigest() == self.password:
self.password = User.make_password(password)
return True
else:
return False
def add_permission(self, code=None, permission=None):
assert code or permission
if permission is None:
permission = Permission.get_permission(code)
try:
UserPermission.query.filter(UserPermission.user == self,
UserPermission.permission == permission) \
.one()
return False
except exc.NoResultFound:
self.permissions.append(UserPermission(permission))
return True
def has_permission(self, code=None, permission=None):
assert code or permission
if permission is None:
permission = Permission.get_permission(code)
try:
UserPermission.query.filter(UserPermission.user == self,
UserPermission.permission == permission) \
.one()
return True
except exc.NoResultFound:
return False
def get_setting(self, setting=None, code=None):
assert setting or code
if setting is None:
setting = Setting.get_setting(code)
try:
us = UserSetting.query.filter(UserSetting.user == self,
UserSetting.setting == setting).one()
return us.get_value()
except exc.NoResultFound:
return None
def set_setting(self, value, setting=None, code=None):
assert setting or code
if setting is None:
setting = Setting.get_setting(code)
UserSetting.set_value(self, setting, value)
rfk.database.session.flush()
    def get_total_streamtime(self):
        """Returns a timedelta object with the user's total time streamed"""
        try:
            seconds = rfk.database.session \
                .query(func.sum(func.time_to_sec(func.timediff(Show.end, Show.begin)))) \
                .join(UserShow) \
                .filter(UserShow.status == UserShow.STATUS.STREAMED,
                        UserShow.user == self) \
                .first()[0]
            return timedelta(seconds=float(seconds))
        except TypeError:
            return timedelta(seconds=0)
def __repr__(self):
return "<USER username={0}>".format(self.username)
class Setting(Base):
__tablename__ = 'settings'
setting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
code = Column(String(25), unique=True)
name = Column(String(50))
val_type = Column(Integer(unsigned=True))
TYPES = ENUM(['INT', 'STR'])
@staticmethod
def get_setting(code):
return Setting.query.filter(Setting.code == code).one()
@staticmethod
def add_setting(code, name, val_type):
try:
return Setting.query.filter(Setting.code == code).one()
except exc.NoResultFound:
return Setting(code=code, name=name, val_type=val_type)
class UserSetting(Base):
__tablename__ = 'user_settings'
userSetting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('settings'))
setting_id = Column("setting", Integer(unsigned=True),
ForeignKey('settings.setting',
onupdate="CASCADE",
ondelete="RESTRICT"))
setting = relationship("Setting")
val_int = Column(Integer)
val_str = Column(String(255))
def get_value(self):
if self.setting.val_type == Setting.TYPES.INT:
return self.val_int
elif self.setting.val_type == Setting.TYPES.STR:
return self.val_str
@staticmethod
def set_value(user, setting, value):
if value == True:
value = 1
elif value == False:
value = 0
try:
us = UserSetting.query.filter(UserSetting.user == user,
UserSetting.setting == setting).one()
except exc.NoResultFound:
us = UserSetting(user=user, setting=setting)
if us.setting.val_type == Setting.TYPES.INT:
us.val_int = value
elif us.setting.val_type == Setting.TYPES.STR:
us.val_str = value
class Permission(Base):
__tablename__ = 'permissions'
permission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
code = Column(String(25), unique=True)
name = Column(String(50))
@staticmethod
def get_permission(code):
return Permission.query.filter(Permission.code == code).one()
@staticmethod
def add_permission(code, name):
try:
return Permission.query.filter(Permission.code == code).one()
except exc.NoResultFound:
return Permission(code=code, name=name)
class UserPermission(Base):
__tablename__ = 'user_permissions'
userPermission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('permissions', cascade="all, delete-orphan"))
permission_id = Column("permission", Integer(unsigned=True),
ForeignKey('permissions.permission',
onupdate="CASCADE",
ondelete="RESTRICT"))
permission = relationship("Permission", backref=backref('users', cascade="all, delete-orphan"))
def __init__(self, permission):
self.permission = permission
class Ban(Base):
__tablename__ = 'bans'
ban = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('bans'))
range = Column(String(50))
expiration = Column(UTCDateTime)
class News(Base):
__tablename__ = 'news'
news = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
time = Column(UTCDateTime, default=now())
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User")
title = Column(String(255))
content = Column(Text)
class ApiKey(Base):
__tablename__ = 'apikeys'
apikey = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref="apikeys")
key = Column(String(128))
counter = Column(Integer(unsigned=True), default=0)
access = Column(UTCDateTime, default=now())
application = Column(String(128))
description = Column(String(255))
flag = Column(Integer(unsigned=True), default=0)
FLAGS = SET(['DISABLED', 'FASTQUERY', 'KICK', 'BAN', 'AUTH'])
def gen_key(self):
c = 0
while True:
key = hashlib.sha1("%s%s%d%d" % (self.application, self.description, time.time(), c)).hexdigest()
            if ApiKey.query.filter(ApiKey.key == key).first() is None:
break
self.key = key
@staticmethod
def check_key(key):
try:
apikey = ApiKey.query.filter(ApiKey.key == key).one()
except (exc.NoResultFound, exc.MultipleResultsFound):
raise rexc.api.KeyInvalidException()
if apikey.flag & ApiKey.FLAGS.DISABLED:
raise rexc.api.KeyDisabledException()
elif not apikey.flag & ApiKey.FLAGS.FASTQUERY:
if now() - apikey.access <= timedelta(seconds=1):
raise rexc.api.FastQueryException(last_access=apikey.access)
apikey.counter += 1
apikey.access = now()
return apikey
class Log(Base):
__tablename__ = 'log'
log = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
timestamp = Column(UTCDateTime, default=now)
severity = Column(Integer(unsigned=True))
module = Column(String(50))
message = Column(Text)
class Loop(Base):
__tablename__ = 'loops'
loop = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
begin = Column(Integer(unsigned=True), default=0)
end = Column(Integer(unsigned=True), default=1440)
filename = Column(String(50))
@hybrid_property
def length(self):
if (self.end >= self.begin):
return abs(self.end - self.begin)
else:
return abs((self.end + 2400) - self.begin)
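    # Example: a loop from 23:30 (begin=2350) to 00:30 (end=50) wraps past
    # midnight, so its length is abs((50 + 2400) - 2350) == 100.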
@length.expression
def length(cls):
return func.abs(cast(case([(cls.begin <= cls.end, cls.end),
(cls.begin >= cls.end, cls.end + 2400)]), Integer) - cast(cls.begin, Integer))
@hybrid_method
def contains(self, point):
return case([(self.begin <= self.end, (self.begin <= point) & (self.end >= point)),
(self.begin >= self.end, (self.begin <= point) | (self.end >= point))])
@hybrid_property
def file_exists(self):
if self.filename is None:
return False
return os.path.exists(os.path.join(get_path(CONFIG.get('liquidsoap', 'looppath')), self.filename))
@staticmethod
def get_current_loop():
"""
returns the current loop to be scheduled
@todo maybe broken ;_;
"""
n = now()
#try to find a loop that should be running
loops = Loop.query.filter(Loop.contains(int(n.hour * 100 + (n.minute / 60.) * 100))).order_by(
Loop.length.asc()).all()
for loop in loops:
if loop.file_exists:
                return loop
# we found no loops
# just try to find the longest one
loops = Loop.query.order_by(Loop.length.asc()).all()
for loop in loops:
if loop.file_exists:
                return loop
        # okay, now we have a problem, just return None
return None
| bsd-3-clause | 1,369,849,253,642,008,600 | 34.662562 | 113 | 0.58243 | false | 4.344134 | false | false | false |
albertoferna/compmech | setup.py | 1 | 1198 | from glob import glob
import sys
import os
from subprocess import Popen
import numpy
#params = 'build_ext -inplace -IC:\clones\cubature\cubature ' + ' '.join(sys.argv[1:])
params = 'build_ext --inplace -I%s' % numpy.get_include() + ' '.join(sys.argv[1:]) + ' clean'
cwd = os.getcwd()
if os.name == 'nt':
use_sdk = 'DISTUTILS_USE_SDK'
if not use_sdk in os.environ.keys():
os.environ[use_sdk] = '1'
print('####################')
print('Compiling modules...')
print('####################')
print('')
basedirs = [
os.path.join('compmech', 'conecyl', 'clpt'),
os.path.join('compmech', 'conecyl', 'fsdt'),
os.path.join('compmech', 'integrate'),
os.path.join('compmech', 'conecyl', 'imperfections'),
os.path.join('compmech', 'aero', 'pistonplate', 'clpt'),
os.path.join('compmech', 'aero', 'pistonstiffpanel', 'clpt'),
]
for basedir in basedirs:
print('Compiling setup.py in %s' % basedir)
basedir = os.path.sep.join([cwd, basedir])
os.chdir(basedir)
for fname in glob('setup*.py'):
p = Popen(('python {} '.format(fname) + params), shell=True)
p.wait()
os.chdir(cwd)
| bsd-3-clause | 8,871,118,216,028,705,000 | 29.717949 | 93 | 0.569282 | false | 3.040609 | false | false | false |
MicBrain/Scheme-Interpreter | scheme.py | 1 | 21214 | """This module implements the core Scheme interpreter functions, including the
eval/apply mutual recurrence, environment model, and read-eval-print loop.
"""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env):
"""Evaluate Scheme expression EXPR in environment ENV. If ENV is None,
simply returns EXPR as its value without further evaluation.
>>> expr = read_line("(+ 2 2)")
>>> expr
Pair('+', Pair(2, Pair(2, nil)))
>>> scheme_eval(expr, create_global_frame())
scnum(4)
"""
while env is not None:
# Note: until extra-credit problem 22 is complete, env will
# always be None on the second iteration of the loop, so that
# the value of EXPR is returned at that point.
if expr is None:
raise SchemeError("Cannot evaluate an undefined expression.")
# Evaluate Atoms
if scheme_symbolp(expr):
expr, env = env.lookup(expr).get_actual_value(), None
elif scheme_atomp(expr):
env = None
# All non-atomic expressions are lists.
elif not scheme_listp(expr):
raise SchemeError("malformed list: {0}".format(str(expr)))
else:
first, rest = scheme_car(expr), scheme_cdr(expr)
# Evaluate Combinations
if (scheme_symbolp(first) # first might be unhashable
and first in SPECIAL_FORMS):
if proper_tail_recursion:
expr, env = SPECIAL_FORMS[first](rest, env)
else:
expr, env = SPECIAL_FORMS[first](rest, env)
expr, env = scheme_eval(expr, env), None
else:
procedure = scheme_eval(first, env)
args = procedure.evaluate_arguments(rest, env)
if proper_tail_recursion:
expr, env = procedure.apply(args, env)
else:
# UPDATED 4/14/2014 @ 19:08
expr, env = scheme_apply(procedure, args, env), None
return expr
################################################################
# Tail call optimization is currently enabled; set the flag    #
# below to False to fall back to plain recursive evaluation.   #
################################################################
proper_tail_recursion = True
def scheme_apply(procedure, args, env):
"""Apply PROCEDURE (type Procedure) to argument values ARGS
in environment ENV. Returns the resulting Scheme value."""
# UPDATED 4/14/2014 @ 19:08
# Since .apply is allowed to do a partial evaluation, we finish up
# with a call to scheme_eval to complete the evaluation. scheme_eval
# will simply return expr if its env argument is None.
expr, env = procedure.apply(args, env)
return scheme_eval(expr, env)
################
# Environments #
################
class Frame:
"""An environment frame binds Scheme symbols to Scheme values."""
def __init__(self, parent):
"""An empty frame with a PARENT frame (that may be None)."""
self.bindings = {}
self.parent = parent
def __repr__(self):
if self.parent is None:
return "<Global Frame>"
else:
s = sorted('{0}: {1}'.format(k,v) for k,v in self.bindings.items())
return "<{{{0}}} -> {1}>".format(', '.join(s), repr(self.parent))
def __eq__(self, other):
return isinstance(other, Frame) and \
self.parent == other.parent
def lookup(self, symbol):
"""Return the value bound to SYMBOL. Errors if SYMBOL is not found.
As a convenience, also accepts Python strings, which it turns into
symbols."""
if type(symbol) is str:
symbol = intern(symbol)
if symbol in self.bindings:
return self.bindings[symbol]
if self.parent is not None:
return self.parent.lookup(symbol)
raise SchemeError("unknown identifier: {0}".format(str(symbol)))
def global_frame(self):
"""The global environment at the root of the parent chain."""
e = self
while e.parent is not None:
e = e.parent
return e
def make_call_frame(self, formals, vals):
"""Return a new local frame whose parent is SELF, in which the symbols
in the Scheme formal parameter list FORMALS are bound to the Scheme
values in the Scheme value list VALS. Raise an error if too many or too
few arguments are given.
>>> env = create_global_frame()
>>> formals, vals = read_line("(a b c)"), read_line("(1 2 3)")
>>> env.make_call_frame(formals, vals)
<{a: 1, b: 2, c: 3} -> <Global Frame>>
"""
frame = Frame(self)
if len(formals) != len(vals):
raise SchemeError
for expression in range(len(formals)):
frame.define(formals[expression], vals[expression])
return frame
def define(self, sym, val):
"""Define Scheme symbol SYM to have value VAL in SELF. As a
convenience, SYM may be Python string, which is converted first
to a Scheme symbol. VAL must be a SchemeValue."""
assert isinstance(val, SchemeValue), "values must be SchemeValues"
if type(sym) is str:
sym = intern(sym)
self.bindings[sym] = val
#####################
# Procedures #
#####################
class Procedure(SchemeValue):
"""The superclass of all kinds of procedure in Scheme."""
# Arcane Technical Note: The odd placement of the import from scheme in
# evaluate_arguments is necessary because it introduces mutually recursive
# imports between this file and scheme.py. The effect of putting it
# here is that we delay attempting to access scheme.scheme_eval until
# after the scheme module's initialization is finished.
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
from scheme import scheme_eval
return arg_list.map(lambda operand: scheme_eval(operand, env))
class PrimitiveProcedure(Procedure):
"""A Scheme procedure defined as a Python function."""
def __init__(self, fn, use_env=False):
self.fn = fn
self.use_env = use_env
def __str__(self):
return '#[primitive]'
def __repr__(self):
return "PrimitiveProcedure({})".format(str(self))
def apply(self, args, env):
"""Apply a primitive procedure to ARGS in ENV. Returns
a pair (val, None), where val is the resulting value.
>>> twos = Pair(SchemeInt(2), Pair(SchemeInt(2), nil))
>>> plus = PrimitiveProcedure(scheme_add, False)
>>> plus.apply(twos, None)
(scnum(4), None)
"""
try:
converted_list = []
while args != nil:
converted_list.append(args.first)
args = args.second
if self.use_env:
converted_list.append(env)
val = self.fn(*converted_list)
return val, None
except TypeError:
raise SchemeError
class LambdaProcedure(Procedure):
"""A procedure defined by a lambda expression or the complex define form."""
def __init__(self, formals, body, env = None):
"""A procedure whose formal parameter list is FORMALS (a Scheme list),
whose body is the single Scheme expression BODY, and whose parent
environment is the Frame ENV. A lambda expression containing multiple
expressions, such as (lambda (x) (display x) (+ x 1)) can be handled by
using (begin (display x) (+ x 1)) as the body."""
self.formals = formals
self.body = body
self.env = env
def _symbol(self):
return 'lambda'
def __str__(self):
# UPDATED 4/16/2014 @ 13:20
return "({0} {1} {2})".format(self._symbol(),
str(self.formals), str(self.body))
def __repr__(self):
args = (self.formals, self.body, self.env)
return "{0}Procedure({1}, {2}, {3})".format(self._symbol().capitalize(),
*(repr(a) for a in args))
def __eq__(self, other):
return type(other) is type(self) and \
self.formals == other.formals and \
self.body == other.body and \
self.env == other.env
def apply(self, args, env):
environment = self.env.make_call_frame(self.formals, args)
if proper_tail_recursion:
return self.body, self.env.make_call_frame(self.formals, args)
else:
return scheme_eval(self.body, self.env.make_call_frame(self.formals, args)), None
class MuProcedure(LambdaProcedure):
"""A procedure defined by a mu expression, which has dynamic scope.
"""
def _symbol(self):
return 'mu'
def apply(self, args, env):
if proper_tail_recursion:
return self.body, env.make_call_frame(self.formals, args)
else:
return scheme_eval(self.body, env.make_call_frame(self.formals, args)), None
# Call-by-name (nu) extension.
class NuProcedure(LambdaProcedure):
"""A procedure whose parameters are to be passed by name."""
def _symbol(self):
return 'nu'
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
return arg_list.map(lambda operand: Thunk(nil, operand, env))
class Thunk(LambdaProcedure):
"""A by-name value that is to be called as a parameterless function when
its value is fetched to be used."""
def get_actual_value(self):
return scheme_eval(self.body, self.env)
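# Call-by-name sketch (illustrative; assumes Thunks are forced by the
# evaluator in scheme.py when their value is actually needed):
#
#   ((nu (a b) b) (/ 1 0) 42)
#
# Under nu, (/ 1 0) is wrapped in a Thunk that is never forced because `a`
# is unused, so no division error is raised and the call yields 42.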
#################
# Special forms #
#################
# All of the 'do_..._form' methods return a value and an environment,
# as for the 'apply' method on Procedures. That is, they either return
# (V, None), indicating that the value of the special form is V, or they
# return (Expr, Env), indicating that the value of the special form is what
# you would get by evaluating Expr in the environment Env.
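# A minimal sketch (hypothetical names; the real loop lives in scheme.py) of
# the tail-call trampoline this convention enables:
#
#   def scheme_eval(expr, env):
#       while True:
#           ...                                  # dispatch on expr
#           expr, env = SPECIAL_FORMS[op](rest, env)
#           if env is None:                      # (V, None): expr is final
#               return expr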
def do_lambda_form(vals, env, function_type=LambdaProcedure):
"""Evaluate a lambda form with formals VALS[0] and body VALS.second
    in environment ENV, creating a procedure of type FUNCTION_TYPE
(a subtype of Procedure)."""
check_form(vals, 2)
operands = vals.first
check_formals(operands)
body = vals.second
    if len(body) != 1:
return function_type(operands, Pair("begin", body), env), None
return function_type(operands, body.first, env), None
def do_mu_form(vals, env):
"""Evaluate a mu (dynamically scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=MuProcedure)
def do_nu_form(vals, env):
"""Evaluate a mu (call-by-name scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=NuProcedure)
def do_define_form(vals, env):
"""Evaluate a define form with parameters VALS in environment ENV."""
check_form(vals, 2)
target = vals[0]
if scheme_symbolp(target):
check_form(vals, 2, 2)
env.define(target, scheme_eval(vals[1], env))
return (target, None)
elif scheme_pairp(target):
func_name = target.first
        if isinstance(func_name, (SchemeNumber, SchemeFloat)):
raise SchemeError("bad argument to define")
lambda_vals = Pair(target.second, vals.second)
lambda_func = do_lambda_form(lambda_vals, env)[0]
env.define(func_name, lambda_func)
return func_name, None
else:
raise SchemeError("bad argument to define")
def do_quote_form(vals, env):
"""Evaluate a quote form with parameters VALS. ENV is ignored."""
check_form(vals, 1, 1)
return vals[0], None
def do_let_form(vals, env):
"""Evaluate a let form with parameters VALS in environment ENV."""
check_form(vals, 2)
bindings = vals[0]
exprs = vals.second
if not scheme_listp(bindings):
raise SchemeError("bad bindings list in let form")
# Add a frame containing bindings
names, values = nil, nil
for item in bindings:
values = Pair(scheme_eval(item.second.first, env), values)
names = Pair(item.first, names)
new_env = env.make_call_frame(names, values)
# Evaluate all but the last expression after bindings, and return the last
last = len(exprs)-1
for i in range(0, last):
scheme_eval(exprs[i], new_env)
return exprs[last], new_env
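# Example (illustrative): (let ((x 2) (y 3)) (display x) (* x y)) builds a
# frame binding x and y, evaluates (display x) for its effect, and returns
# the pair ((* x y), new_env) so scheme_eval can finish it tail-recursively.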
#########################
# Logical Special Forms #
#########################
def do_if_form(vals, env):
"""Evaluate if form with parameters VALS in environment ENV."""
check_form(vals, 2, 3)
    if scheme_eval(vals.first, env):
return vals.second.first, env
elif len(vals) == 2:
return okay, None
return vals.second.second.first, env
def do_and_form(vals, env):
"""Evaluate short-circuited and with parameters VALS in environment ENV."""
if len(vals):
for i in range(len(vals) - 1):
            if not scheme_eval(vals[i], env):
return scheme_false, None
return vals[len(vals) - 1], env
return scheme_true, None
def quote(value):
"""Return a Scheme expression quoting the Scheme VALUE.
>>> s = quote('hello')
>>> print(s)
(quote hello)
>>> scheme_eval(s, Frame(None)) # "hello" is undefined in this frame.
intern('hello')
"""
return Pair("quote", Pair(value, nil))
def do_or_form(vals, env):
"""Evaluate short-circuited or with parameters VALS in environment ENV."""
for value in vals:
eval_expression = scheme_eval(value, env)
if eval_expression:
return eval_expression, None
return scheme_false, None
def do_cond_form(vals, env):
"""Evaluate cond form with parameters VALS in environment ENV."""
num_clauses = len(vals)
for i, clause in enumerate(vals):
check_form(clause, 1)
if clause.first is else_sym:
if i < num_clauses-1:
raise SchemeError("else must be last")
test = scheme_true
if clause.second is nil:
raise SchemeError("badly formed else clause")
else:
test = scheme_eval(clause.first, env)
if test:
if len(clause.second) == 0:
return test, None
if len(clause.second) >= 2:
return Pair('begin', clause.second), env
return clause.second.first, env
return okay, None
def do_begin_form(vals, env):
"""Evaluate begin form with parameters VALS in environment ENV."""
check_form(vals, 0)
if scheme_nullp(vals):
return okay, None
for i in range(len(vals) - 1):
scheme_eval(vals[i], env)
return vals[len(vals) - 1], env
# Collected symbols with significance to the interpreter
and_sym = intern("and")
begin_sym = intern("begin")
cond_sym = intern("cond")
define_macro_sym = intern("define-macro")
define_sym = intern("define")
else_sym = intern("else")
if_sym = intern("if")
lambda_sym = intern("lambda")
let_sym = intern("let")
mu_sym = intern("mu")
nu_sym = intern("nu")
or_sym = intern("or")
quasiquote_sym = intern("quasiquote")
quote_sym = intern("quote")
set_bang_sym = intern("set!")
unquote_splicing_sym = intern("unquote-splicing")
unquote_sym = intern("unquote")
# Collected special forms
SPECIAL_FORMS = {
and_sym: do_and_form,
begin_sym: do_begin_form,
cond_sym: do_cond_form,
define_sym: do_define_form,
if_sym: do_if_form,
lambda_sym: do_lambda_form,
let_sym: do_let_form,
mu_sym: do_mu_form,
nu_sym: do_nu_form,
or_sym: do_or_form,
quote_sym: do_quote_form,
}
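# Dispatch sketch (hypothetical; the actual dispatch happens in scheme.py's
# scheme_eval): for a compound expression whose operator is a special-form
# symbol, evaluation reduces to
#
#   expr, env = SPECIAL_FORMS[first](rest, env)
#
# where `first` is the operator symbol and `rest` the remaining operands;
# otherwise the operator and operands are evaluated and Procedure.apply runs.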
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max = None):
"""Check EXPR (default SELF.expr) is a proper list whose length is
at least MIN and no more than MAX (default: no maximum). Raises
a SchemeError if this is not the case."""
if not scheme_listp(expr):
raise SchemeError("badly formed expression: " + str(expr))
length = len(expr)
if length < min:
raise SchemeError("too few operands in form")
elif max is not None and length > max:
raise SchemeError("too many operands in form")
def check_formals(formals):
"""Check that FORMALS is a valid parameter list, a Scheme list of symbols
in which each symbol is distinct. Raise a SchemeError if the list of formals
is not a well-formed list of symbols or if any symbol is repeated.
>>> check_formals(read_line("(a b c)"))
"""
seen_symbols = []
while len(formals):
        if not scheme_symbolp(formals.first) or formals.first in seen_symbols:
            raise SchemeError('bad or repeated formal parameter')
seen_symbols.append(formals.first)
formals = formals.second
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, quiet=False, startup=False,
interactive=False, load_files=()):
"""Read and evaluate input until an end of file or keyboard interrupt."""
if startup:
for filename in load_files:
scheme_load(scstr(filename), True, env)
while True:
try:
src = next_line()
while src.more_on_line:
expression = scheme_read(src)
result = scheme_eval(expression, env)
if not quiet and result is not None:
scheme_print(result)
except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
if (isinstance(err, RuntimeError) and
'maximum recursion depth exceeded' not in err.args[0]):
raise
print("Error:", err)
except KeyboardInterrupt: # <Control>-C
if not startup:
raise
print("\nKeyboardInterrupt")
if not interactive:
return
except EOFError: # <Control>-D, etc.
return
def scheme_load(*args):
"""Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
determined by QUIET (default true)."""
if not (2 <= len(args) <= 3):
vals = args[:-1]
raise SchemeError("wrong number of arguments to load: {0}".format(vals))
sym = args[0]
quiet = args[1] if len(args) > 2 else True
env = args[-1]
    if scheme_stringp(sym):
sym = intern(str(sym))
check_type(sym, scheme_symbolp, 0, "load")
with scheme_open(str(sym)) as infile:
lines = infile.readlines()
args = (lines, None) if quiet else (lines,)
def next_line():
return buffer_lines(*args)
read_eval_print_loop(next_line, env.global_frame(), quiet=quiet)
return okay
def scheme_open(filename):
"""If either FILENAME or FILENAME.scm is the name of a valid file,
return a Python file opened to it. Otherwise, raise an error."""
try:
return open(filename)
except IOError as exc:
if filename.endswith('.scm'):
raise SchemeError(str(exc))
try:
return open(filename + '.scm')
except IOError as exc:
raise SchemeError(str(exc))
def create_global_frame():
"""Initialize and return a single-frame environment with built-in names."""
env = Frame(None)
env.define("eval", PrimitiveProcedure(scheme_eval, True))
env.define("apply", PrimitiveProcedure(scheme_apply, True))
env.define("load", PrimitiveProcedure(scheme_load, True))
for names, fn in get_primitive_bindings():
for name in names:
proc = PrimitiveProcedure(fn)
env.define(name, proc)
return env
@main
def run(*argv):
next_line = buffer_input
interactive = True
load_files = ()
if argv:
try:
filename = argv[0]
if filename == '-load':
load_files = argv[1:]
else:
input_file = open(argv[0])
lines = input_file.readlines()
def next_line():
return buffer_lines(lines)
interactive = False
except IOError as err:
print(err)
sys.exit(1)
read_eval_print_loop(next_line, create_global_frame(), startup=True,
interactive=interactive, load_files=load_files)
tscheme_exitonclick()
| apache-2.0 | 1,368,474,208,198,917,400 | 35.139693 | 93 | 0.593335 | false | 3.926337 | false | false | false |
beefoo/still-i-rise | collect_sound_data.py | 1 | 2076 | # -*- coding: utf-8 -*-
# Description: collect pitch and pulse (point process) data from an audio file via Praat
import argparse
import json
import os
from pprint import pprint
import re
import subprocess
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="still_i_rise.wav", help="Path to input audio file")
parser.add_argument('-pitch', dest="OUTPUT_PITCH_FILE", default="data/still_i_rise.Pitch", help="Path to output pitch data file")
parser.add_argument('-pulse', dest="OUTPUT_PULSE_FILE", default="data/still_i_rise.PointProcess", help="Path to output pulse data file")
parser.add_argument('-ts', dest="TIME_STEP", default="0.01", help="Time step in seconds")
parser.add_argument('-p0', dest="PITCH_FLOOR", default="70", help="Pitch floor in Hz")
parser.add_argument('-mc', dest="MAX_CANDIDATES", default="4", help="Maximum candidates per frame")
parser.add_argument('-va', dest="VERY_ACCURATE", default="on", help="Very accurate, on/off")
parser.add_argument('-st', dest="SILENCE_THRESHOLD", default="0.01", help="Silence threshold")
parser.add_argument('-vt', dest="VOICING_THRESHOLD", default="0.3", help="Voicing threshold")
parser.add_argument('-oc', dest="OCTAVE_COST", default="0.001", help="Octave cost")
parser.add_argument('-ojc', dest="OCTAVE_JUMP_COST", default="0.3", help="Octave jump cost")
parser.add_argument('-vc', dest="VOICED_COST", default="0.2", help="Voiced cost")
parser.add_argument('-p1', dest="PITCH_CEILING", default="400", help="Pitch ceiling in Hz")
# init input
args = parser.parse_args()
# cut the clip
command = ['Praat', '--run', 'collect_sound_data.praat', args.INPUT_FILE, args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE, args.TIME_STEP, args.PITCH_FLOOR, args.MAX_CANDIDATES, args.VERY_ACCURATE, args.SILENCE_THRESHOLD, args.VOICING_THRESHOLD, args.OCTAVE_COST, args.OCTAVE_JUMP_COST, args.VOICED_COST, args.PITCH_CEILING]
print "Running %s" % " ".join(command)
finished = subprocess.check_call(command)
print "Wrote data to %s and %s" % (args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE)
| mit | 2,398,684,887,728,343,000 | 55.108108 | 323 | 0.725915 | false | 2.969957 | false | false | false |
googleapis/googleapis-gen | google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/job_service/transports/grpc.py | 1 | 23359 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.talent_v4beta1.types import job
from google.cloud.talent_v4beta1.types import job as gct_job
from google.cloud.talent_v4beta1.types import job_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
class JobServiceGrpcTransport(JobServiceTransport):
"""gRPC backend transport for JobService.
A service handles job management, including job CRUD,
enumeration and search.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_job(self) -> Callable[
[job_service.CreateJobRequest],
gct_job.Job]:
r"""Return a callable for the create job method over gRPC.
Creates a new job.
Typically, the job becomes searchable within 10 seconds,
but it may take up to 5 minutes.
Returns:
Callable[[~.CreateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_job' not in self._stubs:
self._stubs['create_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/CreateJob',
request_serializer=job_service.CreateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['create_job']
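    # Typical callers use the generated JobServiceClient rather than this
    # transport directly. A minimal sketch (assumed project path; standard
    # generated-client surface):
    #
    #   from google.cloud import talent_v4beta1
    #   client = talent_v4beta1.JobServiceClient()  # builds this transport
    #   job = client.create_job(parent='projects/my-project', job=my_job)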
@property
def batch_create_jobs(self) -> Callable[
[job_service.BatchCreateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch create jobs method over gRPC.
Begins executing a batch create jobs operation.
Returns:
Callable[[~.BatchCreateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_create_jobs' not in self._stubs:
self._stubs['batch_create_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchCreateJobs',
request_serializer=job_service.BatchCreateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_create_jobs']
@property
def get_job(self) -> Callable[
[job_service.GetJobRequest],
job.Job]:
r"""Return a callable for the get job method over gRPC.
Retrieves the specified job, whose status is OPEN or
recently EXPIRED within the last 90 days.
Returns:
Callable[[~.GetJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_job' not in self._stubs:
self._stubs['get_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/GetJob',
request_serializer=job_service.GetJobRequest.serialize,
response_deserializer=job.Job.deserialize,
)
return self._stubs['get_job']
@property
def update_job(self) -> Callable[
[job_service.UpdateJobRequest],
gct_job.Job]:
r"""Return a callable for the update job method over gRPC.
Updates specified job.
Typically, updated contents become visible in search
results within 10 seconds, but it may take up to 5
minutes.
Returns:
Callable[[~.UpdateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_job' not in self._stubs:
self._stubs['update_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/UpdateJob',
request_serializer=job_service.UpdateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['update_job']
@property
def batch_update_jobs(self) -> Callable[
[job_service.BatchUpdateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch update jobs method over gRPC.
Begins executing a batch update jobs operation.
Returns:
Callable[[~.BatchUpdateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_update_jobs' not in self._stubs:
self._stubs['batch_update_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchUpdateJobs',
request_serializer=job_service.BatchUpdateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_update_jobs']
@property
def delete_job(self) -> Callable[
[job_service.DeleteJobRequest],
empty_pb2.Empty]:
r"""Return a callable for the delete job method over gRPC.
Deletes the specified job.
Typically, the job becomes unsearchable within 10
seconds, but it may take up to 5 minutes.
Returns:
Callable[[~.DeleteJobRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_job' not in self._stubs:
self._stubs['delete_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/DeleteJob',
request_serializer=job_service.DeleteJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_job']
@property
def batch_delete_jobs(self) -> Callable[
[job_service.BatchDeleteJobsRequest],
empty_pb2.Empty]:
r"""Return a callable for the batch delete jobs method over gRPC.
Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by
filter.
Returns:
Callable[[~.BatchDeleteJobsRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_delete_jobs' not in self._stubs:
self._stubs['batch_delete_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchDeleteJobs',
request_serializer=job_service.BatchDeleteJobsRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['batch_delete_jobs']
@property
def list_jobs(self) -> Callable[
[job_service.ListJobsRequest],
job_service.ListJobsResponse]:
r"""Return a callable for the list jobs method over gRPC.
Lists jobs by filter.
Returns:
Callable[[~.ListJobsRequest],
~.ListJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_jobs' not in self._stubs:
self._stubs['list_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/ListJobs',
request_serializer=job_service.ListJobsRequest.serialize,
response_deserializer=job_service.ListJobsResponse.deserialize,
)
return self._stubs['list_jobs']
@property
def search_jobs(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs that the caller
has permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs' not in self._stubs:
self._stubs['search_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobs',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs']
@property
def search_jobs_for_alert(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs for alert method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This API call is intended for the use case of targeting passive
job seekers (for example, job seekers who have signed up to
receive email alerts about potential job opportunities), and has
different algorithmic adjustments that are targeted to passive
job seekers.
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs the caller has
permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs_for_alert' not in self._stubs:
self._stubs['search_jobs_for_alert'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobsForAlert',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs_for_alert']
__all__ = (
'JobServiceGrpcTransport',
)
| apache-2.0 | 5,724,087,368,380,103,000 | 42.580224 | 87 | 0.609615 | false | 4.638403 | false | false | false |
chungjjang80/FRETBursts | fretbursts/burstlib.py | 1 | 133746 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2013-2016 The Regents of the University of California,
# Antonino Ingargiola <[email protected]>
#
"""
This module contains all the main FRETBursts analysis functions.
`burstslib.py` defines the fundamental object `Data()` that contains both the
experimental data (attributes) and the high-level analysis routines (methods).
Furthermore it loads all the remaining **FRETBursts** modules (except for
`loaders.py`).
For usage example see the IPython Notebooks in sub-folder "notebooks".
"""
from __future__ import print_function, absolute_import, division
from future.utils import raise_from
from builtins import range, zip
import os
import hashlib
import numpy as np
import copy
from numpy import zeros, size, r_
import scipy.stats as SS
from .utils.misc import pprint, clk_to_s, deprecate
from .poisson_threshold import find_optimal_T_bga
from . import fret_fit
from . import bg_cache
from .ph_sel import Ph_sel
from .fretmath import gamma_correct_E, gamma_uncorrect_E
from .phtools import burstsearch as bslib
from .phtools.burstsearch import (
# Burst search function
bsearch,
# Photon counting function,
mch_count_ph_in_bursts
)
from .phtools import phrates
from . import background as bg
from . import select_bursts
from . import fit
from .fit.gaussian_fitting import (gaussian_fit_hist,
gaussian_fit_cdf,
two_gaussian_fit_hist,
two_gaussian_fit_hist_min,
two_gaussian_fit_hist_min_ab,
two_gaussian_fit_EM,
two_gauss_mix_pdf,
two_gauss_mix_ab,)
# Redefine some old functions that have been renamed so old scripts will not
# break but will print a warning
bg_calc_exp = deprecate(bg.exp_fit, 'bg_calc_exp', 'bg.exp_fit')
bg_calc_exp_cdf = deprecate(bg.exp_cdf_fit, 'bg_calc_exp_cdf', 'bg.exp_cdf_fit')
def _get_bsearch_func(pure_python=False):
if pure_python:
# return the python version
return bslib.bsearch_py
else:
# or what is available
return bsearch
def _get_mch_count_ph_in_bursts_func(pure_python=False):
if pure_python:
# return the python version
return bslib.mch_count_ph_in_bursts_py
else:
# or what is available
return mch_count_ph_in_bursts
def isarray(obj):
"""Test if the object support the array interface.
Returns True for numpy arrays and pandas sequences.
"""
return hasattr(obj, '__array__')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# BURST SELECTION FUNCTIONS
#
def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
"""Uses `filter_fun` to select a sub-set of bursts from `d_orig`.
This function is deprecated. Use :meth:`Data.select_bursts` instead.
"""
d_sel = d_orig.select_bursts(filter_fun, negate=negate,
computefret=not nofret,
**kwargs)
return d_sel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
fraction = (on_range[1] - on_range[0]) / alex_period
else:
fraction = (alex_period + on_range[1] - on_range[0]) / alex_period
return fraction
def top_tail(nx, a=0.1):
"""Return for each ch the mean size of the top `a` fraction.
nx is one of nd, na, nt from Data() (list of burst size in each ch).
"""
assert a > 0 and a < 1
return np.r_[[n[n > n.max() * (1 - a)].mean() for n in nx]]
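# Worked example (illustrative): with a = 0.2, the per-channel threshold for
# sizes [2, 5, 9, 10] is 10 * (1 - 0.2) = 8, so only [9, 10] are averaged:
#
#   top_tail([np.array([2, 5, 9, 10])], a=0.2)   # -> array([ 9.5])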
##
# Per-burst quatitites from ph-data arrays (timestamps, lifetime, etc..)
#
def _excitation_width(excitation_range, alex_period):
"""Returns duration of alternation period outside selected excitation.
"""
if excitation_range[1] > excitation_range[0]:
return alex_period - excitation_range[1] + excitation_range[0]
elif excitation_range[1] < excitation_range[0]:
return excitation_range[0] - excitation_range[1]
def _ph_times_compact(ph_times_sel, alex_period, excitation_width):
"""Compact ph_times inplace by removing gaps between alternation periods.
Arguments:
ph_times_sel (array): array of timestamps from one alternation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
    Returns the array of compacted timestamps; the input array is not modified.
"""
# The formula is
#
# gaps = (ph_times_sel // alex_period)*excitation_width
# ph_times_sel = ph_times_sel - gaps
#
# As a memory optimization the `-gaps` array is reused inplace
times_minusgaps = (ph_times_sel // alex_period) * (-1 * excitation_width)
# The formula is ph_times_sel = ph_times_sel - "gaps"
times_minusgaps += ph_times_sel
return times_minusgaps
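# Worked example (illustrative): with alex_period = 100 and
# excitation_width = 50, a photon in the k-th alternation period is shifted
# left by k * 50:
#
#   _ph_times_compact(np.array([10, 130, 260]), 100, 50)   # -> [ 10, 80, 160]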
def iter_bursts_start_stop(bursts):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
arr_istart = bursts.istart
arr_istop = bursts.istop + 1
for istart, istop in zip(arr_istart, arr_istop):
yield istart, istop
def iter_bursts_ph(ph_data, bursts, mask=None, compact=False,
alex_period=None, excitation_width=None):
"""Iterator over arrays of photon-data for each burst.
Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
mask (boolean mask or None): if not None, is a boolean mask
to select photons in `ph_data` (for example Donor-ch photons).
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
Yields an array with a selection of "photons" for each burst.
"""
if isinstance(mask, slice) and mask == slice(None):
mask = None
if compact:
assert alex_period is not None
assert excitation_width is not None
assert mask is not None
for start, stop in iter_bursts_start_stop(bursts):
ph = ph_data[start:stop]
if mask is not None:
ph = ph[mask[start:stop]]
if compact:
ph = _ph_times_compact(ph, alex_period, excitation_width)
yield ph
def bursts_ph_list(ph_data, bursts, mask=None):
"""Returna list of ph-data for each burst.
ph_data can be either the timestamp array on which the burst search
has been performed or any other array with same size (boolean array,
nanotimes, etc...)
"""
return [ph for ph in iter_bursts_ph(ph_data, bursts, mask=mask)]
def burst_ph_stats(ph_data, bursts, func=np.mean, func_kw=None, **kwargs):
"""Reduce burst photons (timestamps, nanotimes) to a scalar using `func`.
Arguments
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
func (callable): function that takes the burst photon timestamps
as first argument and returns a scalar.
        func_kw (dict): additional keyword arguments passed to `func` beyond photon-data.
**kwargs: additional arguments passed to :func:`iter_bursts_ph`.
Return
        Array with one element per burst.
"""
if func_kw is None:
func_kw = {}
burst_stats = []
for burst_ph in iter_bursts_ph(ph_data, bursts, **kwargs):
burst_stats.append(func(burst_ph, **func_kw))
return np.asfarray(burst_stats) # NOTE: asfarray converts None to nan
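# Example (illustrative, assuming `d` is a Data object after burst search):
# per-burst duration in clock units for channel 0 can be computed as
#
#   widths = burst_ph_stats(d.ph_times_m[0], d.mburst[0],
#                           func=lambda ph: ph[-1] - ph[0])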
def ph_in_bursts_mask(ph_data_size, bursts):
"""Return bool mask to select all "ph-data" inside any burst."""
mask = zeros(ph_data_size, dtype=bool)
for start, stop in iter_bursts_start_stop(bursts):
mask[start:stop] = True
return mask
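# Example (illustrative, assuming `d` is a Data object after burst search,
# with in-memory timestamps): select every timestamp inside any burst of
# channel 0:
#
#   inside = ph_in_bursts_mask(d.ph_times_m[0].size, d.mburst[0])
#   ph_in_bursts = d.ph_times_m[0][inside]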
def fuse_bursts_direct(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-seconds).
This function is a direct implementation using a single loop.
For a faster implementation see :func:`fuse_bursts_iter`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
fused_bursts_list = []
fused_burst = None
for burst1, burst2 in zip(bursts[:-1], bursts[1:]):
if fused_burst is not None:
burst1c = fused_burst
else:
burst1c = bslib.BurstGap.from_burst(burst1)
separation = burst2.start - burst1c.stop
if separation <= max_delay_clk:
gap = burst2.start - burst1c.stop
gap_counts = burst2.istart - burst1c.istop - 1
if burst1c.istop >= burst2.istart:
gap = 0
gap_counts = 0
fused_burst = bslib.BurstGap(
start = burst1c.start,
istart = burst1c.istart,
stop = burst2.stop,
istop = burst2.istop,
gap = burst1c.gap + gap,
gap_counts = burst1c.gap_counts + gap_counts)
else:
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
fused_burst = None
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst1c))
# Append the last bursts (either a fused or an isolated one)
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst2))
fused_bursts = bslib.BurstsGap.from_list(fused_bursts_list)
init_num_bursts = bursts.num_bursts
delta_b = init_num_bursts - fused_bursts.num_bursts
pprint(" --> END Fused %d bursts (%.1f%%)\n\n" %
(delta_b, 100 * delta_b / init_num_bursts), mute=not verbose)
return fused_bursts
def fuse_bursts_iter(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-secs).
This function calls iteratively :func:`b_fuse` until there are no more
bursts to fuse. For a slower but more readable version see
:func:`fuse_bursts_direct`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
init_nburst = bursts.num_bursts
bursts = bslib.BurstsGap(bursts.data)
z = 0
new_nburst, nburst = 0, 1 # starting condition
while new_nburst < nburst:
z += 1
nburst = bursts.num_bursts
bursts = b_fuse(bursts, ms=ms, clk_p=clk_p)
new_nburst = bursts.num_bursts
delta_b = init_nburst - nburst
pprint(" --> END Fused %d bursts (%.1f%%, %d iter)\n\n" %
(delta_b, 100 * delta_b / init_nburst, z), mute=not verbose)
return bursts
def b_fuse(bursts, ms=0, clk_p=12.5e-9):
"""Fuse bursts separated by less than `ms` (milli-secs).
This is a low-level function which fuses pairs of consecutive
bursts separated by less than `ms` millisec.
If there are 3 or more consecutive bursts separated by less than `ms`
only the first 2 are fused.
See :func:`fuse_bursts_iter` or :func:`fuse_bursts_direct` for
higher level functions.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
# Nearby bursts masks
delays_below_th = (bursts.separation <= max_delay_clk)
if not np.any(delays_below_th):
return bursts
buffer_mask = np.hstack([(False,), delays_below_th, (False,)])
first_bursts = buffer_mask[1:]
second_bursts = buffer_mask[:-1]
# Keep only the first pair in case of more than 2 consecutive bursts
first_bursts ^= (second_bursts * first_bursts)
# note that previous in-place operation also modifies `second_bursts`
both_bursts = first_bursts + second_bursts
# istart is from the first burst, istop is from the second burst
fused_bursts1 = bursts[first_bursts]
fused_bursts2 = bursts[second_bursts]
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
overlaping = fused_bursts1.istop >= fused_bursts2.istart
gap[overlaping] = 0
gap_counts[overlaping] = 0
# Assign the new burst data
    # fused_bursts1 has already the right start and istart
fused_bursts1.istop = fused_bursts2.istop
fused_bursts1.stop = fused_bursts2.stop
fused_bursts1.gap += gap
fused_bursts1.gap_counts += gap_counts
# Join fused bursts with the remaining bursts
new_burst = fused_bursts1.join(bursts[~both_bursts], sort=True)
return new_burst
def mch_fuse_bursts(MBurst, ms=0, clk_p=12.5e-9, verbose=True):
"""Multi-ch version of `fuse_bursts`. `MBurst` is a list of Bursts objects.
"""
mburst = [b.copy() for b in MBurst] # safety copy
new_mburst = []
ch = 0
for mb in mburst:
ch += 1
pprint(" - - - - - CHANNEL %2d - - - - \n" % ch, not verbose)
if mb.num_bursts == 0:
new_bursts = bslib.Bursts.empty()
else:
new_bursts = fuse_bursts_iter(mb, ms=ms, clk_p=clk_p,
verbose=verbose)
new_mburst.append(new_bursts)
return new_mburst
def burst_stats(mburst, clk_p):
"""Compute average duration, size and burst-delay for bursts in mburst.
"""
nans = [np.nan, np.nan]
width_stats = np.array([[b.width.mean(), b.width.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
height_stats = np.array([[b.counts.mean(), b.counts.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
mean_burst_delay = np.array([b.separation.mean() if b.num_bursts > 0
else np.nan for b in mburst])
return (clk_to_s(width_stats, clk_p) * 1e3, height_stats,
clk_to_s(mean_burst_delay, clk_p))
def print_burst_stats(d):
"""Print some bursts statistics."""
nch = len(d.mburst)
width_ms, height, delays = burst_stats(d.mburst, d.clk_p)
s = "\nNUMBER OF BURSTS: m = %d, L = %d" % (d.m, d.L)
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\n#: "+"%7d "*nch % tuple([b.num_bursts for b in d.mburst])
s += "\nT (us) [BS par] "+"%7d "*nch % tuple(np.array(d.T)*1e6)
s += "\nBG Rat T (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel('all')])
s += "\nBG Rat D (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Dem')])
s += "\nBG Rat A (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Aem')])
s += "\n\nBURST WIDTH STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (ms): "+"%7.3f "*nch % tuple(width_ms[0, :])
s += "\nStd.dev (ms): "+"%7.3f "*nch % tuple(width_ms[1, :])
s += "\n\nBURST SIZE STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (# ph): "+"%7.2f "*nch % tuple(height[0, :])
s += "\nStd.dev (# ph): "+"%7.2f "*nch % tuple(height[1, :])
s += "\n\nBURST MEAN DELAY"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nDelay (s): "+"%7.3f "*nch % tuple(delays)
return s
def ES_histog(E, S, bin_step=0.05, E_bins=None, S_bins=None):
"""Returns 2D (ALEX) histogram and bins of bursts (E,S).
"""
if E_bins is None:
E_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
if S_bins is None:
S_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
H, E_bins, S_bins = np.histogram2d(E, S, bins=[E_bins, S_bins])
return H, E_bins, S_bins
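# Example (illustrative, assuming `d` is an ALEX Data object with E and S
# computed): 2D E-S histogram for channel 0 with 0.1-wide bins:
#
#   H, E_bins, S_bins = ES_histog(d.E[0], d.S[0], bin_step=0.1)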
def delta(x):
"""Return x.max() - x.min()"""
return x.max() - x.min()
def mask_empty(mask):
"""Returns True if `mask` is empty, otherwise False.
`mask` can be a boolean array or a slice object.
"""
if isinstance(mask, slice):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
# Bolean array
return not mask.any()
class DataContainer(dict):
"""
Generic class for storing data.
It's a dictionary in which each key is also an attribute d['nt'] or d.nt.
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
for k in self:
dict.__setattr__(self, k, self[k])
def add(self, **kwargs):
"""Adds or updates elements (attributes and/or dict entries). """
self.update(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def delete(self, *args, **kwargs):
"""Delete an element (attribute and/or dict entry). """
warning = kwargs.get('warning', True)
for name in args:
try:
self.pop(name)
except KeyError:
if warning:
print(' WARNING: Name %s not found (dict).' % name)
try:
delattr(self, name)
except AttributeError:
if warning:
print(' WARNING: Name %s not found (attr).' % name)
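# Example (illustrative): DataContainer keeps dict entries and attributes in
# sync, so both access styles work:
#
#   dc = DataContainer(nch=8)
#   dc.add(clk_p=12.5e-9)
#   assert dc.clk_p == dc['clk_p']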
class Data(DataContainer):
"""
Container for all the information (timestamps, bursts) of a dataset.
Data() contains all the information of a dataset (name, timestamps, bursts,
correction factors) and provides several methods to perform analysis
(background estimation, burst search, FRET fitting, etc...).
When loading a measurement file a Data() object is created by one
of the loader functions in `loaders.py`. Data() objects can be also
created with :meth:`Data.copy`, :meth:`Data.fuse_bursts()` or
:meth:`Data.select_bursts`.
To add or delete data-attributes use `.add()` or `.delete()` methods.
All the standard data-attributes are listed below.
Note:
Attributes of type "*list*" contain one element per channel.
Each element, in turn, can be an array. For example `.ph_times_m[i]`
is the array of timestamps for channel `i`; or `.nd[i]` is the array
of donor counts in each burst for channel `i`.
**Measurement attributes**
Attributes:
fname (string): measurements file name
nch (int): number of channels
clk_p (float): clock period in seconds for timestamps in `ph_times_m`
ph_times_m (list): list of timestamp arrays (int64). Each array
contains all the timestamps (donor+acceptor) in one channel.
A_em (list): list of boolean arrays marking acceptor timestamps. Each
array is a boolean mask for the corresponding ph_times_m array.
leakage (float or array of floats): leakage (or bleed-through) fraction.
May be scalar or same size as nch.
gamma (float or array of floats): gamma factor.
May be scalar or same size as nch.
D_em (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` for donor emission
D_ex, A_ex (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` during donor or acceptor
excitation
D_ON, A_ON (2-element tuples of int ): **[ALEX-only]**
start-end values for donor and acceptor excitation selection.
alex_period (int): **[ALEX-only]**
duration of the alternation period in clock cycles.
**Background Attributes**
The background is computed with :meth:`Data.calc_bg`
and is estimated in chunks of equal duration called *background periods*.
Estimations are performed in each spot and photon stream.
The following attributes contain the estimated background rate.
Attributes:
bg (dict): background rates for the different photon streams,
channels and background periods. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period) of background rates.
bg_mean (dict): mean background rates across the entire measurement
for the different photon streams and channels. Keys are `Ph_sel`
objects and values are lists (one element per channel) of
background rates.
nperiods (int): number of periods in which timestamps are split for
background calculation
bg_fun (function): function used to compute the background rates
Lim (list): each element of this list is a list of index pairs for
`.ph_times_m[i]` for **first** and **last** photon in each period.
Ph_p (list): each element in this list is a list of timestamps pairs
for **first** and **last** photon of each period.
bg_ph_sel (Ph_sel object): photon selection used by Lim and Ph_p.
See :mod:`fretbursts.ph_sel` for details.
Th_us (dict): thresholds in us used to select the tail of the
interphoton delay distribution. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period).
    Additionally, there are a few deprecated attributes (`bg_dd`, `bg_ad`,
`bg_da`, `bg_aa`, `rate_dd`, `rate_ad`, `rate_da`, `rate_aa` and `rate_m`)
which will be removed in a future version.
Please use :attr:`Data.bg` and :attr:`Data.bg_mean` instead.
**Burst search parameters (user input)**
These are the parameters used to perform the burst search
(see :meth:`burst_search`).
Attributes:
ph_sel (Ph_sel object): photon selection used for burst search.
See :mod:`fretbursts.ph_sel` for details.
m (int): number of consecutive timestamps used to compute the
local rate during burst search
L (int): min. number of photons for a burst to be identified and saved
P (float, probability): valid values [0..1].
Probability that a burst-start is due to a Poisson background.
The employed Poisson rate is the one computed by `.calc_bg()`.
F (float): `(F * background_rate)` is the minimum rate for burst-start
**Burst search data (available after burst search)**
When not specified, parameters marked as (list of arrays) contains arrays
with one element per bursts. `mburst` arrays contain one "row" per burst.
`TT` arrays contain one element per `period` (see above: background
attributes).
Attributes:
mburst (list of Bursts objects): list Bursts() one element per channel.
See :class:`fretbursts.phtools.burstsearch.Bursts`.
TT (list of arrays): list of arrays of *T* values (in sec.). A *T*
value is the maximum delay between `m` photons to have a
burst-start. Each channels has an array of *T* values, one for
each background "period" (see above).
T (array): per-channel mean of `TT`
nd, na (list of arrays): number of donor or acceptor photons during
donor excitation in each burst
nt (list of arrays): total number photons (nd+na+naa)
naa (list of arrays): number of acceptor photons in each burst
during acceptor excitation **[ALEX only]**
nar (list of arrays): number of acceptor photons in each burst
during donor excitation, not corrected for D-leakage and
A-direct-excitation. **[PAX only]**
bp (list of arrays): time period for each burst. Same shape as `nd`.
This is needed to identify the background rate for each burst.
bg_bs (list): background rates used for threshold computation in burst
search (is a reference to `bg`, `bg_dd` or `bg_ad`).
fuse (None or float): if not None, the burst separation in ms below
which bursts have been fused (see `.fuse_bursts()`).
E (list): FRET efficiency value for each burst:
E = na/(na + gamma*nd).
S (list): stoichiometry value for each burst:
S = (gamma*nd + na) /(gamma*nd + na + naa)
"""
# Attribute names containing per-photon data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per photon).
ph_fields = ['ph_times_m', 'nanotimes', 'particles',
'A_em', 'D_em', 'A_ex', 'D_ex']
# Attribute names containing background data.
# The attribute `bg` is a dict with photon-selections as keys and
# list of arrays as values. Each list contains one element per channel and
# each array one element per background period.
# The attributes `.Lim` and `.Ph_p` are lists with one element per channel.
# Each element is a lists-of-tuples (one tuple per background period).
# These attributes do not exist before computing the background.
bg_fields = ['bg', 'Lim', 'Ph_p']
# Attribute names containing per-burst data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per burst).
# They do not necessarly exist. For example 'naa' exists only for ALEX
# data. Also none of them exist before performing a burst search.
burst_fields = ['E', 'S', 'mburst', 'nd', 'na', 'nt', 'bp', 'nda', 'naa',
'max_rate', 'sbr', 'nar']
# Quantities (scalars or arrays) defining the current set of bursts
burst_metadata = ['m', 'L', 'T', 'TT', 'F', 'FF', 'P', 'PP', 'rate_th',
'bg_bs', 'ph_sel', 'bg_corrected', 'leakage_corrected',
'dir_ex_corrected', 'dithering', 'fuse', 'lsb']
# List of photon selections on which the background is computed
_ph_streams = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'),
Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')]
@property
def ph_streams(self):
if self.alternated:
return self._ph_streams
else:
return [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
def __init__(self, leakage=0., gamma=1., dir_ex=0., **kwargs):
# Default values
init_kw = dict(ALEX=False, _leakage=float(leakage), _gamma=float(gamma),
_dir_ex=float(dir_ex), _beta=1., _chi_ch=1., s=[])
# Override with user data
init_kw.update(**kwargs)
DataContainer.__init__(self, **init_kw)
# def __getattr__(self, name):
# """Single-channel shortcuts for per-channel fields.
#
# Appending a '_' to a per-channel field avoids specifying the channel.
# For example use d.nd_ instead if d.nd[0].
# """
# msg_missing_attr = "'%s' object has no attribute '%s'" %\
# (self.__class__.__name__, name)
# if name.startswith('_') or not name.endswith('_'):
# raise AttributeError(msg_missing_attr)
#
# field = name[:-1]
# try:
# value = self.__getitem__(field)
# except KeyError:
# raise AttributeError(msg_missing_attr)
# else:
# # Support lists, tuples and object with array interface
# if isinstance(value, (list, tuple)) or isarray(value):
# if len(value) == self.nch:
# return value[0]
# raise ValueError('Name "%s" is not a per-channel field.' % field)
def copy(self, mute=False):
"""Copy data in a new object. All arrays copied except for ph_times_m
"""
pprint('Deep copy executed.\n', mute)
new_d = Data(**self) # this make a shallow copy (like a pointer)
# Deep copy (not just reference) or array data
for field in self.burst_fields + self.bg_fields:
# Making sure k is defined
if field in self:
# Make a deepcopy of the per-channel lists
new_d[field] = copy.deepcopy(self[field])
# Set the attribute: new_d.k = new_d[k]
setattr(new_d, field, new_d[field])
return new_d
##
# Methods for photon timestamps (ph_times_m) access
#
def ph_times_hash(self, hash_name='md5', hexdigest=True):
"""Return an hash for the timestamps arrays.
"""
m = hashlib.new(hash_name)
for ph in self.iter_ph_times():
if isinstance(ph, np.ndarray):
m.update(ph.data)
else:
# TODO Handle ph_times in PyTables files
raise NotImplementedError
if hexdigest:
return m.hexdigest()
else:
return m
@property
def ph_data_sizes(self):
"""Array of total number of photons (ph-data) for each channel.
"""
if not hasattr(self, '_ph_data_sizes'):
# This works both for numpy arrays and pytables arrays
self._ph_data_sizes = np.array([ph.shape[0] for ph in
self.ph_times_m])
return self._ph_data_sizes
def _fix_ph_sel(self, ph_sel):
"""For non-ALEX data fix Aex to allow stable comparison."""
msg = 'Photon selection must be of type `Ph_sel` (it was `%s` instead).'
assert isinstance(ph_sel, Ph_sel), (msg % type(ph_sel))
if self.alternated or ph_sel.Dex != 'DAem':
return ph_sel
else:
return Ph_sel(Dex=ph_sel.Dex, Aex='DAem')
def _is_allph(self, ph_sel):
"""Return whether a photon selection `ph_sel` covers all photon."""
if self.alternated:
return ph_sel == Ph_sel(Dex='DAem', Aex='DAem')
else:
return ph_sel.Dex == 'DAem'
def get_ph_mask(self, ich=0, ph_sel=Ph_sel('all')):
"""Returns a mask for `ph_sel` photons in channel `ich`.
The masks are either boolean arrays or slices (full or empty). In
both cases they can be used to index the timestamps of the
corresponding channel.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
assert isinstance(ich, int)
if self._is_allph(ph_sel):
# Note that slice(None) is equivalent to [:].
# Also, numpy arrays are not copied when sliced.
# So getting all photons with this mask is efficient
# Note: the drawback is that the slice cannot be indexed
# (where a normal boolean array would)
return slice(None)
# Handle the case when A_em contains slice objects
if isinstance(self.A_em[ich], slice):
if self.A_em[ich] == slice(None):
if ph_sel.Dex == 'Dem':
return slice(0)
if ph_sel.Dex == 'Aem':
return slice(None)
elif self.A_em[ich] == slice(0):
if ph_sel.Dex == 'Dem':
return slice(None)
if ph_sel.Dex == 'Aem':
return slice(0)
else:
msg = 'When a slice, A_em can only be slice(None) or slice(0).'
raise NotImplementedError(msg)
# Base selections
elif ph_sel == Ph_sel(Dex='Dem'):
return self.get_D_em_D_ex(ich)
elif ph_sel == Ph_sel(Dex='Aem'):
return self.get_A_em_D_ex(ich)
elif ph_sel == Ph_sel(Aex='Dem'):
return self.get_D_em(ich) * self.get_A_ex(ich)
elif ph_sel == Ph_sel(Aex='Aem'):
return self.get_A_em(ich) * self.get_A_ex(ich)
# Selection of all photons in one emission ch
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
return self.get_D_em(ich)
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
return self.get_A_em(ich)
# Selection of all photons in one excitation period
elif ph_sel == Ph_sel(Dex='DAem'):
return self.get_D_ex(ich)
elif ph_sel == Ph_sel(Aex='DAem'):
return self.get_A_ex(ich)
# Selection of all photons except for Dem during Aex
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
return self.get_D_ex(ich) + self.get_A_em(ich) * self.get_A_ex(ich)
else:
raise ValueError('Photon selection not implemented.')
def iter_ph_masks(self, ph_sel=Ph_sel('all')):
"""Iterator returning masks for `ph_sel` photons.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
for ich in range(self.nch):
yield self.get_ph_mask(ich, ph_sel=ph_sel)
def get_ph_times(self, ich=0, ph_sel=Ph_sel('all'), compact=False):
"""Returns the timestamps array for channel `ich`.
This method always returns in-memory arrays, even when ph_times_m
is a disk-backed list of arrays.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
"""
ph = self.ph_times_m[ich]
# If not an in-memory array, it is an on-disk (pytables) array: load it
if not isinstance(ph, np.ndarray):
if hasattr(self, '_ph_cache') and self._ph_cache_ich == ich:
ph = self._ph_cache
else:
ph = ph.read()
self._ph_cache = ph
self._ph_cache_ich = ich
ph = ph[self.get_ph_mask(ich, ph_sel=ph_sel)]
if compact:
ph = self._ph_times_compact(ph, ph_sel)
return ph
def iter_ph_times(self, ph_sel=Ph_sel('all'), compact=False):
"""Iterator that returns the arrays of timestamps in `.ph_times_m`.
Arguments:
Same arguments as :meth:`get_ph_mask` except for `ich`.
"""
for ich in range(self.nch):
yield self.get_ph_times(ich, ph_sel=ph_sel, compact=compact)
def _get_ph_mask_single(self, ich, mask_name, negate=False):
"""Get the bool array `mask_name` for channel `ich`.
If the internal "bool array" is a scalar return a slice (full or empty)
"""
mask = np.asarray(getattr(self, mask_name)[ich])
if negate:
mask = np.logical_not(mask)
if len(mask.shape) == 0:
# If mask is a boolean scalar, select all or nothing
mask = slice(None) if mask else slice(0)
return mask
def get_A_em(self, ich=0):
"""Returns a mask to select photons detected in the acceptor ch."""
return self._get_ph_mask_single(ich, 'A_em')
def get_D_em(self, ich=0):
"""Returns a mask to select photons detected in the donor ch."""
return self._get_ph_mask_single(ich, 'A_em', negate=True)
def get_A_ex(self, ich=0):
"""Returns a mask to select photons in acceptor-excitation periods."""
return self._get_ph_mask_single(ich, 'A_ex')
def get_D_ex(self, ich=0):
"""Returns a mask to select photons in donor-excitation periods."""
if self.alternated:
return self._get_ph_mask_single(ich, 'D_ex')
else:
return slice(None)
def get_D_em_D_ex(self, ich=0):
"""Returns a mask of donor photons during donor-excitation."""
if self.alternated:
return self.get_D_em(ich) * self.get_D_ex(ich)
else:
return self.get_D_em(ich)
def get_A_em_D_ex(self, ich=0):
"""Returns a mask of acceptor photons during donor-excitation."""
if self.alternated:
return self.get_A_em(ich) * self.get_D_ex(ich)
else:
return self.get_A_em(ich)
def iter_ph_times_period(self, ich=0, ph_sel=Ph_sel('all')):
"""Iterate through arrays of ph timestamps in each background period.
"""
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period in range(self.nperiods):
yield self.get_ph_times_period(period, ich=ich, mask=mask)
def get_ph_times_period(self, period, ich=0, ph_sel=Ph_sel('all'),
mask=None):
"""Return the array of ph_times in `period`, `ich` and `ph_sel`.
"""
istart, iend = self.Lim[ich][period]
period_slice = slice(istart, iend + 1)
ph_times = self.get_ph_times(ich=ich)
if mask is None:
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
if isinstance(mask, slice) and mask == slice(None):
ph_times_period = ph_times[period_slice]
else:
ph_times_period = ph_times[period_slice][mask[period_slice]]
return ph_times_period
def _assert_compact(self, ph_sel):
msg = ('Option compact=True requires a photon selection \n'
'from a single excitation period (either Dex or Aex).')
if not self.alternated:
raise ValueError('Option compact=True requires ALEX data.')
if ph_sel.Dex is not None and ph_sel.Aex is not None:
raise ValueError(msg)
def _excitation_width(self, ph_sel, ich=0):
"""Returns duration of alternation period outside selected excitation.
"""
self._assert_compact(ph_sel)
if ph_sel.Aex is None:
excitation_range = self._D_ON_multich[ich]
elif ph_sel.Dex is None:
excitation_range = self._A_ON_multich[ich]
return _excitation_width(excitation_range, self.alex_period)
def _ph_times_compact(self, ph, ph_sel):
"""Return timestamps in one excitation period with "gaps" removed.
It takes timestamps in the specified alternation period and removes
gaps due to time intervals outside the alternation period selection.
This allows correcting the photon-rate distortion due to alternation.
Arguments:
ph (array): timestamps array from which gaps have to be removed.
This array **is modified in-place**.
ph_sel (Ph_sel object): photon selection to be compacted.
Note that only one excitation must be specified, but the
emission can be 'Dem', 'Aem' or 'DAem'.
See :mod:`fretbursts.ph_sel` for details.
Returns:
Array of timestamps in one excitation periods with "gaps" removed.
"""
excitation_width = self._excitation_width(ph_sel)
return _ph_times_compact(ph, self.alex_period, excitation_width)
def _get_tuple_multich(self, name):
"""Get a n-element tuple field in multi-ch format (1 row per ch)."""
field = np.array(self[name])
if field.ndim == 1:
field = np.repeat([field], self.nch, axis=0)
return field
@property
def _D_ON_multich(self):
return self._get_tuple_multich('D_ON')
@property
def _A_ON_multich(self):
return self._get_tuple_multich('A_ON')
@property
def _det_donor_accept_multich(self):
return self._get_tuple_multich('det_donor_accept')
##
# Methods and properties for burst-data access
#
@property
def num_bursts(self):
"""Array of number of bursts in each channel."""
return np.array([bursts.num_bursts for bursts in self.mburst])
@property
def burst_widths(self):
"""List of arrays of burst duration in seconds. One array per channel.
"""
return [bursts.width * self.clk_p for bursts in self.mburst]
def burst_sizes_pax_ich(self, ich=0, gamma=1., add_aex=True,
beta=1., donor_ref=True, aex_corr=True):
r"""Return corrected burst sizes for channel `ich`. PAX-only.
When `donor_ref = False`, the formula for PAX-enhanced burst size is:
.. math::
\gamma(F_{D_{ex}D_{em}} + F_{DA_{ex}D_{em}}) +
\frac{1}{\alpha} F_{FRET}
where :math:`\alpha` is the Dex duty-cycle (0.5 if alternation
periods are equal) and :math:`F_{FRET}` is `na`, the DexAem
signal after leakage and direct-excitation corrections.
If `add_aex = True`, we add the term:
.. math::
\tilde{F}_{A_{ex}A_{em}} / (\alpha\beta)
where :math:`\tilde{F}_{A_{ex}A_{em}}` is the A emission due to
A excitation (and not due to FRET).
If `aex_corr = False`, then :math:`\alpha` is fixed to 1.
If `donor_ref = True`, the above burst size expression is divided by
:math:`\gamma`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
donor_ref (bool): True or False select different conventions
for burst size correction. For details see
:meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
add_aex (boolean): when True, the returned burst size also
includes photons detected during the DAex. Default is True.
aex_corr (bool): If True, and `add_aex == True`, then divide
the DAexAem term (naa) by the Dex duty cycle. For example,
if Dex and DAex alternation periods are equal, naa is
multiplied by 2. This correction makes the returned value
equal to the denominator of the stoichiometry ratio S_pax
(PAX-enhanced formula). If False, naa is not divided by
the Dex duty-cycle (gamma and beta corrections may still be
applied). If `add_aex == False`, `aex_corr` is ignored.
beta (float): beta correction factor used for the DAexAem term
(naa) of the burst size.
If `add_aex == False` this argument is ignored. Default 1.
Returns
Array of burst sizes for channel `ich`.
See also:
:meth:`Data.burst_sizes_ich`
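Example:
    Sketch (hypothetical PAX `Data` object `d` after burst search;
    the gamma value is illustrative)::

        sizes = d.burst_sizes_pax_ich(gamma=0.7, add_aex=True)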
"""
assert 'PAX' in self.meas_type
naa = self._get_naa_ich(ich) # nar-subtracted
aex_dex_ratio = self._aex_dex_ratio()
alpha = 1
if aex_corr:
alpha = 1 - self._aex_fraction() # Dex duty-cycle
burst_size_dex = self.nd[ich] * gamma + self.na[ich]
burst_size_aex = (self.nda[ich] * gamma +
self.na[ich] * aex_dex_ratio +
naa / (alpha * beta))
burst_size = burst_size_dex
if add_aex:
burst_size += burst_size_aex
if donor_ref:
burst_size /= gamma
return burst_size
def burst_sizes_ich(self, ich=0, gamma=1., add_naa=False,
beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for channel `ich`.
If `donor_ref == True` (default) the gamma corrected burst size is
computed according to::
1) nd + na / gamma
Otherwise, if `donor_ref == False`, the gamma corrected burst size is::
2) nd * gamma + na
With the definition (1) the corrected burst size is equal to the raw
burst size for zero-FRET or D-only bursts (hence the name `donor_ref`).
With the definition (2) the corrected burst size is equal to the raw
burst size for 100%-FRET bursts.
In an ALEX measurement, use `add_naa = True` to add counts from
AexAem stream to the returned burst size. The arguments `gamma` and
`beta` are used to correctly scale `naa` so that it becomes
commensurate with the Dex corrected burst size. In particular,
when using definition (1) (i.e. `donor_ref = True`), the total
burst size is::
(nd + na/gamma) + naa / (beta * gamma)
Conversely, when using definition (2) (`donor_ref = False`), the
total burst size is::
(nd * gamma + na) + naa / beta
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
add_naa (boolean): when True, add a term for AexAem photons when
computing burst size. Default False.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
beta (float): beta correction factor used for the AexAem term
of the burst size. Default 1. If `add_naa = False` or
measurement is not ALEX this argument is ignored.
For more info see explanation above.
donor_ref (bool): select the convention for burst size correction.
See details above in the function description.
Returns
Array of burst sizes for channel `ich`.
See also :meth:`fretbursts.burstlib.Data.get_naa_corrected`.
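Example:
    Sketch (hypothetical `Data` object `d` after burst search;
    gamma and beta values are illustrative)::

        sizes = d.burst_sizes_ich(gamma=0.7, add_naa=True, beta=0.8)
        big = sizes > 30  # bool mask of bursts with > 30 counts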
"""
if donor_ref:
burst_size = self.nd[ich] + self.na[ich] / gamma
else:
burst_size = self.nd[ich] * gamma + self.na[ich]
if add_naa and self.alternated:
kws = dict(ich=ich, gamma=gamma, beta=beta, donor_ref=donor_ref)
burst_size += self.get_naa_corrected(**kws)
return burst_size
def get_naa_corrected(self, ich=0, gamma=1., beta=1., donor_ref=True):
"""Return corrected naa array for channel `ich`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
gamma (float): gamma-factor to use in computing the corrected naa.
beta (float): beta-factor to use in computing the corrected naa.
donor_ref (bool): Select the convention for `naa` correction.
If True (default), uses `naa / (beta * gamma)`. Otherwise,
uses `naa / beta`. A consistent convention should be used
for the corrected Dex burst size in order to make it
commensurable with naa.
See also :meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
"""
naa = self._get_naa_ich(ich) # duty-cycle corrected in case of PAX
if donor_ref:
correction = (gamma * beta)
else:
correction = beta
return naa / correction
def _get_naa_ich(self, ich=0):
"""Return naa for `ich` both in ALEX and PAX measurements.
In case of PAX, returns naa using the duty-cycle correction::
naa = self.naa - aex_dex_ratio * self.nar
where `self.nar` is equal to `self.na` before leakage and direct
excitation correction, and `aex_dex_ratio` is the Aex/Dex duration ratio.
"""
naa = self.naa[ich]
if 'PAX' in self.meas_type:
# ATTENTION: do not modify naa inplace
naa = naa - self._aex_dex_ratio() * self.nar[ich]
return naa
def burst_sizes(self, gamma=1., add_naa=False, beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for all the channel.
Compute burst sizes by calling, for each channel,
:meth:`burst_sizes_ich`.
See :meth:`burst_sizes_ich` for description of the arguments.
Returns
List of arrays of burst sizes, one array per channel.
"""
kwargs = dict(gamma=gamma, add_naa=add_naa, beta=beta,
donor_ref=donor_ref)
bsize_list = [self.burst_sizes_ich(ich, **kwargs) for ich in
range(self.nch)]
return np.array(bsize_list)
def iter_bursts_ph(self, ich=0):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
for istart, istop in iter_bursts_start_stop(self.mburst[ich]):
yield istart, istop
def bursts_slice(self, N1=0, N2=-1):
"""Return new Data object with bursts between `N1` and `N2`
`N1` and `N2` can be scalars or lists (one per ch).
"""
if np.isscalar(N1): N1 = [N1] * self.nch
if np.isscalar(N2): N2 = [N2] * self.nch
assert len(N1) == len(N2) == self.nch
d = Data(**self)
d.add(mburst=[b[n1:n2].copy() for b, n1, n2 in zip(d.mburst, N1, N2)])
d.add(nt=[nt[n1:n2] for nt, n1, n2 in zip(d.nt, N1, N2)])
d.add(nd=[nd[n1:n2] for nd, n1, n2 in zip(d.nd, N1, N2)])
d.add(na=[na[n1:n2] for na, n1, n2 in zip(d.na, N1, N2)])
for name in ('naa', 'nda', 'nar'):
if name in d:
d.add(**{name:
[x[n1:n2] for x, n1, n2 in zip(d[name], N1, N2)]})
d.calc_fret(pax=self.pax) # recalculate fret efficiency
return d
def delete_burst_data(self):
"""Erase all the burst data"""
for name in self.burst_fields + self.burst_metadata:
if name in self:
self.delete(name)
for name in ('E_fitter', 'S_fitter'):
if hasattr(self, name):
delattr(self, name)
##
# Methods for high-level data transformation
#
def slice_ph(self, time_s1=0, time_s2=None, s='slice'):
"""Return a new Data object with ph in [`time_s1`,`time_s2`] (seconds)
If ALEX, this method must be called right after
:func:`fretbursts.loader.alex_apply_periods` (with `delete_ph_t=True`)
and before any background estimation or burst search.
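Example:
    Keep only the photons in the first 10 minutes of the measurement
    (sketch; `d` is a hypothetical `Data` object)::

        d10 = d.slice_ph(time_s1=0, time_s2=600, s='first-10min')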
"""
if time_s2 is None:
time_s2 = self.time_max
if time_s2 >= self.time_max and time_s1 <= 0:
return self.copy()
assert time_s1 < self.time_max
t1_clk, t2_clk = int(time_s1 / self.clk_p), int(time_s2 / self.clk_p)
masks = [(ph >= t1_clk) * (ph < t2_clk) for ph in self.iter_ph_times()]
new_d = Data(**self)
for name in self.ph_fields:
if name in self:
new_d[name] = [a[mask] for a, mask in zip(self[name], masks)]
setattr(new_d, name, new_d[name])
new_d.delete_burst_data()
# Shift timestamps to start from 0 to avoid problems with BG calc
for ich in range(self.nch):
ph_i = new_d.get_ph_times(ich)
ph_i -= t1_clk
new_d.s.append(s)
# Delete any cached properties
for attr in ['_time_min', '_time_max']:
if hasattr(new_d, attr):
delattr(new_d, attr)
return new_d
def collapse(self, update_gamma=True, skip_ch=None):
"""Returns an object with 1-spot data joining the multi-spot data.
Arguments:
skip_ch (tuple of ints): list of channels to skip.
If None, keep all channels.
update_gamma (bool): if True, recompute gamma as mean of the
per-channel gamma. If False, do not update gamma.
If True, gamma becomes a single value and the update has the
side effect of recomputing E and S values, discarding
previous per-channel corrections. If False, gamma is not
updated (it stays with multi-spot values) and E and S are
not recomputed.
Note:
When using `update_gamma=False`, burst selections on the
collapsed `Data` object should be done with
`computefret=False`, otherwise any attempt to use multi-spot
gamma for single-spot data will raise an error.
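Example:
    Sketch for a hypothetical multi-spot measurement `d`, skipping
    two bad channels (the channel numbers are illustrative)::

        d1 = d.collapse(skip_ch=(3, 7))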
"""
dc = Data(**self)
mch_bursts = self.mburst
if skip_ch is not None:
mch_bursts = [bursts for i, bursts in enumerate(mch_bursts)
if i not in skip_ch]
bursts = bslib.Bursts.merge(mch_bursts, sort=False)
# Sort by start times, and when equal by stop times
indexsort = np.lexsort((bursts.stop, bursts.start))
dc.add(mburst=[bursts[indexsort]])
ich_burst = [i * np.ones(nb) for i, nb in enumerate(self.num_bursts)]
dc.add(ich_burst=np.hstack(ich_burst)[indexsort])
for name in self.burst_fields:
if name in self and name != 'mburst':
# Concatenate arrays along axis = 0
value = [np.concatenate(self[name])[indexsort]]
dc.add(**{name: value})
dc.add(nch=1)
dc.add(_chi_ch=1.)
# NOTE: Updating gamma has the side effect of recomputing E
# (and S if ALEX). We need to update gamma because, in general,
# gamma can be an array with a value for each ch.
# However, the per-channel gamma correction is lost once both
# gamma and chi_ch are made scalar.
if update_gamma:
dc._update_gamma(np.mean(self.get_gamma_array()))
return dc
##
# Utility methods
#
def get_params(self):
"""Returns a plain dict containing only parameters and no arrays.
This can be used as a summary of data analysis parameters.
Additional keys `name` and `Name` are added, with values
from `.name` and `.Name()`.
"""
p_names = ['fname', 'clk_p', 'nch', 'ph_sel', 'L', 'm', 'F', 'P',
'_leakage', '_dir_ex', '_gamma', 'bg_time_s',
'T', 'rate_th',
'bg_corrected', 'leakage_corrected', 'dir_ex_corrected',
'dithering', '_chi_ch', 's', 'ALEX']
p_dict = dict(self)
# Iterate over a copy of the keys because we pop while iterating
for name in list(p_dict.keys()):
if name not in p_names:
p_dict.pop(name)
p_dict.update(name=self.name, Name=self.Name(), bg_mean=self.bg_mean,
nperiods=self.nperiods)
return p_dict
def expand(self, ich=0, alex_naa=False, width=False):
"""Return per-burst D and A sizes (nd, na) and their background counts.
This method returns for each bursts the corrected signal counts and
background counts in donor and acceptor channels. Optionally, the
burst width is also returned.
Arguments:
ich (int): channel for the bursts (can be not 0 only in multi-spot)
alex_naa (bool): if True and self.ALEX, returns burst sizes and
background also for acceptor photons during acceptor excitation
width (bool): whether to return the burst duration (in seconds).
Returns:
List of arrays: nd, na, donor bg, acceptor bg.
If `alex_naa` is True returns: nd, na, naa, bg_d, bg_a, bg_aa.
If `width` is True returns the bursts duration (in sec.) as last
element.
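Example:
    Sketch for a non-ALEX `Data` object `d` after burst search::

        nd, na, bg_d, bg_a, widths = d.expand(ich=0, width=True)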
"""
period = self.bp[ich]
w = self.mburst[ich].width * self.clk_p
bg_a = self.bg[Ph_sel(Dex='Aem')][ich][period] * w
bg_d = self.bg[Ph_sel(Dex='Dem')][ich][period] * w
res = [self.nd[ich], self.na[ich]]
if self.alternated and alex_naa:
bg_aa = self.bg[Ph_sel(Aex='Aem')][ich][period] * w
res.extend([self.naa[ich], bg_d, bg_a, bg_aa])
else:
res.extend([bg_d, bg_a])
if width:
res.append(w)
return res
def burst_data_ich(self, ich):
"""Return a dict of burst data for channel `ich`."""
bursts = {}
bursts['size_raw'] = self.mburst[ich].counts
bursts['t_start'] = self.mburst[ich].start * self.clk_p
bursts['t_stop'] = self.mburst[ich].stop * self.clk_p
bursts['i_start'] = self.mburst[ich].istart
bursts['i_stop'] = self.mburst[ich].istop
period = bursts['bg_period'] = self.bp[ich]
width = self.mburst[ich].width * self.clk_p
bursts['width_ms'] = width * 1e3
bursts['bg_ad'] = self.bg[Ph_sel(Dex='Aem')][ich][period] * width
bursts['bg_dd'] = self.bg[Ph_sel(Dex='Dem')][ich][period] * width
if self.alternated:
bursts['bg_aa'] = self.bg[Ph_sel(Aex='Aem')][ich][period] * width
bursts['bg_da'] = self.bg[Ph_sel(Aex='Dem')][ich][period] * width
burst_fields = self.burst_fields[:]
burst_fields.remove('mburst')
burst_fields.remove('bp')
for field in burst_fields:
if field in self:
bursts[field] = self[field][ich]
return bursts
@property
def time_max(self):
"""The last recorded time in seconds."""
if not hasattr(self, '_time_max'):
self._time_max = self._time_reduce(last=True, func=max)
return self._time_max
@property
def time_min(self):
"""The first recorded time in seconds."""
if not hasattr(self, '_time_min'):
self._time_min = self._time_reduce(last=False, func=min)
return self._time_min
def _time_reduce(self, last=True, func=max):
"""Return first or last timestamp per-ch, reduced with `func`.
"""
idx = -1 if last else 0
# Get either ph_times_m or ph_times_t
ph_times = None
for ph_times_name in ['ph_times_m', 'ph_times_t']:
try:
ph_times = self[ph_times_name]
except KeyError:
pass
else:
break
if ph_times is not None:
# This works with both numpy arrays and pytables arrays
time = func(t[idx] for t in ph_times if t.shape[0] > 0)
elif 'mburst' in self:
if last:
time = func(bursts[idx].stop for bursts in self.mburst)
else:
time = func(bursts[idx].start for bursts in self.mburst)
else:
raise ValueError("No timestamps or bursts found.")
return time * self.clk_p
def ph_in_bursts_mask_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return mask of all photons inside bursts for channel `ich`.
Returns
Boolean array for photons in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
bursts_mask = ph_in_bursts_mask(self.ph_data_sizes[ich],
self.mburst[ich])
if self._is_allph(ph_sel):
return bursts_mask
else:
ph_sel_mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
return ph_sel_mask * bursts_mask
def ph_in_bursts_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return timestamps of photons inside bursts for channel `ich`.
Returns
Array of photon timestamps in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
ph_all = self.get_ph_times(ich=ich)
bursts_mask = self.ph_in_bursts_mask_ich(ich, ph_sel)
return ph_all[bursts_mask]
##
# Background analysis methods
#
def _obsolete_bg_attr(self, attrname, ph_sel):
print('The Data.%s attribute is deprecated. Please use '
'Data.bg(%s) instead.' % (attrname, repr(ph_sel)))
bg_attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa')
bg_mean_attrs = ('rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
assert attrname in bg_attrs or attrname in bg_mean_attrs
if attrname in bg_attrs:
bg_field = 'bg'
elif attrname in bg_mean_attrs:
bg_field = 'bg_mean'
try:
value = getattr(self, bg_field)[ph_sel]
except AttributeError as e:
# This only happens when trying to access 'bg' because
# 'bg_mean' raises RuntimeError when missing.
msg = 'No attribute `%s` found. Please compute background first.'
raise_from(RuntimeError(msg % bg_field), e)
return value
@property
def rate_m(self):
return self._obsolete_bg_attr('rate_m', Ph_sel('all'))
@property
def rate_dd(self):
return self._obsolete_bg_attr('rate_dd', Ph_sel(Dex='Dem'))
@property
def rate_ad(self):
return self._obsolete_bg_attr('rate_ad', Ph_sel(Dex='Aem'))
@property
def rate_da(self):
return self._obsolete_bg_attr('rate_da', Ph_sel(Aex='Dem'))
@property
def rate_aa(self):
return self._obsolete_bg_attr('rate_aa', Ph_sel(Aex='Aem'))
@property
def bg_dd(self):
return self._obsolete_bg_attr('bg_dd', Ph_sel(Dex='Dem'))
@property
def bg_ad(self):
return self._obsolete_bg_attr('bg_ad', Ph_sel(Dex='Aem'))
@property
def bg_da(self):
return self._obsolete_bg_attr('bg_da', Ph_sel(Aex='Dem'))
@property
def bg_aa(self):
return self._obsolete_bg_attr('bg_aa', Ph_sel(Aex='Aem'))
def calc_bg_cache(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True,
recompute=False):
"""Compute time-dependent background rates for all the channels.
This version is the cached version of :meth:`calc_bg`.
This method tries to load the background data from a cache file.
If saved background data is not found, it computes
the background and stores it to disk.
The arguments are the same as :meth:`calc_bg` with the only addition
of `recompute` (bool) to force a background recomputation even if
a cached version is found.
For more details on the other arguments see :meth:`calc_bg`.
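Example:
    Same call signature as :meth:`calc_bg` (sketch; parameter
    values are illustrative)::

        d.calc_bg_cache(bg.exp_fit, time_s=30, tail_min_us='auto')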
"""
bg_cache.calc_bg_cache(self, fun, time_s=time_s,
tail_min_us=tail_min_us, F_bg=F_bg,
error_metrics=error_metrics, fit_allph=fit_allph,
recompute=recompute)
def _get_auto_bg_th_arrays(self, F_bg=2, tail_min_us0=250):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
Th_us = {}
for ph_sel in self.ph_streams:
th_us = np.zeros(self.nch)
for ich, ph in enumerate(self.iter_ph_times(ph_sel=ph_sel)):
if ph.size > 0:
bg_rate, _ = bg.exp_fit(ph, tail_min_us=tail_min_us0)
th_us[ich] = 1e6 * F_bg / bg_rate
Th_us[ph_sel] = th_us
# Save the input used to generate Th_us
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
return Th_us
def _get_bg_th_arrays(self, tail_min_us, nperiods):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of length `nperiods`.
"""
n_streams = len(self.ph_streams)
if np.size(tail_min_us) == 1:
tail_min_us = np.repeat(tail_min_us, n_streams)
elif np.size(tail_min_us) == n_streams:
tail_min_us = np.asarray(tail_min_us)
else:
raise ValueError('Wrong tail_min_us length (%d).' %
len(tail_min_us))
th_us = {}
for i, key in enumerate(self.ph_streams):
th_us[key] = np.ones(nperiods) * tail_min_us[i]
# Save the input used to generate Th_us
self.add(bg_th_us_user=tail_min_us)
return th_us
def _clean_bg_data(self):
"""Remove background fields specific of only one fit type.
Computing background with manual or 'auto' threshold results in
different sets of attributes being saved. This method removes these
attributes and should be called before recomputing the background
to avoid having old stale attributes of a previous background fit.
"""
# Attributes specific of manual or 'auto' bg fit
field_list = ['bg_auto_th_us0', 'bg_auto_F_bg', 'bg_th_us_user']
for field in field_list:
if field in self:
self.delete(field)
if hasattr(self, '_bg_mean'):
delattr(self, '_bg_mean')
def _get_num_periods(self, time_s):
"""Return the number of periods using `time_s` as period duration.
"""
duration = self.time_max - self.time_min
# Take the ceil to have at least 1 period
nperiods = np.ceil(duration / time_s)
# Discard last period if negligibly small to avoid problems with
# background fit with very few photons.
if nperiods > 1:
last_period = self.time_max - time_s * (nperiods - 1)
# Discard last period if smaller than 3% of the bg period
if last_period < time_s * 0.03:
nperiods -= 1
return int(nperiods)
def calc_bg(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True):
"""Compute time-dependent background rates for all the channels.
Compute background rates for donor, acceptor and both detectors.
The rates are computed every `time_s` seconds, making it possible
to track variations during the measurement.
Arguments:
fun (function): function for background estimation (example
`bg.exp_fit`)
time_s (float, seconds): compute background each time_s seconds
tail_min_us (float, tuple or string): min threshold in us for
photon waiting times to use in background estimation.
If a float, the same threshold is used for 'all', DD, AD and AA
photons and for all the channels.
If a 3 or 4 element tuple, each value is used for 'all', DD, AD
or AA photons, same value for all the channels.
If 'auto', the threshold is computed for each stream ('all',
DD, AD, AA) and for each channel as `F_bg * rate_ml0`.
`rate_ml0` is an initial estimation of the rate performed using
:func:`bg.exp_fit` and a fixed threshold (default 250us).
F_bg (float): when `tail_min_us` is 'auto', is the factor by which
the initial background estimation is multiplied to compute the
threshold.
error_metrics (string): Specifies the error metric to use.
See :func:`fretbursts.background.exp_fit` for more details.
fit_allph (bool): if True (default) the background for the
all-photon is fitted. If False it is computed as the sum of
backgrounds in all the other streams.
The background estimation functions are defined in the module
`background` (conventionally imported as `bg`).
Example:
Compute background with `bg.exp_fit` (inter-photon delays MLE
tail fitting), every 30s, with automatic tail-threshold::
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
Returns:
None, all the results are saved in the object itself.
"""
pprint(" - Calculating BG rates ... ")
self._clean_bg_data()
kwargs = dict(clk_p=self.clk_p, error_metrics=error_metrics)
nperiods = self._get_num_periods(time_s)
streams_noall = [s for s in self.ph_streams if s != Ph_sel('all')]
bg_auto_th = tail_min_us == 'auto'
if bg_auto_th:
tail_min_us0 = 250
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
auto_th_kwargs = dict(clk_p=self.clk_p, tail_min_us=tail_min_us0)
th_us = {}
for key in self.ph_streams:
th_us[key] = np.zeros(nperiods)
else:
th_us = self._get_bg_th_arrays(tail_min_us, nperiods)
Lim, Ph_p = [], []
BG, BG_err = [], []
Th_us = []
for ich, ph_ch in enumerate(self.iter_ph_times()):
masks = {sel: self.get_ph_mask(ich, ph_sel=sel)
for sel in self.ph_streams}
bins = ((np.arange(nperiods + 1) * time_s + self.time_min) /
self.clk_p)
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
bg = {sel: np.zeros(nperiods) for sel in self.ph_streams}
bg_err = {sel: np.zeros(nperiods) for sel in self.ph_streams}
i1 = 0
for ip in range(nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1 - 1]))
ph_i = ph_ch[i0:i1]
if fit_allph:
sel = Ph_sel('all')
if bg_auto_th:
_bg, _ = fun(ph_i, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i, tail_min_us=th_us[sel][ip], **kwargs)
for sel in streams_noall:
# This supports cases of D-only or A-only timestamps
# where self.A_em[ich] is a bool and not a bool-array
# In this case, the mask of either DexDem or DexAem is
# slice(None) (all-elements selection).
if isinstance(masks[sel], slice):
if masks[sel] == slice(None):
bg[sel][ip] = bg[Ph_sel('all')][ip]
bg_err[sel][ip] = bg_err[Ph_sel('all')][ip]
continue
else:
ph_i_sel = ph_i[masks[sel][i0:i1]]
if ph_i_sel.size > 0:
if bg_auto_th:
_bg, _ = fun(ph_i_sel, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i_sel, tail_min_us=th_us[sel][ip], **kwargs)
if not fit_allph:
bg[Ph_sel('all')] += sum(bg[s] for s in streams_noall)
bg_err[Ph_sel('all')] += sum(bg_err[s] for s in streams_noall)
Lim.append(lim)
Ph_p.append(ph_p)
BG.append(bg)
BG_err.append(bg_err)
Th_us.append(th_us)
# Make Dict Of Lists (DOL) from Lists of Dicts
BG_dol, BG_err_dol, Th_us_dol = {}, {}, {}
for sel in self.ph_streams:
BG_dol[sel] = [bg_ch[sel] for bg_ch in BG]
BG_err_dol[sel] = [err_ch[sel] for err_ch in BG_err]
Th_us_dol[sel] = [th_ch[sel] for th_ch in Th_us]
self.add(bg=BG_dol, bg_err=BG_err_dol, bg_th_us=Th_us_dol,
Lim=Lim, Ph_p=Ph_p,
bg_fun=fun, bg_fun_name=fun.__name__,
bg_time_s=time_s, bg_ph_sel=Ph_sel('all'),
bg_auto_th=bg_auto_th, # bool, True if using the auto-threshold
)
pprint("[DONE]\n")
@property
def nperiods(self):
return len(self.bg[Ph_sel('all')][0])
@property
def bg_mean(self):
if 'bg' not in self:
raise RuntimeError('No background found, compute it first.')
if not hasattr(self, '_bg_mean'):
self._bg_mean = {k: [bg_ch.mean() for bg_ch in bg_ph_sel]
for k, bg_ph_sel in self.bg.items()}
return self._bg_mean
def recompute_bg_lim_ph_p(self, ph_sel, mute=False):
"""Recompute self.Lim and selp.Ph_p relative to ph selection `ph_sel`
`ph_sel` is a Ph_sel object selecting the timestamps in which self.Lim
and self.Ph_p are being computed.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if self.bg_ph_sel == ph_sel:
return
pprint(" - Recomputing background limits for %s ... " %
str(ph_sel), mute)
bg_time_clk = self.bg_time_s / self.clk_p
Lim, Ph_p = [], []
for ph_ch in self.iter_ph_times(ph_sel):
bins = np.arange(self.nperiods + 1) * bg_time_clk
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
i1 = 0
for ip in range(self.nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1-1]))
Lim.append(lim)
Ph_p.append(ph_p)
self.add(Lim=Lim, Ph_p=Ph_p, bg_ph_sel=ph_sel)
pprint("[DONE]\n", mute)
##
# Burst analysis methods
#
def _calc_burst_period(self):
"""Compute for each burst the "background period" `bp`.
Background periods are the time intervals on which the BG is computed.
"""
P = []
for b, lim in zip(self.mburst, self.Lim):
p = zeros(b.num_bursts, dtype=np.int16)
if b.num_bursts > 0:
istart = b.istart
for i, (l0, l1) in enumerate(lim):
p[(istart >= l0) * (istart <= l1)] = i
P.append(p)
self.add(bp=P)
def _param_as_mch_array(self, par):
"""Regardless of `par` size, return an arrays with size == nch.
if `par` is scalar the arrays repeats the calar multiple times
if `par is a list/array must be of length `nch`.
"""
assert size(par) == 1 or size(par) == self.nch
return np.repeat(par, self.nch) if size(par) == 1 else np.asarray(par)
def bg_from(self, ph_sel):
"""Return the background rates for the specified photon selection.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if ph_sel in self.ph_streams:
return self.bg[ph_sel]
elif ph_sel == Ph_sel(Dex='DAem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Aex='DAem'):
sel = Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Aex='Dem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
sel = Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
sel = (Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem'))
bg = [b1 + b2 + b3 for b1, b2, b3 in
zip(self.bg[sel[0]], self.bg[sel[1]], self.bg[sel[2]])]
else:
raise NotImplementedError('Photon selection %s not implemented.' %
str(ph_sel))
return bg
def _calc_T(self, m, P, F=1., ph_sel=Ph_sel('all'), c=-1):
"""If P is None use F, otherwise uses both P *and* F (F defaults to 1).
When P is None, compute the time lag T for burst search according to::
T = (m - 1 - c) / (F * bg_rate)
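For example (illustrative numbers): with m = 10, c = -1, F = 6 and a
background rate of 2000 cps, T = (10 - 1 + 1) / (6 * 2000) s ~ 0.83 ms.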
"""
# Regardless of F and P sizes, FF and PP are arrays with size == nch
FF = self._param_as_mch_array(F)
PP = self._param_as_mch_array(P)
if P is None:
# NOTE: the following lambda ignores Pi
find_T = lambda m, Fi, Pi, bg: (m - 1 - c) / (bg * Fi)
else:
if F != 1:
print("WARNING: BS prob. th. with modified BG rate (F=%.1f)"
% F)
find_T = lambda m, Fi, Pi, bg: find_optimal_T_bga(bg*Fi, m, 1-Pi)
TT, T, rate_th = [], [], []
bg_bs = self.bg_from(ph_sel)
for bg_ch, F_ch, P_ch in zip(bg_bs, FF, PP):
# All "T" are in seconds
Tch = find_T(m, F_ch, P_ch, bg_ch)
TT.append(Tch)
T.append(Tch.mean())
rate_th.append(np.mean(m / Tch))
self.add(TT=TT, T=T, bg_bs=bg_bs, FF=FF, PP=PP, F=F, P=P,
rate_th=rate_th)
def _burst_search_rate(self, m, L, min_rate_cps, c=-1, ph_sel=Ph_sel('all'),
compact=False, index_allph=True, verbose=True,
pure_python=False):
"""Compute burst search using a fixed minimum photon rate.
The burst starts when, for `m` consecutive photons::
(m - 1 - c) / (t[last] - t[first]) >= min_rate_cps
Arguments:
min_rate_cps (float or array): minimum photon rate for burst start.
If array is one value per channel.
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
Min_rate_cps = self._param_as_mch_array(min_rate_cps)
mburst = []
T_clk = (m - 1 - c) / Min_rate_cps / self.clk_p
for ich, t_clk in enumerate(T_clk):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
label = '%s CH%d' % (ph_sel, ich + 1) if verbose else None
burstarray = bsearch(ph_bs, L, m, t_clk, label=label, verbose=verbose)
if burstarray.size > 1:
bursts = bslib.Bursts(burstarray)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
mburst.append(bursts)
self.add(mburst=mburst, rate_th=Min_rate_cps, T=T_clk * self.clk_p)
if ph_sel != Ph_sel('all') and index_allph:
self._fix_mburst_from(ph_sel=ph_sel)
def _burst_search_TT(self, m, L, ph_sel=Ph_sel('all'), verbose=True,
compact=False, index_allph=True, pure_python=False,
mute=False):
"""Compute burst search with params `m`, `L` on ph selection `ph_sel`
Requires the list of arrays `self.TT` with the max time-thresholds in
the different burst periods for each channel (use `._calc_T()`).
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
self.recompute_bg_lim_ph_p(ph_sel=ph_sel, mute=mute)
MBurst = []
label = ''
for ich, T in enumerate(self.TT):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
burstarray_ch_list = []
Tck = T / self.clk_p
for ip, (l0, l1) in enumerate(self.Lim[ich]):
if verbose:
label = '%s CH%d-%d' % (ph_sel, ich + 1, ip)
burstarray = bsearch(ph_bs, L, m, Tck[ip], slice_=(l0, l1 + 1),
label=label, verbose=verbose)
if burstarray.size > 1:
burstarray_ch_list.append(burstarray)
if len(burstarray_ch_list) > 0:
data = np.vstack(burstarray_ch_list)
bursts = bslib.Bursts(data)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
MBurst.append(bursts)
self.add(mburst=MBurst)
if ph_sel != Ph_sel('all') and index_allph:
# Convert the burst data to be relative to ph_times_m.
# Convert both Lim/Ph_p and mburst, as they are both needed
# to compute `.bp`.
self.recompute_bg_lim_ph_p(ph_sel=Ph_sel('all'), mute=mute)
self._fix_mburst_from(ph_sel=ph_sel, mute=mute)
def _fix_mburst_from(self, ph_sel, mute=False):
"""Convert burst data from any ph_sel to 'all' timestamps selection.
"""
assert isinstance(ph_sel, Ph_sel) and not self._is_allph(ph_sel)
pprint(' - Fixing burst data to refer to ph_times_m ... ', mute)
for bursts, mask in zip(self.mburst,
self.iter_ph_masks(ph_sel=ph_sel)):
bursts.recompute_index_expand(mask, out=bursts)
pprint('[DONE]\n', mute)
def burst_search(self, L=None, m=10, F=6., P=None, min_rate_cps=None,
ph_sel=Ph_sel('all'), compact=False, index_allph=True,
c=-1, computefret=True, max_rate=False, dither=False,
pure_python=False, verbose=False, mute=False, pax=False):
"""Performs a burst search with specified parameters.
This method performs a sliding-window burst search without
binning the timestamps. The burst starts when the rate of `m`
photons is above a minimum rate, and stops when the rate falls below
the threshold. The result of the burst search is stored in the
`mburst` attribute (a list of Bursts objects, one per channel)
containing start/stop times and indexes. By default, after burst
search, this method computes donor and acceptor counts, applies
burst corrections (background, leakage, etc...) and computes
E (and S in case of ALEX). You can skip these steps by passing
`computefret=False`.
The minimum rate can be explicitly specified with the `min_rate_cps`
argument, or computed as a function of the background rate with the
`F` argument.
Parameters:
m (int): number of consecutive photons used to compute the
photon rate. Typical values 5-20. Default 10.
L (int or None): minimum number of photons in burst. If None
(default) L = m is used.
F (float): defines how many times higher than the background rate
is the minimum rate used for burst search
(`min rate = F * bg. rate`), assuming that `P = None` (default).
Typical values are 3-9. Default 6.
P (float): threshold for burst detection expressed as a
probability that a detected bursts is not due to a Poisson
background. If not None, `P` overrides `F`. Note that the
background process is experimentally super-Poisson so this
probability is not physically very meaningful. Using this
argument is discouraged.
min_rate_cps (float or list/array): minimum rate in cps for burst
start. If not None, it has the precedence over `P` and `F`.
If non-scalar, contains one rate per each multispot channel.
Typical values range from 20e3 to 100e3.
ph_sel (Ph_sel object): defines the "photon selection" (or stream)
to be used for burst search. Default: all photons.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
index_allph (bool): if True (default), the indexes of burst start
and stop (`istart`, `istop`) are relative to the full
timestamp array. If False, the indexes are relative to
timestamps selected by the `ph_sel` argument.
c (float): correction factor used in the rate vs time-lags relation.
`c` affects the computation of the burst-search parameter `T`.
When `F` is not None, `T = (m - 1 - c) / (F * bg_rate)`.
When using `min_rate_cps`, `T = (m - 1 - c) / min_rate_cps`.
computefret (bool): if True (default) compute donor and acceptor
counts, apply corrections (background, leakage, direct
excitation) and compute E (and S). If False, skip all these
steps and stop just after the initial burst search.
max_rate (bool): if True compute the max photon rate inside each
burst using the same `m` used for burst search. If False
(default) skip this step.
dither (bool): if True applies dithering corrections to burst
counts. Default False. See :meth:`Data.dither`.
pure_python (bool): if True, uses the pure python functions even
when optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Note:
when using `P` or `F` the background rates are needed, so
`.calc_bg()` must be called before the burst search.
Example:
d.burst_search(m=10, F=6)
Returns:
None, all the results are saved in the `Data` object.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if compact:
self._assert_compact(ph_sel)
pprint(" - Performing burst search (verbose=%s) ..." % verbose, mute)
# Erase any previous burst data
self.delete_burst_data()
if L is None:
L = m
if min_rate_cps is not None:
# Saves rate_th in self
self._burst_search_rate(m=m, L=L, min_rate_cps=min_rate_cps, c=c,
ph_sel=ph_sel, compact=compact,
index_allph=index_allph,
verbose=verbose, pure_python=pure_python)
else:
# Compute TT, saves P and F in self
self._calc_T(m=m, P=P, F=F, ph_sel=ph_sel, c=c)
# Use TT and compute mburst
self._burst_search_TT(L=L, m=m, ph_sel=ph_sel, compact=compact,
index_allph=index_allph, verbose=verbose,
pure_python=pure_python, mute=mute)
pprint("[DONE]\n", mute)
pprint(" - Calculating burst periods ...", mute)
self._calc_burst_period() # writes bp
pprint("[DONE]\n", mute)
# (P, F) or rate_th are saved in _calc_T() or _burst_search_rate()
self.add(m=m, L=L, ph_sel=ph_sel)
# The correction flags are both set here and in calc_ph_num() so that
# they are always consistent. Case 1: we perform only burst search
# (with no call to calc_ph_num). Case 2: we re-call calc_ph_num()
# without doing a new burst search
self.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
self._burst_search_postprocess(
computefret=computefret, max_rate=max_rate, dither=dither,
pure_python=pure_python, mute=mute, pax=pax)
def _burst_search_postprocess(self, computefret, max_rate, dither,
pure_python, mute, pax):
if computefret:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
self.calc_fret(count_ph=True, corrections=True, dither=dither,
mute=mute, pure_python=pure_python, pax=pax)
pprint(" [DONE Counting D/A]\n", mute)
if max_rate:
pprint(" - Computing max rates in burst ...", mute)
self.calc_max_rate(m=self.m)
pprint("[DONE]\n", mute)
def calc_ph_num(self, alex_all=False, pure_python=False):
"""Computes number of D, A (and AA) photons in each burst.
Arguments:
alex_all (bool): if True and self.ALEX is True, computes also the
donor channel photons during acceptor excitation (`nda`)
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
Returns:
Saves `nd`, `na`, `nt` (and eventually `naa`, `nda`) in self.
Returns None.
"""
mch_count_ph_in_bursts = _get_mch_count_ph_in_bursts_func(pure_python)
if not self.alternated:
nt = [b.counts.astype(float) if b.num_bursts > 0 else np.array([])
for b in self.mburst]
A_em = [self.get_A_em(ich) for ich in range(self.nch)]
if isinstance(A_em[0], slice):
# This is to support the case of A-only or D-only data
n0 = [np.zeros(mb.num_bursts) for mb in self.mburst]
if A_em[0] == slice(None):
nd, na = n0, nt # A-only case
elif A_em[0] == slice(0):
nd, na = nt, n0 # D-only case
else:
# This is the usual case with photons in both D and A channels
na = mch_count_ph_in_bursts(self.mburst, A_em)
nd = [t - a for t, a in zip(nt, na)]
assert (nt[0] == na[0] + nd[0]).all()
else:
# The "new style" would be:
#Mask = [m for m in self.iter_ph_masks(Ph_sel(Dex='Dem'))]
Mask = [d_em * d_ex for d_em, d_ex in zip(self.D_em, self.D_ex)]
nd = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * d_ex for a_em, d_ex in zip(self.A_em, self.D_ex)]
na = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * a_ex for a_em, a_ex in zip(self.A_em, self.A_ex)]
naa = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(naa=naa)
if alex_all or 'PAX' in self.meas_type:
Mask = [d_em * a_ex for d_em, a_ex in zip(self.D_em, self.A_ex)]
nda = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(nda=nda)
if self.ALEX:
nt = [d + a + aa for d, a, aa in zip(nd, na, naa)]
assert (nt[0] == na[0] + nd[0] + naa[0]).all()
elif 'PAX' in self.meas_type:
nt = [d + a + da + aa for d, a, da, aa in zip(nd, na, nda, naa)]
assert (nt[0] == na[0] + nd[0] + nda[0] + naa[0]).all()
# This is a copy of na which will never be corrected
# (except for background). It is used to compute the
# equivalent of naa for PAX:
# naa~ = naa - nar
# where naa~ is the A emission due to direct excitation
# by A laser during D+A-excitation,
# nar is the uncorrected A-channel signal during D-excitation,
# and naa is the A-channel signal during D+A excitation.
nar = [a.copy() for a in na]
self.add(nar=nar)
self.add(nd=nd, na=na, nt=nt,
bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
def fuse_bursts(self, ms=0, process=True, mute=False):
"""Return a new :class:`Data` object with nearby bursts fused together.
Arguments:
ms (float): fuse all bursts separated by less than `ms` millisecs.
If < 0 no burst is fused. Note that with ms = 0, overlapping
bursts are fused.
process (bool): if True (default), reprocess the burst data in
the new object applying corrections and computing FRET.
mute (bool): if True suppress any printed output.
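Example:
    Fuse bursts that overlap or are contiguous (sketch; `d` is a
    hypothetical `Data` object after burst search)::

        df = d.fuse_bursts(ms=0)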
"""
if ms < 0:
return self
mburst = mch_fuse_bursts(self.mburst, ms=ms, clk_p=self.clk_p)
new_d = Data(**self)
for k in ['E', 'S', 'nd', 'na', 'naa', 'nda', 'nar', 'nt', 'lsb', 'bp']:
if k in new_d:
new_d.delete(k)
new_d.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
new_d.add(mburst=mburst, fuse=ms)
if 'bg' in new_d:
new_d._calc_burst_period()
if process:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
new_d.calc_fret(count_ph=True, corrections=True,
dither=self.dithering, mute=mute, pax=self.pax)
pprint(" [DONE Counting D/A and FRET]\n", mute)
return new_d
##
# Burst selection and filtering
#
def select_bursts(self, filter_fun, negate=False, computefret=True,
args=None, **kwargs):
"""Return an object with bursts filtered according to `filter_fun`.
This is the main method to select bursts according to different
criteria. The selection rule is defined by the selection function
`filter_fun`. FRETBursts provides several predefined selection
functions; see :ref:`burst_selection`. New selection
functions can be defined and passed to this method to implement
arbitrary selection rules.
Arguments:
filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() point to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
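Example:
    Sketch using the size-selection function from
    :mod:`fretbursts.select_bursts` (assumed imported as
    `select_bursts`; the threshold value is illustrative)::

        ds = d.select_bursts(select_bursts.size, th1=30)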
"""
Masks, str_sel = self.select_bursts_mask(filter_fun, negate=negate,
return_str=True, args=args,
**kwargs)
d_sel = self.select_bursts_mask_apply(Masks, computefret=computefret,
str_sel=str_sel)
return d_sel
def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
args=None, **kwargs):
"""Returns mask arrays to select bursts according to `filter_fun`.
The function `filter_fun` is called to compute the mask arrays for
each channel.
This method is useful when you want to apply a selection from one
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
return_str: if True, returns for each channel a tuple with
a bool array and a string that can be added to the measurement
name to indicate the selection. If False returns only
the bool array. Default False.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A list of boolean arrays (one per channel) that define the burst
selection. If `return_str` is True returns a list of tuples, where
each tuple is a bool array and a string.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask_apply`
"""
# Create the list of bool masks for the bursts selection
if args is None:
args = tuple()
M = [filter_fun(self, i, *args, **kwargs) for i in range(self.nch)]
# Make sure the selection function has the right return signature
msg = 'The second argument returned by `%s` must be a string.'
assert np.all([isinstance(m[1], str) for m in M]), msg % filter_fun
# Make sure all boolean masks have the right size
msg = ("The size of boolean masks returned by `%s` needs to match "
"the number of bursts.")
assert np.all([m[0].size == n for m, n in zip(M, self.num_bursts)]), (
msg % filter_fun)
# Use `~` (logical not): unary `-` is not supported on numpy bool arrays
Masks = [~m[0] if negate else m[0] for m in M]
str_sel = M[0][1]
if return_str:
return Masks, str_sel
else:
return Masks
def select_bursts_mask_apply(self, masks, computefret=True, str_sel=''):
"""Returns a new Data object with bursts selected according to `masks`.
This method selects bursts using a list of boolean arrays as input.
Since the user needs to create the boolean arrays first, this method
is useful when experimenting with new selection criteria that don't
have a dedicated selection function. Usually, however, it is easier
to select bursts through :meth:`Data.select_bursts` (using a
selection function).
Arguments:
masks (list of arrays): each element in this list is a boolean
array that selects bursts in a channel.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() point to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask`
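Example:
    Apply a selection computed on one object to another (sketch;
    `d1` and `d2` are hypothetical `Data` objects with the same
    burst search; `select_bursts.size` from
    :mod:`fretbursts.select_bursts` is assumed imported)::

        masks = d1.select_bursts_mask(select_bursts.size, th1=30)
        d2_sel = d2.select_bursts_mask_apply(masks, str_sel='size>30')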
"""
# Attributes of ds point to the same objects of self
ds = Data(**self)
## Copy the per-burst fields that must be filtered
used_fields = [field for field in Data.burst_fields if field in self]
for name in used_fields:
# Recreate the current attribute as a new list to avoid modifying
# the old list that is also in the original object.
# The list is initialized with empty arrays because this is the
# valid value when a ch has no bursts.
empty = bslib.Bursts.empty() if name == 'mburst' else np.array([])
ds.add(**{name: [empty] * self.nch})
# Assign the new data
for ich, mask in enumerate(masks):
if self[name][ich].size == 0:
continue # -> no bursts in ch
# Note that boolean masking implies numpy array copy
# On the contrary slicing only makes a new view of the array
ds[name][ich] = self[name][ich][mask]
# Recompute E and S
if computefret:
ds.calc_fret(count_ph=False, pax=self.pax)
# Add the annotation about the filter function
ds.s = list(self.s + [str_sel]) # using append would also modify self
return ds
##
# Burst corrections
#
def background_correction(self, relax_nt=False, mute=False):
"""Apply background correction to burst sizes (nd, na,...)
"""
if self.bg_corrected:
return -1
pprint(" - Applying background correction.\n", mute)
self.add(bg_corrected=True)
for ich, bursts in enumerate(self.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
period = self.bp[ich]
nd, na, bg_d, bg_a, width = self.expand(ich, width=True)
nd -= bg_d
na -= bg_a
if 'nar' in self:
# Apply background correction to PAX field nar
self.nar[ich][:] = na
if relax_nt:
# This does not guarantee that nt = nd + na
self.nt[ich] -= self.bg_from(Ph_sel('all'))[ich][period] * width
else:
self.nt[ich] = nd + na
if self.alternated:
bg_aa = self.bg_from(Ph_sel(Aex='Aem'))
self.naa[ich] -= bg_aa[ich][period] * width
if 'nda' in self:
bg_da = self.bg_from(Ph_sel(Aex='Dem'))
self.nda[ich] -= bg_da[ich][period] * width
self.nt[ich] += self.naa[ich]
if 'PAX' in self.meas_type:
self.nt[ich] += self.nda[ich]
def leakage_correction(self, mute=False):
"""Apply leakage correction to burst sizes (nd, na,...)
"""
if self.leakage_corrected:
return -1
elif self.leakage != 0:
pprint(" - Applying leakage correction.\n", mute)
Lk = self.get_leakage_array()
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
self.na[i] -= self.nd[i] * Lk[i]
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(leakage_corrected=True)
def direct_excitation_correction(self, mute=False):
"""Apply direct excitation correction to bursts (ALEX-only).
The applied correction is: na -= naa*dir_ex
"""
if self.dir_ex_corrected:
return -1
elif self.dir_ex != 0:
pprint(" - Applying direct excitation correction.\n", mute)
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
naa = self.naa[i]
if 'PAX' in self.meas_type:
naa = naa - self.nar[i] # do not modify inplace
self.na[i] -= naa * self.dir_ex
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(dir_ex_corrected=True)
def dither(self, lsb=2, mute=False):
"""Add dithering (uniform random noise) to burst counts (nd, na,...).
The dithering amplitude is the range -0.5*lsb .. 0.5*lsb.
"""
if self.dithering:
return -1
pprint(" - Applying burst-size dithering.\n", mute)
self.add(dithering=True)
for nd, na in zip(self.nd, self.na):
nd += lsb * (np.random.rand(nd.size) - 0.5)
na += lsb * (np.random.rand(na.size) - 0.5)
if self.alternated:
for naa in self.naa:
naa += lsb * (np.random.rand(naa.size) - 0.5)
if 'nda' in self:
for nda in self.nda:
nda += lsb * (np.random.rand(nda.size) - 0.5)
self.add(lsb=lsb)
def calc_chi_ch(self, E):
"""Calculate the gamma correction prefactor factor `chi_ch` (array).
Computes `chi_ch`, a channel-dependent prefactor for gamma used
to correct dispersion of E across channels.
Returns:
array of `chi_ch` correction factors (one per spot).
To apply the correction assign the returned array to `Data.chi_ch`.
Upon assignment E values for all bursts will be corrected.
"""
chi_ch = (1 / E.mean() - 1) / (1 / E - 1)
return chi_ch
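    # A minimal usage sketch (hedged): `d` and `E_fit` are illustrative
    # names, e.g. `E_fit` holding one fitted E value per channel from a
    # per-channel fit such as fit_E_generic():
    #
    #     chi_ch = d.calc_chi_ch(E=np.asarray(E_fit))
    #     d.chi_ch = chi_ch   # the assignment recomputes E and S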
def corrections(self, mute=False):
"""Apply corrections on burst-counts: nd, na, nda, naa.
The corrections are: background, leakage (or bleed-through) and
direct excitation (dir_ex).
"""
self.background_correction(mute=mute)
self.leakage_correction(mute=mute)
if self.alternated:
self.direct_excitation_correction(mute=mute)
def _update_corrections(self):
"""Recompute corrections whose flag is True.
Checks the flags .bg_corrected, .leakage_corrected, .dir_ex_corrected,
.dithering and recomputes the correction if the corresponding flag
is True (i.e. if the correction was already applied).
Note that this method is not used for gamma and beta corrections
because these do not affect the `nd`, `na` and `naa` quantities but
are only applied when computing E, S and corrected size.
        Unlike :meth:`corrections`, this allows recomputing
        corrections that have already been applied.
"""
if 'mburst' not in self:
return # no burst search performed yet
old_bg_corrected = self.bg_corrected
old_leakage_corrected = self.leakage_corrected
old_dir_ex_corrected = self.dir_ex_corrected
old_dithering = self.dithering
self.calc_ph_num() # recompute uncorrected na, nd, nda, naa
if old_bg_corrected:
self.background_correction()
if old_leakage_corrected:
self.leakage_correction()
if old_dir_ex_corrected:
self.direct_excitation_correction()
if old_dithering:
self.dither(self.lsb)
# Recompute E and S with no corrections (because already applied)
self.calc_fret(count_ph=False, corrections=False, pax=self.pax)
@property
def leakage(self):
"""Spectral leakage (bleed-through) of D emission in the A channel.
"""
return self._leakage
@leakage.setter
def leakage(self, leakage):
self._update_leakage(leakage)
def _update_leakage(self, leakage):
"""Apply/update leakage (or bleed-through) correction.
"""
assert (np.size(leakage) == 1) or (np.size(leakage) == self.nch)
self.add(_leakage=np.asfarray(leakage), leakage_corrected=True)
self._update_corrections()
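    # Hedged example: because `leakage` is a property, plain assignment
    # applies the correction and re-runs _update_corrections():
    #
    #     d.leakage = 0.11    # scalar, or one coefficient per spot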
@property
def dir_ex(self):
"""Direct excitation correction factor."""
return self._dir_ex
@dir_ex.setter
def dir_ex(self, value):
self._update_dir_ex(value)
def _update_dir_ex(self, dir_ex):
"""Apply/update direct excitation correction with value `dir_ex`.
"""
assert np.size(dir_ex) == 1
self.add(_dir_ex=float(dir_ex), dir_ex_corrected=True)
self._update_corrections()
@property
def beta(self):
"""Beta factor used to correct S (compensates Dex and Aex unbalance).
"""
return self._beta
@beta.setter
def beta(self, value):
self._update_beta(value)
def _update_beta(self, beta):
"""Change the `beta` value and recompute E and S."""
assert np.size(beta) == 1
self.add(_beta=float(beta))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def chi_ch(self):
"""Per-channel relative gamma factor."""
return self._chi_ch
@chi_ch.setter
def chi_ch(self, value):
self._update_chi_ch(value)
def _update_chi_ch(self, chi_ch):
"""Change the `chi_ch` value and recompute E and S."""
msg = 'chi_ch is a per-channel correction and must have size == nch.'
assert np.size(chi_ch) == self.nch, ValueError(msg)
self.add(_chi_ch=np.asfarray(chi_ch))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def gamma(self):
"""Gamma correction factor (compensates DexDem and DexAem unbalance).
"""
return self._gamma
@gamma.setter
def gamma(self, value):
self._update_gamma(value)
def _update_gamma(self, gamma):
"""Change the `gamma` value and recompute E and S."""
assert (np.size(gamma) == 1) or (np.size(gamma) == self.nch)
self.add(_gamma=np.asfarray(gamma))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
def get_gamma_array(self):
"""Get the array of gamma factors, one per ch.
It always returns an array of gamma factors regardless of
whether `self.gamma` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
gamma = self.gamma
G = np.repeat(gamma, self.nch) if np.size(gamma) == 1 else gamma
G *= self.chi_ch
return G
def get_leakage_array(self):
"""Get the array of leakage coefficients, one per ch.
It always returns an array of leakage coefficients regardless of
whether `self.leakage` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
leakage = self.leakage
Lk = np.r_[[leakage] * self.nch] if np.size(leakage) == 1 else leakage
Lk *= self.chi_ch
return Lk
##
# Methods to compute burst quantities: FRET, S, SBR, max_rate, etc ...
#
def calc_sbr(self, ph_sel=Ph_sel('all'), gamma=1.):
"""Return Signal-to-Background Ratio (SBR) for each burst.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection
for which to compute the sbr. Changes the photons used for
burst size and the corresponding background rate. Valid values
here are Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem').
See :mod:`fretbursts.ph_sel` for details.
gamma (float): gamma value used to compute corrected burst size
in the case `ph_sel` is Ph_sel('all'). Ignored otherwise.
Returns:
A list of arrays (one per channel) with one value per burst.
The list is also saved in `sbr` attribute.
"""
ph_sel = self._fix_ph_sel(ph_sel)
sbr = []
for ich, mb in enumerate(self.mburst):
if mb.num_bursts == 0:
sbr.append(np.array([]))
continue # if no bursts skip this ch
nd, na, bg_d, bg_a = self.expand(ich)
nt = self.burst_sizes_ich(ich=ich, gamma=gamma)
signal = {Ph_sel('all'): nt,
Ph_sel(Dex='Dem'): nd, Ph_sel(Dex='Aem'): na}
background = {Ph_sel('all'): bg_d + bg_a,
Ph_sel(Dex='Dem'): bg_d, Ph_sel(Dex='Aem'): bg_a}
sbr.append(signal[ph_sel] / background[ph_sel])
self.add(sbr=sbr)
return sbr
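    # Hedged usage sketch (`d` is an illustrative Data instance):
    #
    #     sbr = d.calc_sbr(ph_sel=Ph_sel(Dex='Aem'))
    #     sbr[0]    # one SBR value per burst in the first channel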
def calc_burst_ph_func(self, func, func_kw, ph_sel=Ph_sel('all'),
compact=False, ich=0):
"""Evaluate a scalar function from photons in each burst.
        This method allows calling an arbitrary function on the photon
        timestamps of each burst. For example, if `func` is `np.mean` it
        computes the mean time in each burst.
Arguments:
func (callable): function that takes as first argument an array of
timestamps for one burst.
            func_kw (dict): additional keyword arguments to be passed to `func`.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
Returns:
            A list of arrays (one element per channel). Each array's size
            equals the number of bursts in the corresponding channel.
"""
if compact:
self._assert_compact(ph_sel)
kwargs = dict(func=func, func_kw=func_kw, compact=compact)
if self.alternated:
kwargs.update(alex_period=self.alex_period)
if compact:
kwargs.update(excitation_width=self._excitation_width(ph_sel))
results_mch = [burst_ph_stats(ph, bursts, mask=mask, **kwargs)
for ph, mask, bursts in
zip(self.iter_ph_times(),
self.iter_ph_masks(ph_sel=ph_sel),
self.mburst)]
return results_mch
def calc_max_rate(self, m, ph_sel=Ph_sel('all'), compact=False,
c=phrates.default_c):
"""Compute the max m-photon rate reached in each burst.
Arguments:
m (int): number of timestamps to use to compute the rate.
As for burst search, typical values are 5-20.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
c (float): this parameter is used in the definition of the
                rate estimator, which is `(m - 1 - c) / (t[last] - t[first])`.
For more details see :func:`.phtools.phrates.mtuple_rates`.
"""
ph_sel = self._fix_ph_sel(ph_sel)
Max_Rate = self.calc_burst_ph_func(func=phrates.mtuple_rates_max,
func_kw=dict(m=m, c=c),
ph_sel=ph_sel, compact=compact)
Max_Rate = [mr / self.clk_p - bg[bp] for bp, bg, mr in
zip(self.bp, self.bg_from(ph_sel), Max_Rate)]
params = dict(m=m, ph_sel=ph_sel, compact=compact)
self.add(max_rate=Max_Rate, max_rate_params=params)
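    # Hedged usage sketch: compute the peak 10-photon rate reached in each
    # burst; the background-subtracted rates end up in `self.max_rate`:
    #
    #     d.calc_max_rate(m=10)
    #     d.max_rate[0]    # per-burst rates (Hz) for the first channel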
def calc_fret(self, count_ph=False, corrections=True, dither=False,
mute=False, pure_python=False, pax=False):
"""Compute FRET (and stoichiometry if ALEX) for each burst.
        This is a high-level function that can be run after burst search.
By default, it will count Donor and Acceptor photons, perform
corrections (background, leakage), and compute gamma-corrected
FRET efficiencies (and stoichiometry if ALEX).
Arguments:
            count_ph (bool): if True, calls :meth:`calc_ph_num` to
                count Donor and Acceptor photons in each burst.
corrections (bool): if True (default), applies background and
bleed-through correction to burst data
dither (bool): whether to apply dithering to burst size.
Default False.
mute (bool): whether to mute all the printed output. Default False.
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Returns:
None, all the results are saved in the object.
"""
if count_ph:
self.calc_ph_num(pure_python=pure_python, alex_all=True)
if dither:
self.dither(mute=mute)
if corrections:
self.corrections(mute=mute)
self._calculate_fret_eff(pax=pax)
if self.alternated:
self._calculate_stoich(pax=pax)
#self._calc_alex_hist()
for attr in ('ES_binwidth', 'ES_hist', 'E_fitter', 'S_fitter'):
# E_fitter and S_fitter are only attributes
# so we cannot use the membership syntax (attr in self)
if hasattr(self, attr):
self.delete(attr, warning=False)
def _aex_fraction(self):
"""Proportion of Aex period versus Dex + Aex."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return ((A_ON[1] - A_ON[0]) /
(A_ON[1] - A_ON[0] + D_ON[1] - D_ON[0]))
def _aex_dex_ratio(self):
"""Ratio of Aex and Dex period durations."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return (A_ON[1] - A_ON[0]) / (D_ON[1] - D_ON[0])
def _calculate_fret_eff(self, pax=False):
"""Compute FRET efficiency (`E`) for each burst."""
G = self.get_gamma_array()
if not pax:
E = [na / (g * nd + na) for nd, na, g in zip(self.nd, self.na, G)]
else:
alpha = 1 - self._aex_fraction()
E = [(na / alpha) / (g * (nd + nda) + (na / alpha))
for nd, na, nda, g in zip(self.nd, self.na, self.nda, G)]
self.add(E=E, pax=pax)
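    # Worked example (hedged, illustrative numbers): with g = 1,
    # nd = 30 and na = 70 the standard formula gives
    # E = na / (g*nd + na) = 70/100 = 0.7; in PAX mode with alpha = 0.5
    # and nda = 10, E = (na/alpha) / (g*(nd + nda) + na/alpha)
    # = 140/180 ≈ 0.78.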
def _calculate_stoich(self, pax=False):
"""Compute "stoichiometry" (the `S` parameter) for each burst."""
G = self.get_gamma_array()
naa = self.naa
if 'PAX' in self.meas_type:
naa = [self._get_naa_ich(i) for i in range(self.nch)]
if not pax:
S = [(g * d + a) / (g * d + a + aa / self.beta)
for d, a, aa, g in zip(self.nd, self.na, naa, G)]
else:
# This is a PAX-enhanced formula which uses information
# from both alternation periods in order to compute S
alpha = 1 - self._aex_fraction()
S = [(g * (d + da) + a / alpha) /
(g * (d + da) + a / alpha + aa / (alpha * self.beta))
for d, a, da, aa, g in
zip(self.nd, self.na, self.nda, naa, G)]
self.add(S=S)
def _calc_alex_hist(self, binwidth=0.05):
"""Compute the ALEX histogram with given bin width `bin_step`"""
if 'ES_binwidth' in self and self.ES_binwidth == binwidth:
return
ES_hist_tot = [ES_histog(E, S, binwidth) for E, S in
zip(self.E, self.S)]
E_bins, S_bins = ES_hist_tot[0][1], ES_hist_tot[0][2]
ES_hist = [h[0] for h in ES_hist_tot]
E_ax = E_bins[:-1] + 0.5 * binwidth
S_ax = S_bins[:-1] + 0.5 * binwidth
self.add(ES_hist=ES_hist, E_bins=E_bins, S_bins=S_bins,
E_ax=E_ax, S_ax=S_ax, ES_binwidth=binwidth)
##
# Methods for measurement info
#
def status(self, add="", noname=False):
"""Return a string with burst search, corrections and selection info.
"""
name = "" if noname else self.name
s = name
if 'L' in self: # burst search has been done
if 'rate_th' in self:
s += " BS_%s L%d m%d MR%d" % (self.ph_sel, self.L, self.m,
np.mean(self.rate_th) * 1e-3)
else:
P_str = '' if self.P is None else ' P%s' % self.P
s += " BS_%s L%d m%d F%.1f%s" % \
(self.ph_sel, self.L, self.m, np.mean(self.F), P_str)
s += " G%.3f" % np.mean(self.gamma)
if 'bg_fun' in self: s += " BG%s" % self.bg_fun.__name__[:-4]
if 'bg_time_s' in self: s += "-%ds" % self.bg_time_s
if 'fuse' in self: s += " Fuse%.1fms" % self.fuse
if 'bg_corrected' in self and self.bg_corrected:
s += " bg"
if 'leakage_corrected' in self and self.leakage_corrected:
s += " Lk%.3f" % np.mean(self.leakage*100)
if 'dir_ex_corrected' in self and self.dir_ex_corrected:
s += " dir%.1f" % (self.dir_ex*100)
if 'dithering' in self and self.dithering:
s += " Dith%d" % self.lsb
if 's' in self: s += ' '.join(self.s)
return s + add
@property
def name(self):
"""Measurement name: last subfolder + file name with no extension."""
if not hasattr(self, '_name'):
basename = str(os.path.splitext(os.path.basename(self.fname))[0])
name = basename
last_dir = str(os.path.basename(os.path.dirname(self.fname)))
if len(last_dir) > 0:
name = '_'.join([last_dir, basename])
self.add(_name=name)
return self._name
@name.setter
def name(self, value):
self.add(_name=value)
def Name(self, add=""):
"""Return short filename + status information."""
n = self.status(add=add)
return n
def __repr__(self):
return self.status()
def stats(self, string=False):
"""Print common statistics (BG rates, #bursts, mean size, ...)"""
s = print_burst_stats(self)
if string:
return s
else:
print(s)
##
# FRET fitting methods
#
def fit_E_m(self, E1=-1, E2=2, weights='size', gamma=1.):
"""Fit E in each channel with the mean using bursts in [E1,E2] range.
Note:
            These two fits are equivalent (but the first is much faster)::
fit_E_m(weights='size')
fit_E_minimize(kind='E_size', weights='sqrt')
However `fit_E_minimize()` does not provide a model curve.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, 2)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
# Compute weighted mean
fit_res[ich, 0] = np.dot(w, E[mask])/w.sum()
# Compute weighted variance
fit_res[ich, 1] = np.sqrt(
np.dot(w, (E[mask] - fit_res[ich, 0])**2)/w.sum())
fit_model_F[ich] = mask.sum()/mask.size
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
self.add(fit_E_res=fit_res, fit_E_name='Moments',
E_fit=fit_res[:, 0], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_poiss(self, E1=-1, E2=2, method=1, **kwargs):
"""ML fit for E modeling size ~ Poisson, using bursts in [E1,E2] range.
"""
assert method in [1, 2, 3]
fit_fun = {1: fret_fit.fit_E_poisson_na, 2: fret_fit.fit_E_poisson_nt,
3: fret_fit.fit_E_poisson_nd}
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = zeros(self.nch)
for ich, mask in zip(range(self.nch), Mask):
nd, na, bg_d, bg_a = self.expand(ich)
bg_x = bg_d if method == 3 else bg_a
fit_res[ich] = fit_fun[method](nd[mask], na[mask],
bg_x[mask], **kwargs)
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Poisson',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_binom(self, E1=-1, E2=2, **kwargs):
"""ML fit for E modeling na ~ Binomial, using bursts in [E1,E2] range.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fret_fit.fit_E_binom(_d[mask], _a[mask], **kwargs)
for _d, _a, mask in zip(self.nd, self.na, Mask)])
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Binomial',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_minimize(self, kind='slope', E1=-1, E2=2, **kwargs):
"""Fit E using method `kind` ('slope' or 'E_size') and bursts in [E1,E2]
If `kind` is 'slope' the fit function is fret_fit.fit_E_slope()
If `kind` is 'E_size' the fit function is fret_fit.fit_E_E_size()
Additional arguments in `kwargs` are passed to the fit function.
"""
assert kind in ['slope', 'E_size']
# Build a dictionary fun_d so we'll call the function fun_d[kind]
fun_d = dict(slope=fret_fit.fit_E_slope,
E_size=fret_fit.fit_E_E_size)
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fun_d[kind](nd[mask], na[mask], **kwargs)
for nd, na, mask in
zip(self.nd, self.na, Mask)])
fit_name = dict(slope='Linear slope fit', E_size='E_size fit')
self.add(fit_E_res=fit_res, fit_E_name=fit_name[kind],
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_two_gauss_EM(self, fit_func=two_gaussian_fit_EM,
weights='size', gamma=1., **kwargs):
"""Fit the E population to a Gaussian mixture model using EM method.
Additional arguments in `kwargs` are passed to the fit_func().
"""
fit_res = zeros((self.nch, 5))
for ich, (nd, na, E) in enumerate(zip(self.nd, self.na, self.E)):
w = fret_fit.get_weights(nd, na, weights=weights, gamma=gamma)
fit_res[ich, :] = fit_func(E, weights=w, **kwargs)
self.add(fit_E_res=fit_res, fit_E_name=fit_func.__name__,
E_fit=fit_res[:, 2], fit_E_curve=True,
fit_E_model=two_gauss_mix_pdf,
fit_E_model_F=np.repeat(1, self.nch))
return self.E_fit
def fit_E_generic(self, E1=-1, E2=2, fit_fun=two_gaussian_fit_hist,
weights=None, gamma=1., **fit_kwargs):
"""Fit E in each channel with `fit_fun` using burst in [E1,E2] range.
All the fitting functions are defined in
:mod:`fretbursts.fit.gaussian_fitting`.
Parameters:
weights (string or None): specifies the type of weights
If not None `weights` will be passed to
`fret_fit.get_weights()`. `weights` can be not-None only when
using fit functions that accept weights (the ones ending in
`_hist` or `_EM`)
gamma (float): passed to `fret_fit.get_weights()` to compute
weights
All the additional arguments are passed to `fit_fun`. For example `p0`
or `mu_fix` can be passed (see `fit.gaussian_fitting` for details).
Note:
Use this method for CDF/PDF or hist fitting.
For EM fitting use :meth:`fit_E_two_gauss_EM()`.
"""
if fit_fun.__name__.startswith("gaussian_fit"):
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
if 'mu0' not in fit_kwargs: fit_kwargs.update(mu0=0.5)
if 'sigma0' not in fit_kwargs: fit_kwargs.update(sigma0=0.3)
iE, nparam = 0, 2
elif fit_fun.__name__ == "two_gaussian_fit_hist_min_ab":
fit_model = two_gauss_mix_ab
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.5, 0.6, 0.1, 0.5])
iE, nparam = 3, 6
elif fit_fun.__name__.startswith("two_gaussian_fit"):
fit_model = two_gauss_mix_pdf
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.6, 0.1, 0.5])
iE, nparam = 2, 5
else:
raise ValueError("Fitting function not recognized.")
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, nparam)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
if '_hist' in fit_fun.__name__ or '_EM' in fit_fun.__name__:
if weights is None:
w = None
else:
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
fit_res[ich, :] = fit_fun(E[mask], weights=w, **fit_kwargs)
else:
# Non-histogram fits (PDF/CDF) do not support weights
fit_res[ich, :] = fit_fun(E[mask], **fit_kwargs)
fit_model_F[ich] = mask.sum()/mask.size
# Save enough info to generate a fit plot (see hist_fret in burst_plot)
self.add(fit_E_res=fit_res, fit_E_name=fit_fun.__name__,
E_fit=fit_res[:, iE], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F, fit_E_weights=weights,
fit_E_gamma=gamma, fit_E_kwargs=fit_kwargs)
return self.E_fit
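    # Hedged usage sketch: two-Gaussian histogram fit of E restricted to
    # bursts with 0 < E < 1 (`d` is an illustrative Data instance):
    #
    #     d.fit_E_generic(E1=0, E2=1, fit_fun=two_gaussian_fit_hist,
    #                     weights='size')
    #     d.E_fit    # fitted E position, one value per channel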
def fit_from(self, D):
"""Copy fit results from another Data() variable.
        Now that the fit methods accept E1, E2 parameters this is probably useless.
"""
# NOTE Are 'fit_guess' and 'fit_fix' still used ?
fit_data = ['fit_E_res', 'fit_E_name', 'E_fit', 'fit_E_curve',
                    'fit_E_E1', 'fit_E_E2', 'fit_E_model',
'fit_E_model_F', 'fit_guess', 'fit_fix']
for name in fit_data:
if name in D:
self[name] = D[name]
setattr(self, name, self[name])
# Deal with the normalization to the number of bursts
self.add(fit_model_F=r_[[old_E.size/new_E.size \
for old_E, new_E in zip(D.E, self.E)]])
def fit_E_calc_variance(self, weights='sqrt', dist='DeltaE',
E_fit=None, E1=-1, E2=2):
"""Compute several versions of WEIGHTED std.dev. of the E estimator.
`weights` are multiplied *BEFORE* squaring the distance/error
`dist` can be 'DeltaE' or 'SlopeEuclid'
Note:
This method is still experimental
"""
assert dist in ['DeltaE', 'SlopeEuclid']
if E_fit is None:
E_fit = self.E_fit
E1 = self.fit_E_E1 if 'fit_E_E1' in self else -1
E2 = self.fit_E_E2 if 'fit_E_E2' in self else 2
else:
# If E_fit is not None the specified E1,E2 range is used
if E1 < 0 and E2 > 1:
pprint('WARN: E1 < 0 and E2 > 1 (wide range of E eff.)\n')
if size(E_fit) == 1 and self.nch > 0:
E_fit = np.repeat(E_fit, self.nch)
assert size(E_fit) == self.nch
E_sel = [Ei[(Ei > E1)*(Ei < E2)] for Ei in self.E]
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
E_var, E_var_bu, E_var_ph = \
zeros(self.nch), zeros(self.nch), zeros(self.nch)
for i, (Ech, nt, mask) in enumerate(zip(E_sel, self.nt, Mask)):
nt_s = nt[mask]
nd_s, na_s = self.nd[i][mask], self.na[i][mask]
w = fret_fit.get_weights(nd_s, na_s, weights=weights)
info_ph = nt_s.sum()
info_bu = nt_s.size
if dist == 'DeltaE':
distances = (Ech - E_fit[i])
elif dist == 'SlopeEuclid':
distances = fret_fit.get_dist_euclid(nd_s, na_s, E_fit[i])
residuals = distances * w
var = np.mean(residuals**2)
var_bu = np.mean(residuals**2)/info_bu
var_ph = np.mean(residuals**2)/info_ph
#lvar = np.mean(log(residuals**2))
#lvar_bu = np.mean(log(residuals**2)) - log(info_bu)
#lvar_ph = np.mean(log(residuals**2)) - log(info_ph)
E_var[i], E_var_bu[i], E_var_ph[i] = var, var_bu, var_ph
            assert not np.isnan(E_var[i])  # check there are no NaNs
self.add(E_var=E_var, E_var_bu=E_var_bu, E_var_ph=E_var_ph)
return E_var
| gpl-2.0 | -7,185,521,470,680,014,000 | 41.513032 | 82 | 0.562813 | false | 3.492336 | false | false | false |
sl2017/campos | campos_jobber_final/models/campos_jobber_accom_group.py | 1 | 1182 | # -*- coding: utf-8 -*-
# Copyright 2017 Stein & Gabelgaard ApS
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models, _
class CamposJobberAccomGroup(models.Model):
_name = 'campos.jobber.accom.group'
_description = 'Campos Jobber Accom Group' # TODO
name = fields.Char(required=True)
code = fields.Char(required=True)
owner_id = fields.Many2one('campos.event.participant', 'Owner')
accom_participant_ids = fields.One2many('campos.jobber.accomodation', 'accom_group_id', string='Participants')
number_participants = fields.Integer('# participants', compute='_compute_number_participants')
subcamp_id = fields.Many2one('campos.subcamp', 'Sub Camp')
_sql_constraints = [
('code_uniq', 'unique(code)', 'Code already in use. Choose another'),
('name_uniq', 'unique(name)', 'Name already in use. Choose another'),
]
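    # Hedged usage sketch (Odoo shell; field values are illustrative):
    #
    #     group = env['campos.jobber.accom.group'].create(
    #         {'name': 'Crew North', 'code': 'CN-01'})
    #     group.number_participants    # 0 until accomodations are linked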
@api.depends('accom_participant_ids')
@api.multi
def _compute_number_participants(self):
for cjag in self:
cjag.number_participants = len(cjag.accom_participant_ids) | agpl-3.0 | 1,359,394,841,940,174,300 | 38.433333 | 114 | 0.64467 | false | 3.507418 | false | false | false |
OpenTouch/python-facette | src/facette/v1/groupentry.py | 1 | 1278 | # Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.utils import *
import json
GROUP_ENTRY_ORIGIN = "origin"
GROUP_ENTRY_PATTERN = "pattern"
class GroupEntry:
def __init__(self, js=""):
self.entry = {}
self.origin = facette_to_json(GROUP_ENTRY_ORIGIN, js, self.entry)
self.pattern = facette_to_json(GROUP_ENTRY_PATTERN, js, self.entry)
    def set(self, origin=None, pattern=None):
        self.origin = facette_set(origin, GROUP_ENTRY_ORIGIN, self.entry)
        self.pattern = facette_set(pattern, GROUP_ENTRY_PATTERN, self.entry)
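    # Hedged usage sketch, assuming `js` is the decoded JSON payload of a
    # group entry (origin/pattern values are illustrative):
    #
    #     e = GroupEntry({"origin": "collectd", "pattern": "glob:host*"})
    #     print(e)    # dumps the underlying entry dict as JSON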
def __str__(self):
return json.dumps(self.entry)
def __repr__(self):
return str(self)
| apache-2.0 | -6,053,528,661,753,423,000 | 34.5 | 78 | 0.682316 | false | 3.6 | false | false | false |
macarthur-lab/xbrowse | xbrowse_server/api/views.py | 1 | 67273 | import datetime
import csv
import json
import logging
import sys
import traceback
from collections import defaultdict
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from settings import LOGIN_URL
from seqr.utils.gene_utils import get_queried_genes
from xbrowse.analysis_modules.combine_mendelian_families import get_variants_by_family_for_gene
from xbrowse_server.analysis.diagnostic_search import get_gene_diangostic_info
from xbrowse_server.base.model_utils import update_xbrowse_model, get_or_create_xbrowse_model, delete_xbrowse_model, \
create_xbrowse_model
from xbrowse_server.base.models import Project, Family, FamilySearchFlag, VariantNote, ProjectTag, VariantTag, GeneNote, \
AnalysedBy, VariantFunctionalData
from seqr.models import Individual as SeqrIndividual, MatchmakerResult
from xbrowse_server.api.utils import get_project_and_family_for_user, get_project_and_cohort_for_user, \
add_extra_info_to_variants_project, add_notes_to_genes, get_variant_notes, get_variant_tags, get_variant_functional_data
from xbrowse.variant_search.family import get_variants_with_inheritance_mode
from xbrowse_server.api import utils as api_utils
from xbrowse_server.api import forms as api_forms
from xbrowse_server.mall import get_reference, get_datastore, get_mall
from xbrowse_server.search_cache import utils as cache_utils
from xbrowse_server.decorators import log_request
from xbrowse_server.server_utils import JSONResponse
import utils
from xbrowse.variant_search import cohort as cohort_search
from xbrowse import Variant
from xbrowse.analysis_modules.mendelian_variant_search import MendelianVariantSearchSpec
from xbrowse.core import displays as xbrowse_displays
from xbrowse_server import server_utils
from . import basicauth
from xbrowse_server import user_controls
from django.utils import timezone
from xbrowse_server.phenotips.reporting_utilities import phenotype_entry_metric_for_individual
from xbrowse_server.base.models import ANALYSIS_STATUS_CHOICES
from xbrowse_server.matchmaker.utilities import get_all_clinical_data_for_family
from xbrowse_server.matchmaker.utilities import is_a_valid_patient_structure
from xbrowse_server.matchmaker.utilities import generate_slack_notification_for_seqr_match
from xbrowse_server.matchmaker.utilities import gather_all_annotated_genes_in_seqr
from xbrowse_server.matchmaker.utilities import find_projects_with_families_in_matchbox
from xbrowse_server.matchmaker.utilities import find_families_of_this_project_in_matchbox
from xbrowse_server.matchmaker.utilities import extract_hpo_id_list_from_mme_patient_struct
import requests
from django.contrib.admin.views.decorators import staff_member_required
logger = logging.getLogger()
@csrf_exempt
@basicauth.logged_in_or_basicauth()
@log_request('projects_api')
def projects(request):
"""
List the projects that this user has access to
"""
user_projects = user_controls.get_projects_for_user(request.user)
project_ids = [p.project_id for p in user_projects]
response_format = request.GET.get('format', 'json')
if response_format == 'json':
return JSONResponse({'projects': project_ids})
elif response_format == 'tsv':
return HttpResponse('\n'.join(project_ids))
else:
raise Exception("Invalid format")
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_api')
def mendelian_variant_search(request):
# TODO: how about we move project getter into the form, and just test for authX here?
# esp because error should be described in json, not just 404
request_dict = request.GET or request.POST
project, family = get_project_and_family_for_user(request.user, request_dict)
form = api_forms.MendelianVariantSearchForm(request_dict)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
try:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
except Exception as e:
traceback.print_exc()
return JSONResponse({
'is_error': True,
'error': str(e.args[0]) if e.args else str(e)
})
hashable_search_params = search_spec.toJSON()
hashable_search_params['family_id'] = family.family_id
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, hashable_search_params, list_of_variants)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request_dict.get('return_type', 'json')
if return_type == 'json':
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
elif return_type == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids, genes_to_return=search_spec.variant_filter.genes)
writer.writerow(fields)
return response
else:
return HttpResponse("Return type not implemented")
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
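# Hedged request sketch: besides the form-defined search parameters
# (names assumed), the view honors `return_type`: 'json' (the default)
# returns `variants` plus a replayable `search_hash`, while 'csv'
# streams results_<search_hash>.csv. The URL path is illustrative:
#
#     GET /api/mendelian-variant-search?project_id=p1&family_id=fam1&return_type=csv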
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def mendelian_variant_search_spec(request):
project, family = get_project_and_family_for_user(request.user, request.GET)
search_hash = request.GET.get('search_hash')
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
for variant in variants:
variant.set_extra('family_id', family.family_id)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request.GET.get('return_type')
if return_type == 'json' or not return_type:
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec_dict,
})
elif request.GET.get('return_type') == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids)
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('get_cohort_variants')
def cohort_variant_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortVariantSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = cohort.cohort_id
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), list_of_variants)
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_variant_search_spec_api')
def cohort_variant_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
# TODO: use form
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec.toJSON(),
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search')
def cohort_gene_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
sys.stderr.write("cohort_gene_search %s %s: starting ... \n" % (project.project_id, cohort.cohort_id))
form = api_forms.CohortGeneSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.cohort_id = cohort.cohort_id
sys.stderr.write("cohort_gene_search %s %s: search spec: %s \n" % (project.project_id, cohort.cohort_id, str(search_spec.toJSON())))
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
sys.stderr.write("cohort_gene_search %s %s: get %s genes \n" % (project.project_id, cohort.cohort_id, len(genes)))
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
sys.stderr.write("cohort_gene_search %s %s: done adding extra info \n" % (project.project_id, cohort.cohort_id))
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_spec')
def cohort_gene_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
search_spec, genes = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
if genes is None:
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_variants')
def cohort_gene_search_variants(request):
error = None
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortGeneSearchVariantsForm(request.GET)
if form.is_valid():
gene_id = form.cleaned_data['gene_id']
inheritance_mode = form.cleaned_data['inheritance_mode']
variant_filter = form.cleaned_data['variant_filter']
quality_filter = form.cleaned_data['quality_filter']
else:
error = server_utils.form_error_string(form)
if not error:
indivs_with_inheritance, gene_variation = cohort_search.get_individuals_with_inheritance_in_gene(
get_datastore(project),
get_reference(),
cohort.xcohort(),
inheritance_mode,
gene_id,
variant_filter=variant_filter,
quality_filter=quality_filter
)
relevant_variants = gene_variation.get_relevant_variants_for_indiv_ids(cohort.indiv_id_list())
api_utils.add_extra_info_to_variants_project(get_reference(), project, relevant_variants, add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variants': [v.toJSON() for v in relevant_variants],
'gene_info': get_reference().get_gene(gene_id),
}
return JSONResponse(ret)
else:
ret = {
'is_error': True,
'error': error
}
return JSONResponse(ret)
@login_required
@log_request('gene_info')
def gene_info(request, gene_id):
gene = get_reference().get_gene(gene_id)
gene['expression'] = get_reference().get_tissue_expression_display_values(gene_id)
add_notes_to_genes([gene], request.user)
ret = {
'gene': gene,
'is_error': False,
'found_gene': gene is not None,
}
return JSONResponse(ret)
@login_required
@log_request('family_variant_annotation')
def family_variant_annotation(request):
    # TODO: this view is not like the others - refactor to forms
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt']:
if request.GET.get(key) is None:
error = "%s is requred", key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_view(request.user):
raise PermissionDenied
if not error:
variant = get_datastore(project).get_single_variant(
family.project.project_id,
family.family_id,
int(request.GET['xpos']),
request.GET['ref'],
request.GET['alt']
)
if not variant:
error = "Variant does not exist"
if not error:
ret = {
'variant': variant.toJSON(),
'is_error': False,
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
@log_request('add_flag')
def add_family_search_flag(request):
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt', 'note', 'flag_type', 'flag_inheritance_mode']:
        if request.GET.get(key) is None:
            error = "%s is required" % key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_edit(request.user):
raise PermissionDenied
if not error:
xpos = int(request.GET['xpos'])
ref=request.GET.get('ref')
alt=request.GET['alt']
note=request.GET.get('note')
flag_type=request.GET.get('flag_type')
flag_inheritance_mode=request.GET.get('flag_inheritance_mode')
# todo: more validation - is variant valid?
flag = FamilySearchFlag(user=request.user,
family=family,
xpos=int(request.GET['xpos']),
ref=ref,
alt=alt,
note=note,
flag_type=flag_type,
suggested_inheritance=flag_inheritance_mode,
date_saved=timezone.now(),
)
if not error:
flag.save()
variant = get_datastore(project).get_single_variant(family.project.project_id, family.family_id,
xpos, ref, alt )
api_utils.add_extra_info_to_variants_project(get_reference(), project, [variant], add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variant': variant.toJSON(),
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
# @csrf_exempt
@log_request('add_analysed_by')
def add_family_analysed_by(request, data=None):
if not data:
data = request.GET
family_id = data.get('family_id')
project_id = data.get('project_id')
if not (family_id and project_id):
        return HttpResponseBadRequest('family_id and project_id are required')
try:
family = Family.objects.get(project__project_id=project_id, family_id=family_id)
except ObjectDoesNotExist:
raise Http404('No family matches the given query')
if not family.project.can_edit(request.user):
raise PermissionDenied
analysed_by = create_xbrowse_model(AnalysedBy, user=request.user, family=family, date_saved=timezone.now())
return JSONResponse({
'is_error': False,
'analysed_by': analysed_by.toJSON(),
})
@login_required
@log_request('delete_variant_note')
def delete_variant_note(request, note_id):
ret = {
'is_error': False,
}
notes = VariantNote.objects.filter(id=note_id)
if not notes:
ret['is_error'] = True
ret['error'] = 'note id %s not found' % note_id
else:
note = list(notes)[0]
if not note.project.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse(ret)
@login_required
@log_request('add_or_edit_variant_note')
def add_or_edit_variant_note(request):
"""Add a variant note"""
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantNoteForm(project, request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if 'note_id' in form.cleaned_data and form.cleaned_data['note_id']:
event_type = "edit_variant_note"
notes = VariantNote.objects.filter(
id=form.cleaned_data['note_id'],
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not notes:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
note = notes[0]
update_xbrowse_model(
note,
user=request.user,
note=form.cleaned_data['note_text'],
submit_to_clinvar=form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
else:
event_type = "add_variant_note"
create_xbrowse_model(
VariantNote,
user=request.user,
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
note=form.cleaned_data['note_text'],
submit_to_clinvar = form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
notes = get_variant_notes(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'note': form.cleaned_data['note_text'],
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'notes': notes,
})
@login_required
@log_request('add_or_edit_variant_tags')
def add_or_edit_variant_tags(request):
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantTagsForm(project, request.GET)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
variant_tags_to_delete = {
variant_tag.id: variant_tag for variant_tag in VariantTag.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'])
}
project_tag_events = {}
for project_tag in form.cleaned_data['project_tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantTag,
project_tag=project_tag,
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not created:
# this tag already exists so just keep it (eg. remove it from the set of tags that will be deleted)
del variant_tags_to_delete[tag.id]
continue
# this a new tag, so update who saved it and when
project_tag_events[project_tag] = "add_variant_tag"
update_xbrowse_model(
tag,
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
for variant_tag in variant_tags_to_delete.values():
project_tag_events[variant_tag.project_tag] = "delete_variant_tag"
delete_xbrowse_model(variant_tag)
# Get tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
tags = get_variant_tags(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag.tag,
'title': project_tag.title,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'tags': tags,
})
@login_required
@csrf_exempt
@log_request('add_or_edit_functional_data')
def add_or_edit_functional_data(request):
request_data = json.loads(request.body)
project, family = get_project_and_family_for_user(request.user, request_data)
form = api_forms.VariantFunctionalDataForm(request_data)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
project_tag_events = {}
tag_ids = set()
for tag_data in form.cleaned_data['tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantFunctionalData,
functional_data_tag=tag_data['tag'],
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
tag_ids.add(tag.id)
if created:
project_tag_events[tag_data['tag']] = "add_variant_functional_data"
elif tag.metadata != tag_data.get('metadata'):
project_tag_events[tag_data['tag']] = "edit_variant_functional_data"
else:
continue
# this a new/changed tag, so update who saved it and when
update_xbrowse_model(
tag,
metadata=tag_data.get('metadata'),
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
variant_tags_to_delete = VariantFunctionalData.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
).exclude(id__in=tag_ids)
for variant_tag in variant_tags_to_delete:
project_tag_events[variant_tag.functional_data_tag] = "delete_variant_functional_data"
delete_xbrowse_model(variant_tag)
# get the tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
functional_data = get_variant_functional_data(project=project, family_id=request_data.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'functional_data': functional_data,
})
@login_required
@log_request('delete_gene_note')
def delete_gene_note(request, note_id):
try:
note = GeneNote.objects.get(id=note_id)
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % note_id
})
if not note.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse({
'is_error': False,
})
@login_required
@log_request('add_or_edit_gene_note')
def add_or_edit_gene_note(request):
"""Add a gene note"""
form = api_forms.GeneNoteForm(request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if form.cleaned_data.get('note_id'):
event_type = "edit_gene_note"
try:
note = GeneNote.objects.get(id=form.cleaned_data['note_id'])
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
if not note.can_edit(request.user):
raise PermissionDenied
update_xbrowse_model(
note,
note=form.cleaned_data['note_text'],
user=request.user,
date_saved=timezone.now(),
)
else:
event_type = "add_variant_note"
note = create_xbrowse_model(
GeneNote,
user=request.user,
gene_id=form.cleaned_data['gene_id'],
note=form.cleaned_data['note_text'],
date_saved=timezone.now(),
)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'note': form.cleaned_data['note_text'],
'gene_id':form.cleaned_data['gene_id'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'note': note.toJSON(request.user),
})
def gene_autocomplete(request):
query = request.GET.get('q', '')
gene_items = get_queried_genes(query, 20)
genes = [{
'value': item['gene_id'],
'label': item['gene_symbol'],
} for item in gene_items]
return JSONResponse(genes)
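# Hedged response sketch for gene_autocomplete (gene ids are illustrative):
#
#     GET .../gene-autocomplete?q=BRC
#     [{"value": "ENSG00000012048", "label": "BRCA1"}, ...]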
@login_required
@log_request('variant_info')
def variant_info(request):
pass
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_api')
def combine_mendelian_families(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CombineMendelianFamiliesForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_group_id = family_group.slug
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def combine_mendelian_families_spec(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
search_hash = request.GET.get('search_hash')
search_spec, genes = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec_obj = MendelianVariantSearchSpec.fromJSON(search_spec)
if request.GET.get('return_type') != 'csv' or not request.GET.get('group_by_variants'):
if genes is None:
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
if request.GET.get('return_type') != 'csv':
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
else:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="family_group_results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
writer.writerow(["gene", "# families", "family list", "chrom", "start", "end"])
for gene in genes:
family_id_list = [family_id for (project_id, family_id) in gene["family_id_list"]]
writer.writerow(map(str, [gene["gene_name"], len(family_id_list), " ".join(family_id_list), gene["chr"], gene["start"], gene["end"], ""]))
return response
else:
# download results grouped by variant
indiv_id_list = []
for family in family_group.get_families():
indiv_id_list.extend(family.indiv_ids_with_variant_data())
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
headers = ['genes','chr','pos','ref','alt','worst_annotation' ]
headers.extend(project.get_reference_population_slugs())
headers.extend([ 'polyphen','sift','muttaster','fathmm'])
for indiv_id in indiv_id_list:
headers.append(indiv_id)
headers.append(indiv_id+'_gq')
headers.append(indiv_id+'_dp')
writer.writerow(headers)
mall = get_mall(project)
variant_key_to_individual_id_to_variant = defaultdict(dict)
variant_key_to_variant = {}
for family in family_group.get_families():
for variant in get_variants_with_inheritance_mode(
mall,
family.xfamily(),
search_spec_obj.inheritance_mode,
search_spec_obj.variant_filter,
search_spec_obj.quality_filter,
user=request.user):
if len(variant.coding_gene_ids) == 0:
continue
variant_key = (variant.xpos, variant.ref, variant.alt)
variant_key_to_variant[variant_key] = variant
for indiv_id in family.indiv_ids_with_variant_data():
variant_key_to_individual_id_to_variant[variant_key][indiv_id] = variant
for variant_key in sorted(variant_key_to_individual_id_to_variant.keys()):
variant = variant_key_to_variant[variant_key]
individual_id_to_variant = variant_key_to_individual_id_to_variant[variant_key]
genes = [mall.reference.get_gene_symbol(gene_id) for gene_id in variant.coding_gene_ids]
fields = []
fields.append(','.join(genes))
fields.extend([
variant.chr,
str(variant.pos),
variant.ref,
variant.alt,
variant.annotation.get('vep_group', '.'),
])
for ref_population_slug in project.get_reference_population_slugs():
fields.append(variant.annotation['freqs'][ref_population_slug])
for field_key in ['polyphen', 'sift', 'muttaster', 'fathmm']:
fields.append(variant.annotation.get(field_key, ""))
for indiv_id in indiv_id_list:
variant = individual_id_to_variant.get(indiv_id)
genotype = None
if variant is not None:
genotype = variant.get_genotype(indiv_id)
if genotype is None:
fields.extend(['.', '.', '.'])
else:
fields.append("/".join(genotype.alleles) if genotype.alleles else "./.")
#fields[-1] += " %s (%s)" % (indiv_id, genotype.num_alt)
fields.append(str(genotype.gq) if genotype.gq is not None else '.')
fields.append(genotype.extras['dp'] if genotype.extras.get('dp') is not None else '.')
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_variants_api')
def combine_mendelian_families_variants(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
form = api_forms.CombineMendelianFamiliesVariantsForm(request.GET)
if form.is_valid():
variants_grouped = get_variants_by_family_for_gene(
get_mall(project),
[f.xfamily() for f in form.cleaned_data['families']],
form.cleaned_data['inheritance_mode'],
form.cleaned_data['gene_id'],
variant_filter=form.cleaned_data['variant_filter'],
quality_filter=form.cleaned_data['quality_filter'],
user=request.user,
)
variants_by_family = []
for family in form.cleaned_data['families']:
variants = variants_grouped[(family.project.project_id, family.family_id)]
add_extra_info_to_variants_project(get_reference(), family.project, variants, add_family_tags=True, add_populations=True)
variants_by_family.append({
'project_id': family.project.project_id,
'family_id': family.family_id,
'family_name': str(family),
'variants': [v.toJSON() for v in variants],
})
return JSONResponse({
'is_error': False,
'variants_by_family': variants_by_family,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('diagnostic_search')
def diagnostic_search(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.DiagnosticSearchForm(family, request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
gene_list = form.cleaned_data['gene_list']
diagnostic_info_list = []
for gene_id in gene_list.gene_id_list():
diagnostic_info = get_gene_diangostic_info(family, gene_id, search_spec.variant_filter)
add_extra_info_to_variants_project(get_reference(), project, diagnostic_info._variants, add_family_tags=True, add_populations=True)
diagnostic_info_list.append(diagnostic_info)
return JSONResponse({
'is_error': False,
'gene_diagnostic_info_list': [d.toJSON() for d in diagnostic_info_list],
'gene_list_info': gene_list.toJSON(details=True),
'data_summary': family.get_data_summary(),
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
def family_gene_lookup(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
gene_id = request.GET.get('gene_id')
if not get_reference().is_valid_gene_id(gene_id):
return JSONResponse({
'is_error': True,
'error': 'Invalid gene',
})
family_gene_data = get_gene_diangostic_info(family, gene_id)
add_extra_info_to_variants_project(get_reference(), project, family_gene_data._variants, add_family_tags=True,
add_populations=True)
return JSONResponse({
'is_error': False,
'family_gene_data': family_gene_data.toJSON(),
'data_summary': family.get_data_summary(),
'gene': get_reference().get_gene(gene_id),
})
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_individuals_phenotypes(request,project_id):
"""
Export all HPO terms entered for this project individuals. A direct proxy
from PhenoTips API
Args:
project_id
Returns:
A JSON string of HPO terms entered
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
result={}
for individual in project.get_individuals():
ui_display_name = individual.indiv_id
ext_id=individual.phenotips_id
result[ui_display_name] = phenotype_entry_metric_for_individual(project_id, ext_id)['raw']
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_family_statuses(request,project_id):
"""
Exports the status of all families in this project
Args:
Project ID
Returns:
All statuses of families
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
result={}
for family in project.get_families():
fam_details =family.toJSON()
result[fam_details['family_id']] = status_description_map.get(family.analysis_status, 'unknown')
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_variants(request,project_id):
"""
Export all variants associated to this project
Args:
Project id
Returns:
A JSON object of variant information
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
variants=[]
project_tags = ProjectTag.objects.filter(project__project_id=project_id)
for project_tag in project_tags:
variant_tags = VariantTag.objects.filter(project_tag=project_tag)
for variant_tag in variant_tags:
variant = get_datastore(project).get_single_variant(
project.project_id,
variant_tag.family.family_id if variant_tag.family else '',
variant_tag.xpos,
variant_tag.ref,
variant_tag.alt,
)
variant_json = variant.toJSON() if variant is not None else {'xpos': variant_tag.xpos, 'ref': variant_tag.ref, 'alt': variant_tag.alt}
family_status = ''
if variant_tag.family:
family_status = status_description_map.get(variant_tag.family.analysis_status, 'unknown')
variants.append({"variant":variant_json,
"tag":project_tag.tag,
"description":project_tag.title,
"family":variant_tag.family.toJSON(),
"family_status":family_status})
return JSONResponse(variants)
@login_required
@log_request('matchmaker_individual_add')
def get_submission_candidates(request,project_id,family_id,indiv_id):
"""
Gathers submission candidate individuals from this family
Args:
individual_id: an individual ID
project_id: project this individual belongs to
Returns:
Status code
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
id_map,affected_patient = get_all_clinical_data_for_family(project_id,family_id,indiv_id)
return JSONResponse({
"submission_candidate":affected_patient,
"id_map":id_map
})
@csrf_exempt
@login_required
@log_request('matchmaker_individual_add')
def add_individual(request):
"""
Adds given individual to the local database
Args:
submission information of a single patient is expected in the POST data
Returns:
Submission status information
"""
    patient_data = request.POST.get("patient_data")
    if patient_data is None:
        return JSONResponse({
            'http_result': {"message": "wasn't able to parse patient_data in POST!"},
            'status_code': 400,
        })
    affected_patient = json.loads(patient_data)
    seqr_id = request.POST.get("localId")
    project_id = request.POST.get("projectId")
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=seqr_id, family__project=project.seqr_project)
submission = json.dumps({'patient':affected_patient})
validity_check=is_a_valid_patient_structure(affected_patient)
if not validity_check['status']:
return JSONResponse({
'http_result':{"message":validity_check['reason'] + ", the patient was not submitted to matchmaker"},
'status_code':400,
})
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
result = requests.post(url=settings.MME_ADD_INDIVIDUAL_URL,
headers=headers,
data=submission)
#if successfully submitted to MME, persist info
if result.status_code==200 or result.status_code==409:
individual.mme_submitted_data = {'patient':affected_patient}
individual.mme_submitted_date = datetime.datetime.now()
individual.mme_deleted_date = None
individual.mme_deleted_by = None
individual.save()
#update the contact information store if any updates were made
updated_contact_name = affected_patient['contact']['name']
updated_contact_href = affected_patient['contact']['href']
try:
project = Project.objects.get(project_id=project_id)
update_xbrowse_model(
project,
mme_primary_data_owner=updated_contact_name,
mme_contact_url=updated_contact_href,
)
except ObjectDoesNotExist:
            logger.error("ERROR: couldn't update the contact name and href of MME submission: %s %s", updated_contact_name, updated_contact_href)
#seqr_project.save()
if result.status_code==401:
return JSONResponse({
'http_result':{"message":"sorry, authorization failed, I wasn't able to insert that individual"},
'status_code':result.status_code,
})
return JSONResponse({
'http_result':result.json(),
'status_code':result.status_code,
})
@csrf_exempt
@login_required
@log_request('matchmaker_individual_delete')
def delete_individual(request,project_id, indiv_id):
"""
Deletes a given individual from the local database
Args:
Project ID of project
Individual ID of a single patient to delete
Returns:
Delete confirmation
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
#find the latest ID that was used in submission which might defer from seqr ID
matchbox_id=indiv_id
if individual.mme_submitted_date:
if individual.mme_deleted_date:
return JSONResponse({"status_code":402,"message":"that individual has already been deleted"})
else:
matchbox_id = individual.mme_submitted_data['patient']['id']
logger.info("using matchbox ID: %s" % (matchbox_id))
payload = {"id":matchbox_id}
result = requests.delete(url=settings.MME_DELETE_INDIVIDUAL_URL,
headers=headers,
data=json.dumps(payload))
#if successfully deleted from matchbox/MME, persist that detail
if result.status_code == 200:
deleted_date = datetime.datetime.now()
individual.mme_deleted_date = deleted_date
individual.mme_deleted_by = request.user
individual.save()
return JSONResponse({"status_code":result.status_code,"message":result.text, 'deletion_date':str(deleted_date)})
else:
return JSONResponse({"status_code":404,"message":result.text})
return JSONResponse({"status_code":result.status_code,"message":result.text})
@login_required
@log_request('matchmaker_family_submissions')
def get_family_submissions(request,project_id,family_id):
"""
Gets the last 4 submissions for this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
family = get_object_or_404(Family, project=project, family_id=family_id)
family_submissions=[]
family_members_submitted=[]
for individual in family.individual_set.filter(seqr_individual__mme_submitted_date__isnull=False):
family_submissions.append({'submitted_data': individual.seqr_individual.mme_submitted_data,
'hpo_details': extract_hpo_id_list_from_mme_patient_struct(individual.seqr_individual.mme_submitted_data),
'seqr_id': individual.indiv_id,
'family_id': family_id,
'project_id': project_id,
'insertion_date': individual.seqr_individual.mme_submitted_date.strftime("%b %d %Y %H:%M:%S"),
'deletion': individual.seqr_individual.mme_deleted_date,
})
family_members_submitted.append(individual.indiv_id)
#TODO: figure out when more than 1 indi for a family. For now returning a list. Eventually
#this must be the latest submission for every indiv in a family
return JSONResponse({
"family_submissions":family_submissions,
"family_members_submitted":family_members_submitted
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_internally_and_externally(request,project_id,indiv_id):
"""
Looks for matches for the given individual. Expects a single patient (MME spec) in the POST
data field under key "patient_data"
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
#first look in the local MME database
internal_result = requests.post(url=settings.MME_LOCAL_MATCH_URL,
headers=headers,
data=patient_data
)
ids={}
for internal_res in internal_result.json().get('results',[]):
ids[internal_res['patient']['id']] = internal_res
extract_hpo_id_list_from_mme_patient_struct(internal_res,hpo_map)
results['local_results']={"result":internal_result.json(),
"status_code":internal_result.status_code
}
#then externally (unless turned off)
if settings.SEARCH_IN_EXTERNAL_MME_NODES:
extnl_result = requests.post(url=settings.MME_EXTERNAL_MATCH_URL,
headers=headers,
data=patient_data
)
results['external_results']={"result":extnl_result.json(),
"status_code":str(extnl_result.status_code)
}
for ext_res in extnl_result.json().get('results',[]):
extract_hpo_id_list_from_mme_patient_struct(ext_res,hpo_map)
ids[ext_res['patient']['id']] = ext_res
saved_results = {
result.result_data['patient']['id']: result for result in MatchmakerResult.objects.filter(individual=individual)
}
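    # Results not seen before are persisted below, so analyst state (flags,
    # comments, contact status) survives repeated searches for this individual.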
result_analysis_state={}
for id in ids.keys():
persisted_result_det = saved_results.get(id)
if not persisted_result_det:
persisted_result_det = MatchmakerResult.objects.create(
individual=individual,
result_data=ids[id],
last_modified_by=request.user,
)
result_analysis_state[id] = {
"id_of_indiv_searched_with":indiv_id,
"content_of_indiv_searched_with":json.loads(patient_data),
"content_of_result":ids[id],
"result_id":id,
"we_contacted_host":persisted_result_det.we_contacted,
"host_contacted_us":persisted_result_det.host_contacted,
"seen_on":str(persisted_result_det.created_date),
"deemed_irrelevant":persisted_result_det.deemed_irrelevant,
"comments":persisted_result_det.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":persisted_result_det.flag_for_analysis,
"username_of_last_event_initiator":persisted_result_det.last_modified_by.username,
}
#post to slack
if settings.SLACK_TOKEN is not None:
generate_slack_notification_for_seqr_match(results,project_id,indiv_id)
return JSONResponse({
"match_results":results,
"result_analysis_state":result_analysis_state,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_in_open_mme_sources(request,project_id,indiv_id):
"""
Match in other MME data sources that are open and not toke protected (ex: Monarch)
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
#these open sites require no token
headers={
'X-Auth-Token': '',
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
open_sites = {'Monarch Initiative':'https://mme.monarchinitiative.org/match'} #todo: put into settings
for open_site_name, open_site_url in open_sites.iteritems():
results_back = requests.post(url=open_site_url,
headers=headers,
data=patient_data)
ids={}
for res in results_back.json().get('results',[]):
ids[res['patient']['id']] = res
extract_hpo_id_list_from_mme_patient_struct(res,hpo_map)
results[open_site_name]={"result":results_back.json(),
"status_code":results_back.status_code
}
return JSONResponse({
"match_results":results,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('get_project_individuals')
def get_project_individuals(request,project_id):
"""
Get a list of individuals with their family IDs of this project
Args:
project_id
Returns:
map of individuals and their family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@login_required
@csrf_exempt
@log_request('get_family_individuals')
def get_family_individuals(request,project_id,family_id):
"""
    Get a list of individuals belonging to this family ID
Args:
project_id
family_id
Returns:
map of individuals in this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
if indiv.to_dict()['family_id'] == family_id:
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_id_details')
def get_matchbox_id_details(request,matchbox_id):
"""
Gets information of this matchbox_id
"""
match_individuals = SeqrIndividual.objects.filter(mme_submitted_data__patient__id=matchbox_id)
records = []
for individual in match_individuals:
record = {
'seqr_id':individual.individual_id,
'family_id':individual.family.family_id,
'project_id':individual.family.project.deprecated_project_id,
'insertion_date':str(individual.mme_submitted_date)}
        genomic_features = []
        for g_feature in individual.mme_submitted_data['patient']['genomicFeatures']:
            genomic_features.append({'gene_id': g_feature['gene']['id'],
                                     'variant_start': g_feature['variant']['start'],
                                     'variant_end': g_feature['variant']['end']})
        record['submitted_genomic_features'] = genomic_features
        features = []
        for feature in individual.mme_submitted_data['patient']['features']:
            features.append({'id': feature['id'],
                             'label': feature.get('label', '')})
        record['submitted_features'] = features
records.append(record)
return JSONResponse({
'submission_records':records
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics(request):
"""
Gets matchbox metrics
"""
mme_headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
r = requests.get(url=settings.MME_MATCHBOX_METRICS_URL,
headers=mme_headers)
if r.status_code==200:
matchbox_metrics = r.json()['metrics']
genes_in_matchbox=matchbox_metrics['geneCounts'].keys()
seqr__gene_info = gather_all_annotated_genes_in_seqr()
seqr_metrics={"genes_in_seqr":len(seqr__gene_info),
"genes_found_in_matchbox":0}
unique_genes=[]
for gene_ids,proj in seqr__gene_info.iteritems():
if gene_ids[0] in genes_in_matchbox:
unique_genes.append(gene_ids[0])
seqr_metrics['genes_found_in_matchbox'] = len(set(unique_genes))
seqr_metrics["submission_info"]=find_projects_with_families_in_matchbox()
return JSONResponse({"from_matchbox":r.json(),
"from_seqr":seqr_metrics})
else:
        resp = HttpResponse('{"message":"error contacting matchbox to gain metrics", "status":%d}' % r.status_code,
                            status=r.status_code)
        return resp
@login_required
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics_for_project(request,project_id):
"""
Gets matchbox submission metrics for project (accessible to non-staff)
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
try:
return JSONResponse({"families":find_families_of_this_project_in_matchbox(project_id)})
except:
raise
@login_required
@csrf_exempt
@log_request('update_match_comment')
def update_match_comment(request,project_id,match_id,indiv_id):
"""
Update a comment made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
    comment = request.POST.get("comment")
    if comment is None:
        return HttpResponse('{"message":"wasn\'t able to parse POST!"}', status=500)
persisted_result_dets = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id)
if persisted_result_dets.count()>0:
for persisted_result_det in persisted_result_dets:
persisted_result_det.comments=comment.strip()
persisted_result_det.last_modified_by=request.user
persisted_result_det.save()
resp = HttpResponse('{"message":"OK"}',status=200)
return resp
    else:
        return HttpResponse('{"message":"error updating database"}', status=500)
@staff_member_required(login_url=LOGIN_URL)
@csrf_exempt
@log_request('get_current_match_state_of_all_results')
def get_current_match_state_of_all_results(request):
"""
gets the current state of all matches in this project
"""
return HttpResponse('{"message":"error unimplemented MME endpoint"}',status=500)
@login_required
@csrf_exempt
@log_request('get_current_match_state')
def get_current_match_state(request,project_id,match_id,indiv_id):
"""
gets the current state of this matched pair
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
try:
result_model = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
persisted_result_det = {
"id_of_indiv_searched_with":indiv_id,
"content_of_result":result_model.result_data,
"result_id":result_model.result_data['patient']['id'],
"we_contacted_host":result_model.we_contacted,
"host_contacted_us":result_model.host_contacted,
"seen_on":str(result_model.created_date),
"deemed_irrelevant":result_model.deemed_irrelevant,
"comments":result_model.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":result_model.flag_for_analysis,
"username_of_last_event_initiator":result_model.last_modified_by.username,
}
except Exception as e:
print e
return HttpResponse('{"message":"error talking to database"}',status=500)
return JSONResponse(persisted_result_det)
@login_required
@csrf_exempt
@log_request('match_state_update')
def match_state_update(request,project_id,match_id,indiv_id):
"""
Update a state change made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
state_type = request.POST.get('state_type', None)
state = request.POST.get('state',None)
if state_type is None or state is None:
return HttpResponse('{"message":"error parsing POST"}',status=500)
persisted_result_det = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
    try:
        if state_type == 'flag_for_analysis':
            persisted_result_det.flag_for_analysis = (state == "true")
        if state_type == 'deemed_irrelevant':
            persisted_result_det.deemed_irrelevant = (state == "true")
        if state_type == 'we_contacted_host':
            persisted_result_det.we_contacted = (state == "true")
        if state_type == 'host_contacted_us':
            persisted_result_det.host_contacted = (state == "true")
        persisted_result_det.last_modified_by = request.user
        persisted_result_det.save()
    except Exception:
        return HttpResponse('{"message":"error updating database"}', status=500)
    return HttpResponse('{"message":"successfully updated database"}', status=200)
| agpl-3.0 | -8,759,921,877,234,000,000 | 36.645775 | 170 | 0.615225 | false | 3.801808 | false | false | false |
dvklopfenstein/PrincetonAlgorithms | py/AlgsSedgewickWayne/Topological.py | 1 | 2002 | """Compute topological ordering (with DFS) of a DAG or edge-weighted DAG. Runs in O(E + V) time."""
from AlgsSedgewickWayne.DirectedCycle import DirectedCycle
from AlgsSedgewickWayne.DepthFirstOrder import DepthFirstOrder
from AlgsSedgewickWayne.EdgeWeightedDigraph import EdgeWeightedDigraph
from AlgsSedgewickWayne.EdgeWeightedDirectedCycle import EdgeWeightedDirectedCycle
class Topological(object):
  """Determines if digraph G has a topological order and, if so, finds topological order."""
  def __init__(self, G): # G is a Digraph or EdgeWeightedDigraph; O(V+E) wc
    # Choose the cycle finder that matches the graph type.
    if isinstance(G, EdgeWeightedDigraph):
      finder = EdgeWeightedDirectedCycle(G)
    else:
      finder = DirectedCycle(G)
    self._order = None # topological order (None if G has a directed cycle)
    self._rank = {}    # rank[v] = position of vertex v in topological order
    if not finder.hasCycle():
      dfs = DepthFirstOrder(G)
      self._order = dfs.reversePost()
      for i, v in enumerate(self._order):
        self._rank[v] = i
  # Returns a topological order if the digraph has a topological order, None otherwise
  def order(self): return self._order # O(V)
  # Does the digraph have a topological order?
  def hasOrder(self): return self._order is not None # O(k)
  def rank(self, v): # O(k)
    """The rank of vertex v in the topological order; -1 if the digraph is not a DAG."""
    self._validateVertex(v)
    if self.hasOrder(): return self._rank[v]
    return -1
  def _validateVertex(self, v):
    """Raise an Exception unless 0 <= v < V."""
    V = len(self._rank)
    if v < 0 or v >= V:
      raise Exception("vertex {} is not between 0 and {}".format(v, V-1))
# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.
# Copyright 2002-2019, DV Klopfenstein, Python port
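if __name__ == '__main__':
  # Minimal usage sketch. Assumption: AlgsSedgewickWayne.Digraph.Digraph can be
  # constructed from a vertex count and exposes addEdge(v, w) like the Java API.
  from AlgsSedgewickWayne.Digraph import Digraph
  g = Digraph(4)
  g.addEdge(0, 1)
  g.addEdge(1, 2)
  g.addEdge(2, 3)
  topo = Topological(g)
  if topo.hasOrder():
    print("topological order: {}".format(list(topo.order())))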
| gpl-2.0 | -1,320,169,917,173,574,100 | 39.04 | 95 | 0.695305 | false | 3.398981 | false | false | false |
WizeCommerce/medusa | setup.py | 1 | 1292 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "thrift_medusa",
version = "0.0.1",
author = "Samir Faci",
author_email = "",
description = ("Language agnostic tool for packaging of thrift based services and artifacts"),
license = "Apache Software License",
url = "https://github.com/WizeCommerce/medusa",
packages=['thrift_medusa', 'tests'],
#packages = find_packages(exclude="test"),
package_data = {'': ['*.yaml']},
long_description=read('README.md'),
install_requires=['lxml','paramiko','argparse','pyyaml','jinja2'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
#entry_points = { 'console_scripts': ['medusa = thrift_medusa:main', 'samir = thrift_medusa.thrift_medusa:main'] },
#scripts = ['./publishClients.py'],
test_suite='tests',
zip_safe = True
)
| apache-2.0 | -8,982,372,392,790,578,000 | 37 | 119 | 0.647059 | false | 3.578947 | false | false | false |
open-austin/influence-texas | src/influencetx/legislators/migrations/0001_initial.py | 1 | 1680 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-17 17:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Legislator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('name', models.CharField(max_length=45)),
('first_name', models.CharField(blank=True, max_length=20)),
('last_name', models.CharField(blank=True, max_length=20)),
('party', models.CharField(choices=[('D', 'Democratic'), ('I', 'Independent'), ('R', 'Republican'), ('U', 'Unknown')], max_length=1)),
('chamber', models.CharField(choices=[('House', 'House'), ('Senate', 'Senate')], max_length=6)),
('district', models.IntegerField()),
('openstates_updated_at', models.DateTimeField()),
('url', models.URLField(blank=True)),
('photo_url', models.URLField(blank=True)),
],
),
migrations.CreateModel(
name='LegislatorIdMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('tpj_filer_id', models.IntegerField(db_index=True)),
],
),
]
| gpl-2.0 | -3,630,584,201,326,298,000 | 41 | 150 | 0.554167 | false | 4.009547 | false | false | false |
raphaelrpl/portal | backend/appengine/routes/questions/rest.py | 1 | 4013 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from time import sleep
from gaebusiness.business import CommandExecutionException
from permission_app.permission_facade import main_user_form
from tekton.gae.middleware.json_middleware import JsonResponse
from question_app import question_facade
from gaepermission.decorator import login_required
from gaecookie.decorator import no_csrf
from question_app.question_model import CategoryQuestion, Question
from category_app.category_model import Category
@login_required
@no_csrf
def index():
cmd = question_facade.list_questions_cmd()
question_list = cmd()
question_form = question_facade.question_form()
def localize_user(model):
dct = question_form.fill_with_model(model)
user = main_user_form().fill_with_model(model.user.get())
dct['user'] = user
return dct
question_dcts = [localize_user(m) for m in question_list]
return JsonResponse(question_dcts)
@login_required
def new(_resp, _logged_user, **question_properties):
if _logged_user is None:
_resp.status_code = 400
return JsonResponse({"name": "Login required!"})
quest = question_properties.get('question', {})
if not quest:
_resp.status_code = 400
return JsonResponse({"name": "Required Field"})
question = Question(**quest)
question.user = _logged_user.key
try:
question.put()
except CommandExecutionException:
_resp.status_code = 400
if not question.name:
return JsonResponse({"name": "Required field"})
return JsonResponse({"name": "Put a valid post"})
for c in question_properties.get("categorys", {}):
cat = Category.query(Category.name == c).fetch()
if cat:
category = CategoryQuestion(origin=cat[0], destination=question)
category.put()
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
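    # Brief pause before responding (presumably to give GAE's eventually
    # consistent datastore indexes time to include the new question).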
sleep(0.5)
return JsonResponse(data)
@login_required
def edit(_resp, _logged_user, **question_properties):
question_id = question_properties.get('id')
# key = ndb.Key('Question', int(question_id))
question = Question.get_by_id(int(question_id))
    if question_id is not None and int(_logged_user.key.id()) != int(question_properties.get('user', {}).get('id', 0)):
_resp.status_code = 400
return JsonResponse({"name": "This post don't belong to you!"})
if question is None:
_resp.status_code = 400
return JsonResponse({"name": "Invalid post"})
question.name = question_properties.get('name')
    try:
        question.put()
    except CommandExecutionException:
        _resp.status_code = 400
        return JsonResponse({"name": "Put a valid question"})
user_form = main_user_form()
form = question_facade.question_form()
question_dct = form.fill_with_model(question)
question_dct['user'] = user_form.fill_with_model(question.user.get())
return JsonResponse(question_dct)
# cmd = question_facade.update_question_cmd(question_id, **question_properties)
# return _save_or_update_json_response(_logged_user, cmd, _resp)
@login_required
def delete(_resp, id):
cmd = question_facade.delete_question_cmd(id)
try:
question = cmd()
# DeleteCategoryQuestion(destination=question).execute()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_dct = question_facade.question_form().fill_with_model(question)
return JsonResponse(question_dct)
def _save_or_update_json_response(_logged_user, cmd, _resp):
try:
question = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
return JsonResponse(data)
| mit | -3,836,463,681,952,738,300 | 33.299145 | 115 | 0.673561 | false | 3.821905 | false | false | false |
mozillazg/bustard | tests/httpbin/core.py | 1 | 21325 | # -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import random
import time
import uuid
from bustard.app import Bustard
from bustard.http import (
Response, Headers, jsonify as bustard_jsonify, redirect
)
from bustard.utils import json_dumps_default
from werkzeug.datastructures import WWWAuthenticate
from werkzeug.http import http_date
from werkzeug.serving import run_simple
from six.moves import range as xrange
from . import filters
from .helpers import (
get_headers, status_code, get_dict, get_request_range,
check_basic_auth, check_digest_auth, secure_cookie,
H, ROBOT_TXT, ANGRY_ASCII
)
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
def jsonify(*args, **kwargs):
response = bustard_jsonify(*args, **kwargs)
if not response.data.endswith(b'\n'):
response.data += b'\n'
return response
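# The trailing newline mirrors upstream httpbin's JSON responses (presumably so
# byte-for-byte comparisons against the original service keep passing).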
# Prevent WSGI from correcting the casing of the Location header
# BaseResponse.autocorrect_location_header = False
# Find the correct template folder when running from a different location
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
app = Bustard(__name__, template_dir=tmpl_dir)
render_template = app.render_template
url_for = app.url_for
# -----------
# Middlewares
# -----------
@app.after_request
def set_cors_headers(request, response):
response.headers['Access-Control-Allow-Origin'] = (
request.headers.get('Origin', '*')
)
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
# Both of these headers are only used for the "preflight request"
# http://www.w3.org/TR/cors/#access-control-allow-methods-response-header
response.headers['Access-Control-Allow-Methods'] = (
'GET, POST, PUT, DELETE, PATCH, OPTIONS'
)
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
response.headers['Access-Control-Allow-Headers'] = (
request.headers['Access-Control-Request-Headers']
)
return response
# ------
# Routes
# ------
@app.route('/')
def view_landing_page(request):
"""Generates Landing Page."""
tracking_enabled = 'HTTPBIN_TRACKING' in os.environ
return render_template('index.html', request=request,
tracking_enabled=tracking_enabled)
@app.route('/html')
def view_html_page(request):
"""Simple Html Page"""
return render_template('moby.html')
@app.route('/robots.txt')
def view_robots_page(request):
"""Simple Html Page"""
response = Response()
response.content = ROBOT_TXT
response.content_type = 'text/plain'
return response
@app.route('/deny')
def view_deny_page(request):
"""Simple Html Page"""
response = Response()
response.content = ANGRY_ASCII
response.content_type = 'text/plain'
return response
# return "YOU SHOULDN'T BE HERE"
@app.route('/ip')
def view_origin(request):
"""Returns Origin IP."""
return jsonify(origin=request.headers.get('X-Forwarded-For',
request.remote_addr))
@app.route('/headers')
def view_headers(request):
"""Returns HTTP HEADERS."""
return jsonify(get_dict(request, 'headers'))
@app.route('/user-agent')
def view_user_agent(request):
"""Returns User-Agent."""
headers = get_headers(request)
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET', 'OPTIONS'))
def view_get(request):
"""Returns GET Data."""
return jsonify(get_dict(request, 'url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post(request):
"""Returns POST Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put(request):
"""Returns PUT Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch(request):
"""Returns PATCH Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete(request):
"""Returns DELETE Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content(request):
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, gzipped=True))
@app.route('/deflate')
@filters.deflate
def view_deflate_encoded_content(request):
"""Returns Deflate-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, deflated=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
absolute = request.args.get('absolute', 'false').lower() == 'true'
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=absolute))
if absolute:
return _redirect(request, 'absolute', n, True)
else:
return _redirect(request, 'relative', n, False)
def _redirect(request, kind, n, external):
return redirect(url_for('{0}_redirect_n_times'.format(kind),
n=n - 1, _external=external, _request=request))
@app.route('/redirect-to')
def redirect_to(request):
"""302 Redirects to the given URL."""
args = CaseInsensitiveDict(request.args.items())
# We need to build the response manually and convert to UTF-8 to prevent
# werkzeug from "fixing" the URL. This endpoint should set the Location
# header to the exact string supplied.
response = Response('')
response.status_code = 302
response.headers['Location'] = args['url'].encode('utf-8')
return response
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
response = Response('')
response.status_code = 302
if n == 1:
response.headers['Location'] = url_for('view_get')
return response
response.headers['Location'] = app.url_for(
'relative_redirect_n_times', n=n - 1
)
return response
@app.route('/absolute-redirect/<int:n>')
def absolute_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=True))
return _redirect(request, 'absolute', n, True)
@app.route('/stream/<int:n>')
def stream_n_messages(request, n):
"""Stream n JSON messages"""
n = int(n)
response = get_dict(request, 'url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
'Content-Type': 'application/json',
})
@app.route('/status/<codes>',
methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def view_status_code(request, codes):
"""Return status code or random status code if more than one are given"""
if ',' not in codes:
code = int(codes)
return status_code(code)
choices = []
for choice in codes.split(','):
if ':' not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
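# Example: /status/200:3,500:1 picks 200 with relative weight 3 and 500 with
# weight 1 (roughly 75%/25%); weights are relative and need not sum to 1.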
@app.route('/response-headers')
def response_headers(request):
"""Returns a set of response headers from the query string """
headers = Headers(request.args.to_dict())
response = jsonify(headers)
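    # Echoing the requested headers can itself change Content-Length; rebuild
    # the JSON body until the advertised Content-Length stops changing.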
while True:
content_len_shown = response.headers['Content-Length']
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.to_list():
response.headers.add(key, value)
if response.headers['Content-Length'] == content_len_shown:
break
return response
@app.route('/cookies')
def view_cookies(request, hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/forms/post')
def view_forms_post(request):
"""Simple HTML form."""
return render_template('forms-post.html')
@app.route('/cookies/set/<name>/<value>')
def set_cookie(request, name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/set')
def set_cookies(request):
"""Sets cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/delete')
def delete_cookies(request):
"""Deletes cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(request, qop=None, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Digest auth"""
if qop not in ('auth', 'auth-int'):
qop = None
if 'Authorization' not in request.headers or \
not check_digest_auth(user, passwd) or \
'Cookie' not in request.headers:
response = app.make_response('')
response.status_code = 401
# RFC2616 Section4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request, 'remote_addr', u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]))
opaque = H(os.urandom(10))
auth = WWWAuthenticate('digest')
auth.set_digest('[email protected]', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ))
response.headers['WWW-Authenticate'] = auth.to_header()
response.headers['Set-Cookie'] = 'fake=fake_value'
return response
return jsonify(authenticated=True, user=user)
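# Note the Cookie check above: a client must retry with both the digest
# Authorization header and the 'fake' cookie set by the 401 response.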
@app.route('/delay/<delay>')
def delay_response(request, delay):
"""Returns a delayed response"""
delay = min(float(delay), 10)
time.sleep(delay)
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files'))
@app.route('/drip')
def drip(request):
"""Drips data over a duration after an optional initial delay."""
args = CaseInsensitiveDict(request.args.items())
duration = float(args.get('duration', 2))
numbytes = int(args.get('numbytes', 10))
code = int(args.get('code', 200))
pause = duration / numbytes
delay = float(args.get('delay', 0))
if delay > 0:
time.sleep(delay)
def generate_bytes():
for i in xrange(numbytes):
yield u'*'.encode('utf-8')
time.sleep(pause)
response = Response(generate_bytes(), headers={
'Content-Type': 'application/octet-stream',
'Content-Length': str(numbytes),
})
response.status_code = code
return response
@app.route('/base64/<value>')
def decode_base64(request, value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8') # base64 expects binary string as input
return base64.urlsafe_b64decode(encoded).decode('utf-8')
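# Example: GET /base64/aGVsbG8= returns 'hello'; the urlsafe alphabet also
# accepts '-' and '_' in place of '+' and '/'.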
@app.route('/cache', methods=('GET',))
def cache(request):
"""Returns a 304 if an If-Modified-Since header or
If-None-Match is present. Returns the same as a GET otherwise.
"""
is_conditional = (
request.headers.get('If-Modified-Since') or
request.headers.get('If-None-Match')
)
if is_conditional is None:
response = view_get(request)
response.headers['Last-Modified'] = http_date()
response.headers['ETag'] = uuid.uuid4().hex
return response
else:
return status_code(304)
@app.route('/cache/<int:value>')
def cache_control(request, value):
"""Sets a Cache-Control header."""
value = int(value)
response = view_get(request)
response.headers['Cache-Control'] = 'public, max-age={0}'.format(value)
return response
@app.route('/encoding/utf8')
def encoding(request):
return render_template('UTF-8-demo.txt')
@app.route('/bytes/<int:n>')
def random_bytes(request, n):
"""Returns n random bytes generated with given seed."""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
response = Response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = 'application/octet-stream'
return response
@app.route('/stream-bytes/<int:n>')
def stream_random_bytes(request, n):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield(bytes(chunks))
chunks = bytearray()
if chunks:
yield(bytes(chunks))
headers = {'Content-Type': 'application/octet-stream'}
return Response(generate_bytes(), headers=headers)
@app.route('/range/<int:numbytes>')
def range_request(request, numbytes):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
numbytes = int(numbytes)
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes'
})
response.status_code = 404
response.content = 'number of bytes must be in the range (0, 10240]'
return response
params = CaseInsensitiveDict(request.args.items())
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
duration = float(params.get('duration', 0))
pause_per_byte = duration / numbytes
request_headers = get_headers(request)
first_byte_pos, last_byte_pos = get_request_range(request_headers,
numbytes)
if (
first_byte_pos > last_byte_pos or
first_byte_pos not in xrange(0, numbytes) or
last_byte_pos not in xrange(0, numbytes)
):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': 'bytes */%d' % numbytes
})
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord('a') + (i % 26))
if len(chunks) == chunk_size:
yield(bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield(bytes(chunks))
content_range = 'bytes %d-%d/%d' % (first_byte_pos, last_byte_pos,
numbytes)
response_headers = {
'Content-Type': 'application/octet-stream',
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': content_range}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response
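# Worked example (assuming get_request_range parses a standard Range header):
# GET /range/26 with 'Range: bytes=0-9' -> 206 with body 'abcdefghij' (byte i
# is chr(ord('a') + i % 26)) and 'Content-Range: bytes 0-9/26'.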
@app.route('/links/<int:n>/<int:offset>')
def link_page(request, n, offset):
"""Generate a page containing n links to other pages which do the same."""
n = int(n)
offset = int(offset)
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ['<html><head><title>Links</title></head><body>']
for i in xrange(n):
if i == offset:
html.append('{0} '.format(i))
else:
html.append(link.format(url_for('link_page', n=n, offset=i), i))
html.append('</body></html>')
return ''.join(html)
@app.route('/links/<int:n>')
def links(request, n):
"""Redirect to first links page."""
n = int(n)
return redirect(url_for('link_page', n=n, offset=0))
@app.route('/image')
def image(request):
"""Returns a simple image of the type suggest by the Accept header."""
headers = get_headers(request)
if 'accept' not in headers:
return image_png(request) # Default media type to png
accept = headers['accept'].lower()
if 'image/webp' in accept:
return image_webp(request)
elif 'image/svg+xml' in accept:
return image_svg(request)
elif 'image/jpeg' in accept:
return image_jpeg(request)
elif 'image/png' in accept or 'image/*' in accept:
return image_png(request)
else:
return status_code(406) # Unsupported media type
@app.route('/image/png')
def image_png(request):
data = resource('images/pig_icon.png')
return Response(data, headers={'Content-Type': 'image/png'})
@app.route('/image/jpeg')
def image_jpeg(request):
data = resource('images/jackal.jpg')
return Response(data, headers={'Content-Type': 'image/jpeg'})
@app.route('/image/webp')
def image_webp(request):
data = resource('images/wolf_1.webp')
return Response(data, headers={'Content-Type': 'image/webp'})
@app.route('/image/svg')
def image_svg(request):
data = resource('images/svg_logo.svg')
return Response(data, headers={'Content-Type': 'image/svg+xml'})
def resource(filename):
path = os.path.join(
tmpl_dir,
filename)
return open(path, 'rb').read()
@app.route('/xml')
def xml(request):
response = Response(render_template('sample.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
if __name__ == '__main__':
run_simple('0.0.0.0', 5000, app, use_reloader=True, use_debugger=True)
| mit | 1,247,459,229,769,562,400 | 27.245033 | 81 | 0.608488 | false | 3.708696 | false | false | false |
SurfasJones/icecream-info | icecream/lib/python2.7/site-packages/sphinx/search/__init__.py | 1 | 11415 | # -*- coding: utf-8 -*-
"""
sphinx.search
~~~~~~~~~~~~~
Create a full-text search index for offline search.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import with_statement
import re
import cPickle as pickle
from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
from sphinx.util import jsdump, rpartition
class SearchLanguage(object):
"""
This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
of this class.
You should override `lang` class property too (e.g. 'en', 'fr' and so on).
.. attribute:: stopwords
This is a set of stop words of the target language. Default `stopwords`
is empty. This word is used for building index and embedded in JS.
.. attribute:: js_stemmer_code
Return stemmer class of JavaScript version. This class' name should be
``Stemmer`` and this class must have ``stemWord`` method. This string is
embedded as-is in searchtools.js.
This class is used to preprocess search word which Sphinx HTML readers
type, before searching index. Default implementation does nothing.
"""
lang = None
stopwords = set()
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
*/
var Stemmer = function() {
this.stemWord = function(w) {
return w;
}
}
"""
_word_re = re.compile(r'\w+(?u)')
def __init__(self, options):
self.options = options
self.init(options)
def init(self, options):
"""
Initialize the class with the options the user has given.
"""
def split(self, input):
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
languages.
"""
return self._word_re.findall(input)
def stem(self, word):
"""
This method implements stemming algorithm of the Python version.
Default implementation does nothing. You should implement this if the
language has any stemming rules.
This class is used to preprocess search words before registering them in
the search index. The stemming of the Python version and the JS version
(given in the js_stemmer_code attribute) must be compatible.
"""
return word
def word_filter(self, word):
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
"""
return not (((len(word) < 3) and (12353 < ord(word[0]) < 12436)) or
(ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
word.isdigit())))
from sphinx.search import en, ja
languages = {
'en': en.SearchEnglish,
'ja': ja.SearchJapanese,
}
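# Adding a language means subclassing SearchLanguage and registering it here.
# A minimal sketch (hypothetical French support; a real entry would also ship
# stopwords and a matching JavaScript stemmer in js_stemmer_code):
#
#     class SearchFrench(SearchLanguage):
#         lang = 'fr'
#         stopwords = set(['le', 'la', 'les', 'de', 'et'])
#         def stem(self, word):
#             return word.lower()
#
#     languages['fr'] = SearchFrench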
class _JavaScriptIndex(object):
"""
The search index as javascript file that calls a function
on the documentation search object to register the index.
"""
PREFIX = 'Search.setIndex('
SUFFIX = ')'
def dumps(self, data):
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
data = s[len(self.PREFIX):-len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not \
s.endswith(self.SUFFIX):
raise ValueError('invalid data')
return jsdump.loads(data)
def dump(self, data, f):
f.write(self.dumps(data))
def load(self, f):
return self.loads(f.read())
js_index = _JavaScriptIndex()
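# Round-trip sketch: js_index.dumps({'terms': {}}) yields
# 'Search.setIndex({terms:{}})' (jsdump leaves identifier-like keys unquoted),
# and js_index.loads() recovers the original dict.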
class WordCollector(NodeVisitor):
"""
A special visitor that collects words for the `IndexBuilder`.
"""
def __init__(self, document, lang):
NodeVisitor.__init__(self, document)
self.found_words = []
self.found_title_words = []
self.lang = lang
def dispatch_visit(self, node):
if node.__class__ is comment:
raise SkipNode
if node.__class__ is raw:
# Some people might put content in raw HTML that should be searched,
# so we just amateurishly strip HTML tags and index the remaining
# content
nodetext = re.sub(r'(?is)<style.*?</style>', '', node.astext())
nodetext = re.sub(r'(?is)<script.*?</script>', '', nodetext)
nodetext = re.sub(r'<[^<]+?>', '', nodetext)
self.found_words.extend(self.lang.split(nodetext))
raise SkipNode
if node.__class__ is Text:
self.found_words.extend(self.lang.split(node.astext()))
elif node.__class__ is title:
self.found_title_words.extend(self.lang.split(node.astext()))
class IndexBuilder(object):
"""
Helper class that creates a searchindex based on the doctrees
passed to the `feed` method.
"""
formats = {
'jsdump': jsdump,
'pickle': pickle
}
def __init__(self, env, lang, options, scoring):
self.env = env
# filename -> title
self._titles = {}
# stemmed word -> set(filenames)
self._mapping = {}
# stemmed words in titles -> set(filenames)
self._title_mapping = {}
# word -> stemmed word
self._stem_cache = {}
# objtype -> index
self._objtypes = {}
# objtype index -> (domain, type, objname (localized))
self._objnames = {}
# add language-specific SearchLanguage instance
self.lang = languages[lang](options)
if scoring:
with open(scoring, 'rb') as fp:
self.js_scorer_code = fp.read().decode('utf-8')
else:
self.js_scorer_code = u''
def load(self, stream, format):
"""Reconstruct from frozen data."""
if isinstance(format, basestring):
format = self.formats[format]
frozen = format.load(stream)
# if an old index is present, we treat it as not existing.
if not isinstance(frozen, dict) or \
frozen.get('envversion') != self.env.version:
raise ValueError('old format')
index2fn = frozen['filenames']
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
rv = {}
for k, v in mapping.iteritems():
if isinstance(v, int):
rv[k] = set([index2fn[v]])
else:
rv[k] = set(index2fn[i] for i in v)
return rv
self._mapping = load_terms(frozen['terms'])
self._title_mapping = load_terms(frozen['titleterms'])
# no need to load keywords/objtypes
def dump(self, stream, format):
"""Dump the frozen index to a stream."""
if isinstance(format, basestring):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
rv = {}
otypes = self._objtypes
onames = self._objnames
for domainname, domain in self.env.domains.iteritems():
for fullname, dispname, type, docname, anchor, prio in \
domain.get_objects():
# XXX use dispname?
if docname not in fn2index:
continue
if prio < 0:
continue
prefix, name = rpartition(fullname, '.')
pdict = rv.setdefault(prefix, {})
try:
typeindex = otypes[domainname, type]
except KeyError:
typeindex = len(otypes)
otypes[domainname, type] = typeindex
otype = domain.object_types.get(type)
if otype:
# use unicode() to fire translation proxies
onames[typeindex] = (domainname, type,
unicode(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
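                # Compress common anchors: '' means the anchor equals the
                # object's full name, '-' means '<type>-<fullname>'; anything
                # else is stored verbatim (expanded again by searchtools.js).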
if anchor == fullname:
shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
shortanchor = anchor
pdict[name] = (fn2index[docname], typeindex, prio, shortanchor)
return rv
def get_terms(self, fn2index):
rvs = {}, {}
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.iteritems():
if len(v) == 1:
fn, = v
if fn in fn2index:
rv[k] = fn2index[fn]
else:
rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
return rvs
def freeze(self):
"""Create a usable data structure for serializing."""
filenames = self._titles.keys()
titles = self._titles.values()
fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index)
objects = self.get_objects(fn2index) # populates _objtypes
objtypes = dict((v, k[0] + ':' + k[1])
for (k, v) in self._objtypes.iteritems())
objnames = self._objnames
return dict(filenames=filenames, titles=titles, terms=terms,
objects=objects, objtypes=objtypes, objnames=objnames,
titleterms=title_terms, envversion=self.env.version)
def prune(self, filenames):
"""Remove data for all filenames not in the list."""
new_titles = {}
for filename in filenames:
if filename in self._titles:
new_titles[filename] = self._titles[filename]
self._titles = new_titles
for wordnames in self._mapping.itervalues():
wordnames.intersection_update(filenames)
for wordnames in self._title_mapping.itervalues():
wordnames.intersection_update(filenames)
def feed(self, filename, title, doctree):
"""Feed a doctree to the index."""
self._titles[filename] = title
visitor = WordCollector(doctree, self.lang)
doctree.walk(visitor)
# memoize self.lang.stem
def stem(word):
try:
return self._stem_cache[word]
except KeyError:
self._stem_cache[word] = self.lang.stem(word)
return self._stem_cache[word]
_filter = self.lang.word_filter
for word in visitor.found_title_words:
word = stem(word)
if _filter(word):
self._title_mapping.setdefault(word, set()).add(filename)
for word in visitor.found_words:
word = stem(word)
if word not in self._title_mapping and _filter(word):
self._mapping.setdefault(word, set()).add(filename)
def context_for_searchtool(self):
return dict(
search_language_stemming_code = self.lang.js_stemmer_code,
search_language_stop_words =
jsdump.dumps(sorted(self.lang.stopwords)),
search_scorer_tool = self.js_scorer_code,
)
| mit | -8,059,802,016,210,315,000 | 33.279279 | 80 | 0.565484 | false | 4.215288 | false | false | false |
Huyuwei/tvm | topi/python/topi/image/resize.py | 1 | 7184 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from .. import tag
def resize(data, size, layout="NCHW", method="bilinear", align_corners=True, out_dtype=None):
"""Perform resize operation on the data.
Parameters
----------
    data : tvm.Tensor
        data is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    size: Tuple
        Output resolution (out_height, out_width) to scale to
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    method: {"bilinear", "nearest_neighbor", "bicubic"}
        Method to be used for resizing.
    align_corners: Boolean, optional
        To preserve the values at the corner pixels.
    out_dtype: string, optional
        Type to return. If left None, will be same as input type.
Returns
-------
output : tvm.Tensor
4-D with shape [batch, channel, in_height*scale, in_width*scale]
or [batch, in_height*scale, in_width*scale, channel]
or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
"""
method = method.lower()
if layout == 'NHWC':
in_n, in_h, in_w, in_c = data.shape
output_shape = [in_n, size[0], size[1], in_c]
elif layout == 'NCHW':
in_n, in_c, in_h, in_w = data.shape
output_shape = [in_n, in_c, size[0], size[1]]
# Otherwise layout must be NCHWxc
else:
in_n, in_c, in_h, in_w, in_cc = data.shape
output_shape = [in_n, in_c, size[0], size[1], in_cc]
if align_corners:
y_ratio = (in_h - 1).astype('float') / (size[0] - 1)
x_ratio = (in_w - 1).astype('float') / (size[1] - 1)
else:
y_ratio = (in_h).astype('float') / (size[0])
x_ratio = (in_w).astype('float') / (size[1])
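    # Worked example: resizing a width of 4 to 8 pixels gives
    # x_ratio = 3/7 with align_corners (so output pixel 7 maps exactly onto
    # input pixel 3), and x_ratio = 4/8 = 0.5 without it.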
def _get_pixel(n, c, y, x, cc):
y = tvm.max(tvm.min(y, in_h - 1), 0)
x = tvm.max(tvm.min(x, in_w - 1), 0)
if layout == 'NHWC':
return data(n, y, x, c).astype('float')
if layout == 'NCHW':
return data(n, c, y, x).astype('float')
# else must be NCHWxc
return data(n, c, y, x, cc).astype('float')
def _get_indices(*indices):
if layout == 'NHWC':
n, y, x, c = indices
cc = None
elif layout == 'NCHW':
n, c, y, x = indices
cc = None
else:
n, c, y, x, cc = indices
return n, c, y, x, cc
def _cast_output(value):
if out_dtype:
dtype = out_dtype
else:
dtype = data.dtype
return value.astype(dtype)
# Nearest neighbor computation
def _nearest_neighbor(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
if align_corners:
yint = tvm.round(in_y).astype('int32')
xint = tvm.round(in_x).astype('int32')
else:
# Add epsilon to floor to prevent gpu rounding errors.
epsilon = 1e-5
yint = tvm.floor(in_y + epsilon).astype('int32')
xint = tvm.floor(in_x + epsilon).astype('int32')
return _cast_output(_get_pixel(n, c, yint, xint, cc))
# Bilinear helper functions and computation.
def _lerp(A, B, t):
return A * (1.0 - t) + B * t
def _bilinear(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
p00 = _get_pixel(n, c, yint, xint, cc)
p10 = _get_pixel(n, c, yint, xint + 1, cc)
p01 = _get_pixel(n, c, yint + 1, xint, cc)
p11 = _get_pixel(n, c, yint + 1, xint + 1, cc)
col0 = _lerp(p00, p10, xfract)
col1 = _lerp(p01, p11, xfract)
value = _lerp(col0, col1, yfract)
return _cast_output(value)
# Bicubic helper function and computation.
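    # _cubic_kernel evaluates the interpolating cubic a*t^3 + b*t^2 + c*t + d
    # through the four samples A..D (Catmull-Rom form: t=0 yields B and
    # t=1 yields C); _bicubic applies it first along x, then along y.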
def _cubic_kernel(A, B, C, D, t):
a = -A / 2.0 + (3.0*B) / 2.0 - (3.0*C) / 2.0 + D / 2.0
b = A - (5.0*B) / 2.0 + 2.0*C - D / 2.0
c = -A / 2.0 + C / 2.0
d = B
return a*t*t*t + b*t*t + c*t + d
def _bicubic(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
# 1st row
p00 = _get_pixel(n, c, yint - 1, xint - 1, cc)
p10 = _get_pixel(n, c, yint - 1, xint + 0, cc)
p20 = _get_pixel(n, c, yint - 1, xint + 1, cc)
p30 = _get_pixel(n, c, yint - 1, xint + 2, cc)
# 2nd row
p01 = _get_pixel(n, c, yint + 0, xint - 1, cc)
p11 = _get_pixel(n, c, yint + 0, xint + 0, cc)
p21 = _get_pixel(n, c, yint + 0, xint + 1, cc)
p31 = _get_pixel(n, c, yint + 0, xint + 2, cc)
# 3rd row
p02 = _get_pixel(n, c, yint + 1, xint - 1, cc)
p12 = _get_pixel(n, c, yint + 1, xint + 0, cc)
p22 = _get_pixel(n, c, yint + 1, xint + 1, cc)
p32 = _get_pixel(n, c, yint + 1, xint + 2, cc)
# 4th row
p03 = _get_pixel(n, c, yint + 2, xint - 1, cc)
p13 = _get_pixel(n, c, yint + 2, xint + 0, cc)
p23 = _get_pixel(n, c, yint + 2, xint + 1, cc)
p33 = _get_pixel(n, c, yint + 2, xint + 2, cc)
# Interpolate bicubically
col0 = _cubic_kernel(p00, p10, p20, p30, xfract)
col1 = _cubic_kernel(p01, p11, p21, p31, xfract)
col2 = _cubic_kernel(p02, p12, p22, p32, xfract)
col3 = _cubic_kernel(p03, p13, p23, p33, xfract)
value = _cubic_kernel(col0, col1, col2, col3, yfract)
return _cast_output(value)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "bilinear":
compute_func = _bilinear
elif method == "bicubic":
compute_func = _bicubic
else:
raise ValueError('%s method is not supported.' % method)
return tvm.compute(output_shape, compute_func, name='resize', tag=tag.INJECTIVE)
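# A minimal usage sketch (hypothetical shapes; assumes the old top-level
# TVM API used throughout this file):
#
#     data = tvm.placeholder((1, 3, 32, 32), name='data')
#     out = resize(data, (64, 64), layout='NCHW', method='bilinear')
#     s = tvm.create_schedule(out.op)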
| apache-2.0 | 8,464,358,940,680,116,000 | 32.886792 | 96 | 0.549415 | false | 2.980913 | false | false | false |
ictofnwi/coach | dashboard/views.py | 1 | 19639 | import random
import re
import json
import pytz
import dateutil.parser
from datetime import datetime, timedelta
from pprint import pformat
from hashlib import md5
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from django.conf import settings
from django.template import RequestContext, loader
from django.db.models import Q
from models import Activity, Recommendation, LogEvent, GroupAssignment
from recommendation import recommend
from tincan_api import TinCan
from helpers import *
# Fetch TinCan credentials from settings
USERNAME = settings.TINCAN['username']
PASSWORD = settings.TINCAN['password']
ENDPOINT = settings.TINCAN['endpoint']
# Reference to TinCan verbs
COMPLETED = TinCan.VERBS['completed']['id']
PROGRESSED = TinCan.VERBS['progressed']['id']
# Reference to TinCan activity types
ASSESSMENT = TinCan.ACTIVITY_TYPES['assessment']
MEDIA = TinCan.ACTIVITY_TYPES['media']
QUESTION = TinCan.ACTIVITY_TYPES['question']
# Reference to progress URI in result/extension
PROGRESS_T = "http://uva.nl/coach/progress"
# Default barcode height
BARCODE_HEIGHT = 35
## Decorators
def identity_required(func):
def inner(request, *args, **kwargs):
        # Fetch email from GET parameters if present and store in session.
paramlist = request.GET.get('paramlist', None)
email = request.GET.get('email', None)
param_hash = request.GET.get('hash', None)
if paramlist is not None:
hash_contents = []
for param in paramlist.split(","):
if param == "pw":
hash_contents.append(settings.AUTHENTICATION_SECRET)
else:
hash_contents.append(request.GET.get(param, ""))
hash_string = md5(",".join(hash_contents)).hexdigest().upper()
if hash_string == param_hash and email is not None and email != "":
request.session['user'] = "mailto:%s" % (email, )
# Fetch user from session
user = request.session.get('user', None)
# If no user is specified, show information on how to login
if user is None:
return render(request, 'dashboard/loginfirst.html', {})
else:
return func(request, *args, **kwargs)
return inner
def check_group(func):
"""Decorator to check the group for A/B testing.
Users in group A see the dashboard and users in group B do not.
Users that are in no group will be assigned one, so that both groups differ
at most 1 in size. If both groups are the same size, the group will be
assigned pseudorandomly.
"""
def inner(request, *args, **kwargs):
# Fetch user from session
user = request.session.get('user', None)
# Case 1: Existing user
try:
assignment = GroupAssignment.objects.get(user=user)
if assignment.group == 'A':
return func(request, *args, **kwargs)
else:
return HttpResponse()
# Case 2: New user
except ObjectDoesNotExist:
# Case 2a: First half of new pair,
# randomly pick A or B for this user.
if GroupAssignment.objects.count() % 2 == 0:
group = random.choice(['A', 'B'])
if group == 'A':
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
else:
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
# Case 2b: Second half of new pair,
# choose the group that was not previously chosen.
else:
try:
last_group = GroupAssignment.objects.order_by('-id')[0].group
except:
last_group = random.choice(['A', 'B'])
if last_group == 'A':
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
else:
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
return inner
## Bootstrap
def bootstrap(request):
width = request.GET.get('width',0)
template = loader.get_template('dashboard/bootstrap.js')
return HttpResponse(
template.render(RequestContext(
request,
{ 'host': request.get_host(), 'width': width }
)),
content_type="application/javascript"
)
def bootstrap_recommend(request, milestones):
width = request.GET.get('width',0)
max_recs = int(request.GET.get('max', False))
return render(request, 'dashboard/bootstrap_recommend.js',
{'milestones': milestones,
'max_recs': max_recs,
'width': width,
'host': request.get_host()})
## Debug interface
def log(request):
logs = LogEvent.objects.order_by('-timestamp')[:100]
    data = request.GET.get('data', "0") == "1"
return render(request, 'dashboard/log.html',
{ 'logs': logs, 'data': data, 'host': request.get_host()})
## Interface
@identity_required
@check_group
def barcode(request, default_width=170):
"""Return an svg representing progress of an individual vs the group."""
# Fetch user from session
user = request.session.get('user', None)
width = int(request.GET.get('width', default_width))
data = {'width': width, 'height': BARCODE_HEIGHT}
# Add values
markers = {}
activities = Activity.objects.filter(type=ASSESSMENT)
for activity in activities:
if activity.user in markers:
markers[activity.user] += min(80, activity.value)
else:
markers[activity.user] = min(80, activity.value)
if user in markers:
data['user'] = markers[user]
del markers[user]
else:
data['user'] = 0
data['people'] = markers.values()
# Normalise
if len(markers) > 0:
maximum = max(max(data['people']), data['user'])
data['user'] /= maximum
data['user'] *= width
data['user'] = int(data['user'])
for i in range(len(data['people'])):
data['people'][i] /= maximum
data['people'][i] *= width
data['people'][i] = int(data['people'][i])
else:
# if no other persons have been active
# then user is assumed to be in the lead.
# This is regardless if the user has done anything at all.
data['user'] = width
return render(request, 'dashboard/barcode.svg', data)
@identity_required
@check_group
def index(request):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the dashboard
width = request.GET.get("width",300);
activities = Activity.objects.filter(user=user).order_by('time')
statements = map(lambda x: x._dict(), activities)
statements = aggregate_statements(statements)
for statement in statements:
statement['activity'] = fix_url(statement['activity'], request)
statements = split_statements(statements)
assignments = statements['assignments']
assignments.sort(key = lambda x: x['time'], reverse=True)
exercises = statements['exercises']
exercises.sort(key = lambda x: x['value'])
video = statements['video']
video.sort(key = lambda x: x['time'], reverse=True)
template = loader.get_template('dashboard/index.html')
context = RequestContext(request, {
'width': width,
'barcode_height': BARCODE_HEIGHT,
'assignments': assignments,
'exercises': exercises,
'video': video,
'host': request.get_host()
})
response = HttpResponse(template.render(context))
response['Access-Control-Allow-Origin'] = "*"
event = LogEvent(type='D', user=user, data="{}")
event.save()
return response
@identity_required
@check_group
def get_recommendations(request, milestones, max_recommendations=False):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the recommendations dashboard
width = request.GET.get("width", 300);
# Get maximum recommendations to be showed
max_recommendations = int(request.GET.get('max', max_recommendations))
# Fetch activities that can be perceived as seen by the user
seen = Activity.objects.filter(
Q(verb=COMPLETED) | Q(verb=PROGRESSED),
value__gte=30,
user=user
)
    # Further filter that list to narrow it down to activities that can be
# perceived as being done by the user.
done = seen.filter(value__gte=80)
# Preprocess the seen and done sets to be used later
seen = set(map(lambda x: hash(x.activity), seen))
done = set(map(lambda x: x.activity, done))
# Init dict containing final recommendations
recommendations = {}
# For every milestone we want to make recommendations for:
for milestone in milestones.split(','):
# Alas this is necessary on some servers
milestone = re.sub(r'http(s?):/([^/])',r'http\1://\2',milestone)
# Make sure the milestone is not already passed
if milestone not in done:
# Fetch list of rules from the context of this milestone.
# Rules contain antecedent => consequent associations with a
# certain amount of confidence and support. The antecedent is
# stored as a hash of the activities in the antecedent. The
# consequent is the activity that is recommended if you did the
            # activities in the antecedent. At the moment only the trail
            # recommendation algorithm is used, which has antecedents of only
            # one activity. If this were different, the antecedent hash check
            # would have to include creating powersets of a certain length.
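            # e.g. a rule antecedent={video_1} => consequent=exercise_3 with
            # confidence 0.6 and support 12 reads: 60% of the users who did
            # video_1 went on to do exercise_3 (12 users did both).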
rules = Recommendation.objects.filter(milestone=milestone)
# For each recommendation rule
for rule in rules:
# If the LHS applies and the RHS is not already done
if rule.antecedent_hash in seen and \
rule.consequent not in done:
# If the consequent was already recommended earlier
if rule.consequent in recommendations:
# Fetch earlier recommendation
earlier_rule = recommendations[rule.consequent]
                        # Calculate the original total by which the support
                        # was divided in order to get the confidence of the
                        # earlier recommendation
earlier_total = earlier_rule['support']
earlier_total /= float(earlier_rule['confidence'])
total = earlier_total + rule.support/rule.confidence
# Calculate combined values
support = earlier_rule['support'] + rule.support
confidence = support / float(total)
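                        # f_score (imported from helpers) presumably combines
                        # confidence and support like an F-beta measure; with
                        # beta=1.5 support weighs somewhat more heavily.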
score = f_score(confidence, support, beta=1.5)
# Update the earlier recommendation to combine both
earlier_rule['support'] = support
earlier_rule['confidence'] = confidence
earlier_rule['score'] = score
# If the consequent is recommended for the first time
else:
# Calculate F-score
score = f_score(rule.confidence, rule.support, beta=1.5)
# Store recommendation for this consequent
recommendations[rule.consequent] = {
'milestone': milestone,
'url': rule.consequent,
'id': rand_id(),
'name': rule.name,
'desc': rule.description,
'm_name': rule.m_name,
'confidence': rule.confidence,
'support': rule.support,
'score': score
}
# Convert to a list of recommendations.
# The lookup per consequent is no longer necessary
recommendations = recommendations.values()
# If recommendations were found
if len(recommendations) > 0:
# Normalise score
max_score = max(map(lambda x: x['score'], recommendations))
for recommendation in recommendations:
recommendation['score'] /= max_score
# Sort the recommendations using their f-scores
recommendations.sort(key = lambda x: x['score'], reverse=True)
# Cap the number of recommendations if applicable.
if max_recommendations:
recommendations = recommendations[:max_recommendations]
# Log Recommendations viewed
data = json.dumps({
"recs": map(lambda x: x['url'], recommendations),
"path": request.path,
"milestone_n": len(milestones.split(',')),
"milestones": milestones})
event = LogEvent(type='V', user=user, data=data)
event.save()
# Render the result
return render(request, 'dashboard/recommend.html',
{'recommendations': recommendations,
'context': event.id,
'width' : width,
'host': request.get_host()})
else:
return HttpResponse()
## Background processes
def cache_activities(request):
"""Create a cache of the Learning Record Store by getting all items since
the most recent one in the cache.
"""
# Dynamic interval retrieval settings
INTERVAL = timedelta(days=1)
EPOCH = datetime(2013, 9, 3, 0, 0, 0, 0, pytz.utc)
# Set aggregate to True if events concerning the same activity-person
# should be aggregated into one row. This has impact for recommendations.
aggregate = False
# Find most recent date
try:
# Selecting the the datetime of the latest stored item minus a margin
# of 6 hours. The margin is there to be slightly more resillient to
# variation (read mistakes) in timezone handling and also to cope with
# the situation that an event was stored later than it occured. The
# latter situation is one of the use cases of the Experience API.
# TODO: The 6 hour margin is arbitrary and a hack.
# We should find a better solution for this.
t1 = Activity.objects.latest('time').time - timedelta(hours=6)
except:
t1 = EPOCH
# Get new data
tincan = TinCan(USERNAME, PASSWORD, ENDPOINT)
statements = tincan.dynamicIntervalStatementRetrieval(t1, INTERVAL)
created_statement_count = 0
for statement in statements:
statement_type = statement['object']['definition']['type']
user = statement['actor']['mbox']
activity = statement['object']['id']
verb = statement['verb']['id']
name = statement['object']['definition']['name']['en-US']
description = statement['object']['definition']['description']['en-US']
time = dateutil.parser.parse(statement['timestamp'])
try:
raw_score = statement['result']['score']['raw']
min_score = statement['result']['score']['min']
max_score = statement['result']['score']['max']
value = 100 * (raw_score - min_score) / max_score
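            # NOTE: this normalisation assumes min_score == 0; the fully
            # general form would divide by (max_score - min_score).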
except KeyError:
try:
value = 100 * float(statement['result']['extensions'][PROGRESS_T])
except KeyError:
# If no information is given about the end result then assume a
# perfect score was acquired when the activity was completed,
# and no score otherwise.
if verb == COMPLETED:
value = 100
else:
value = 0
if aggregate:
a, created = Activity.objects.get_or_create(user=user,
activity=activity)
# Don't overwrite completed except with other completed events
# and only overwite with more recent timestamp
if created or (time > a.time and
(verb == COMPLETED or a.verb != COMPLETED)):
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
else:
a, created = Activity.objects.get_or_create(user=user,
verb=verb,
activity=activity,
time=time)
if created:
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
data = json.dumps({'t1':t1.isoformat(), 'created':created_statement_count});
event = LogEvent(type='C', user='all', data=data)
event.save()
return HttpResponse()
def generate_recommendations(request):
minsup = int(request.GET.get('minsup', 2))
    minconf = float(request.GET.get('minconf', .3))
    gamma = float(request.GET.get('gamma', .8))
# Mine recommendations
recommendations, names = recommend(
minsup=minsup,
minconf=minconf,
gamma=gamma
)
# Add recommendations to database
Recommendation.objects.all().delete()
for recommendation in recommendations:
model = Recommendation(
antecedent_hash = hash(recommendation['antecedent']),
confidence = recommendation['confidence'],
support = recommendation['support'],
milestone = recommendation['milestone'],
m_name = names[recommendation['milestone']][0],
name = names[recommendation['consequent']][0],
consequent = recommendation['consequent'],
description = names[recommendation['consequent']][1])
model.save()
event = LogEvent(type='G', user='all', data=json.dumps(recommendations))
event.save()
return HttpResponse(pformat(recommendations))
@identity_required
def track(request, defaulttarget='index.html'):
"""Track user clicks so that we may be able to improve recommendation
relevance in the future.
"""
# Fetch user from session
user = request.session.get('user', None)
# Fetch target URL from GET parameters
target = request.GET.get('target', defaulttarget)
# Fetch context log id from GET paramaters
context = request.GET.get('context', None)
if context is not None:
try:
context = LogEvent.objects.get(pk=int(context))
except LogEvent.DoesNotExist:
context = None
event = LogEvent(type='T', user=user, data=target, context=context)
event.save()
return redirect(fix_url(target, request))
| agpl-3.0 | -7,548,557,729,014,595,000 | 38.594758 | 82 | 0.585671 | false | 4.473576 | false | false | false |
mikacousin/olc | src/ascii_load.py | 1 | 25915 | """ASCII file: Load functions"""
import array
from olc.channel_time import ChannelTime
from olc.cue import Cue
from olc.define import MAX_CHANNELS, NB_UNIVERSES, App
from olc.device import Device, Parameter, Template
from olc.group import Group
from olc.independent import Independent
from olc.master import Master
from olc.sequence import Sequence
from olc.step import Step
def get_time(string):
"""String format : [[hours:]minutes:]seconds[.tenths]
Return time in seconds
"""
if ":" in string:
tsplit = string.split(":")
if len(tsplit) == 2:
time = int(tsplit[0]) * 60 + float(tsplit[1])
elif len(tsplit) == 3:
time = int(tsplit[0]) * 3600 + int(tsplit[1]) * 60 + float(tsplit[2])
else:
print("Time format Error")
time = 0
else:
time = float(string)
return time
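# A few illustrative examples of the accepted formats:
#     get_time("5")       -> 5.0
#     get_time("0.5")     -> 0.5
#     get_time("1:30")    -> 90.0
#     get_time("1:00:05") -> 3605.0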
class AsciiParser:
"""Parse ASCII files"""
def __init__(self):
self.default_time = App().settings.get_double("default-time")
def parse(self, readlines):
"""Parse stream"""
flag_seq = False
in_cue = False
flag_patch = False
flag_master = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_parameter = False
type_seq = "MainPlayback"
playback = False
txt = False
t_in = False
t_out = False
d_in = False
d_out = False
wait = False
channels = False
mem = False
channel_time = {}
template = None
devices = {}
parameters = {}
console = ""
item = ""
for line in readlines:
# Remove not needed endline
line = line.replace("\r", "")
line = line.replace("\n", "")
# Marker for end of file
if line[:7].upper() == "ENDDATA":
break
# Console type
if line[:7].upper() == "CONSOLE":
console = line[8:]
# Clear all
if line[:9].upper() == "CLEAR ALL":
del App().memories[:]
del App().chasers[:]
del App().groups[:]
del App().masters[:]
for page in range(2):
for i in range(20):
App().masters.append(Master(page + 1, i + 1, 0, 0))
App().patch.patch_empty()
App().sequence.__init__(1, text="Main Playback")
del App().sequence.steps[1:]
App().independents.__init__()
# Sequence
if line[:9].upper() == "$SEQUENCE":
p = line[10:].split(" ")
if int(p[0]) < 2 and not playback:
playback = True
type_seq = "MainPlayback"
else:
type_seq = "Chaser"
index_seq = int(p[0])
App().chasers.append(Sequence(index_seq, type_seq=type_seq))
del App().chasers[-1].steps[1:]
flag_seq = True
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
# Chasers
if flag_seq and type_seq == "Chaser":
if line[:4].upper() == "TEXT":
App().chasers[-1].text = line[5:]
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
p = line[5:].split(" ")
seq = p[0]
mem = float(p[1])
if in_cue:
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
cue = Cue(seq, mem, channels, text=txt)
step = Step(
seq,
cue,
time_in=t_in,
time_out=t_out,
delay_out=d_out,
delay_in=d_in,
wait=wait,
text=txt,
)
App().chasers[-1].add_step(step)
in_cue = False
t_out = False
t_in = False
channels = False
# Main Playback
if flag_seq and type_seq == "MainPlayback":
                if line[:1] == "!":
flag_seq = False
if line[:3].upper() == "CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[4:])
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[5:])
if in_cue:
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT" and not txt:
txt = line[7:]
if line[:12].upper() == "$$PRESETTEXT":
txt = line[13:]
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:6].upper() == "$$WAIT":
time = line[7:].split(" ")[0]
wait = get_time(time)
if line[:11].upper() == "$$PARTTIME ":
p = line[11:]
d = p.split(" ")[0]
if d == ".":
d = 0
delay = float(d)
time_str = p.split(" ")[1]
time = get_time(time_str)
if line[:14].upper() == "$$PARTTIMECHAN":
p = line[15:].split(" ")
# We could have several channels
for chan in p:
if chan.isdigit():
channel_time[int(chan)] = ChannelTime(delay, time)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
# Ignore channels greater than MAX_CHANNELS
                            if channel <= MAX_CHANNELS:
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line[:5].upper() == "$$AL ":
items = line[5:].split(" ")
channel = int(items[0])
if line[:4].upper() == "$$A ":
items = line[4:].split(" ")
channel = int(items[0])
param_number = int(items[1])
value = int(items[2])
if channel < MAX_CHANNELS:
device_number = abs(App().patch.channels[channel - 1][0][0])
device = App().patch.devices[device_number]
param = device.template.parameters.get(param_number)
high_byte = param.offset.get("High Byte")
low_byte = param.offset.get("Low Byte")
parameters[param_number] = {
"high byte": high_byte,
"low byte": low_byte,
"value": value,
}
devices[channel] = parameters
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
if not d_in:
d_in = 0.0
if not d_out:
d_out = 0.0
# Create Cue
cue = Cue(0, mem, channels, text=txt, devices=devices)
# Add cue to the list
App().memories.append(cue)
# Create Step
step = Step(
1,
cue,
time_in=t_in,
time_out=t_out,
delay_in=d_in,
delay_out=d_out,
wait=wait,
channel_time=channel_time,
text=txt,
)
# Add Step to the Sequence
App().sequence.add_step(step)
in_cue = False
txt = False
t_out = False
t_in = False
wait = False
mem = False
channels = False
channel_time = {}
devices = {}
parameters = {}
# Dimmers Patch
if line[:11].upper() == "CLEAR PATCH":
flag_seq = False
flag_patch = True
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
App().patch.patch_empty() # Empty patch
App().window.channels_view.flowbox.invalidate_filter()
            if flag_patch and line[:1] == "!":
flag_patch = False
if line[:7].upper() == "PATCH 1":
for p in line[8:].split(" "):
q = p.split("<")
if q[0]:
r = q[1].split("@")
channel = int(q[0])
output = int(r[0])
univ = int((output - 1) / 512)
level = int(r[1])
if univ < NB_UNIVERSES:
if channel < MAX_CHANNELS:
out = output - (512 * univ)
App().patch.add_output(channel, out, univ, level)
App().window.channels_view.flowbox.invalidate_filter()
else:
print("More than", MAX_CHANNELS, "channels")
else:
print("More than", NB_UNIVERSES, "universes")
# Parameter Definitions
if line[:9].upper() == "$PARAMDEF":
item = line[10:].split(" ")
number = int(item[0])
group = int(item[1])
                name = ""
for i in range(2, len(item)):
name += item[i] + " "
name = name[:-1]
App().parameters[number] = [group, name]
# Device Template
if flag_template:
                if line[:1] == "!":
flag_template = False
if line[:14].upper() == "$$MANUFACTURER":
template.manufacturer = line[15:]
if line[:11].upper() == "$$MODELNAME":
template.model_name = line[12:]
if line[:10].upper() == "$$MODENAME":
template.mode_name = line[11:]
if line[:10].upper() == "$$COLORCAL":
pass
if line[:11].upper() == "$$FOOTPRINT":
template.footprint = int(line[12:])
if line[:11].upper() == "$$PARAMETER":
item = line[12:].split(" ")
param_number = int(item[0])
# param_type = int(item[1])
# param_xfade = int(item[2])
parameter = Parameter(param_number)
flag_parameter = True
if flag_parameter:
if line[:8].upper() == "$$OFFSET":
item = line[9:].split(" ")
parameter.offset = {
"High Byte": int(item[0]),
"Low Byte": int(item[1]),
"Step": int(item[2]),
}
if line[:9].upper() == "$$DEFAULT":
parameter.default = int(line[10:])
if line[:11].upper() == "$$HIGHLIGHT":
parameter.highlight = int(line[12:])
if line[:7].upper() == "$$TABLE":
item = line[8:].split(" ")
start = int(item[0])
stop = int(item[1])
flags = int(item[2])
range_name = ""
for i in range(3, len(item)):
range_name += item[i] + " "
range_name = range_name[:-1]
parameter.table.append([start, stop, flags, range_name])
if line[:8].upper() == "$$RANGE ":
item = line[8:].split(" ")
percent = int(item[2]) == 1
parameter.range = {
"Minimum": int(item[0]),
"Maximum": int(item[1]),
"Percent": percent,
}
if line[:12].upper() == "$$RANGEGROUP":
pass
if line == "":
template.parameters[parameter.number] = parameter
flag_parameter = False
if line[:9].upper() == "$TEMPLATE":
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = True
name = line[10:]
template = Template(name)
App().templates.append(template)
# Devices
if line[:8].upper() == "$DEVICE ":
item = line[8:].split(" ")
channel = int(item[0])
output = int(item[1])
universe = int((output - 1) / 512)
output = output - (512 * universe)
template = ""
for i in range(6, len(item)):
template += item[i] + " "
template = template[:-1]
if channel < MAX_CHANNELS and universe < NB_UNIVERSES:
device = Device(channel, output, universe, template)
App().patch.add_device(device)
# Presets not in sequence
if line[:5].upper() == "GROUP" and console == "CONGO":
# On Congo, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = True
flag_template = False
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[6:])
if line[:7].upper() == "$PRESET" and (console in ("DLIGHT", "VLC")):
# On DLight, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_template = False
flag_preset = True
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[8:])
if flag_preset:
if line[:1] == "!":
flag_preset = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
# Find Preset's position
found = False
i = 0
for i, _ in enumerate(App().memories):
if App().memories[i].memory > preset_nb:
found = True
break
if not found:
# Preset is at the end
i += 1
if not txt:
txt = ""
# Create Preset
cue = Cue(0, preset_nb, channels, text=txt)
# Add preset to the list
App().memories.insert(i, cue)
flag_preset = False
txt = ""
# Groups
if line[:5].upper() == "GROUP" and console != "CONGO":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[6:])
if line[:6].upper() == "$GROUP":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[7:])
if flag_group:
if line[:1] == "!":
flag_group = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
if not txt:
txt = ""
# We don't create a group who already exist
group_exist = False
for grp in App().groups:
if group_nb == grp.index:
group_exist = True
if not group_exist:
App().groups.append(Group(group_nb, channels, txt))
flag_group = False
txt = ""
# Masters
if flag_master:
if line[:1] == "!":
flag_master = False
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if (line == "" or line[:13].upper() == "$MASTPAGEITEM") and int(
item[1]
) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], channels
)
flag_master = False
if line[:13].upper() == "$MASTPAGEITEM":
item = line[14:].split(" ")
# DLight use Type "2" for Groups
if console == "DLIGHT" and item[2] == "2":
item[2] = "13"
if item[2] == "2":
flag_seq = False
flag_patch = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_master = True
channels = array.array("B", [0] * MAX_CHANNELS)
# Only 20 Masters per pages
elif int(item[1]) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], item[3]
)
# Independents
if line[:16].upper() == "$SPECIALFUNCTION":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_group = False
flag_template = False
flag_inde = True
channels = array.array("B", [0] * MAX_CHANNELS)
text = ""
items = line[17:].split(" ")
number = int(items[0])
# Parameters not implemented:
# ftype = items[1] # 0: inclusive, 1: Inhibit, 2: Exclusive
# button_mode = items[2] # 0: Momentary, 1: Toggling
if flag_inde:
if line[:1] == "!":
flag_inde = False
if line[:4].upper() == "TEXT":
text = line[5:]
if line[:6].upper() == "$$TEXT" and not text:
text = line[7:]
if line[:4].upper() == "CHAN":
chan_list = line[5:].split(" ")
for channel in chan_list:
item = channel.split("/")
if item[0]:
chan = int(item[0])
level = int(item[1][1:], 16)
if chan <= MAX_CHANNELS:
channels[chan - 1] = level
if line == "":
inde = Independent(number, text=text, levels=channels)
App().independents.update(inde)
flag_inde = False
# MIDI mapping
if line[:10].upper() == "$$MIDINOTE":
item = line[11:].split(" ")
App().midi.midi_notes.update({item[0]: [int(item[1]), int(item[2])]})
if line[:8].upper() == "$$MIDICC":
item = line[9:].split(" ")
App().midi.midi_cc.update({item[0]: [int(item[1]), int(item[2])]})
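# A minimal usage sketch (hypothetical file name):
#
#     with open("show.asc") as f:
#         AsciiParser().parse(f.readlines())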
| gpl-3.0 | -7,586,853,367,817,942,000 | 41.001621 | 88 | 0.353849 | false | 4.790203 | false | false | false |