max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
hydra_one_navigation/scripts/make_plan_caller.py | PrabhjotKaurGosal/ROS_projects | 0 | 12787951 | <gh_stars>0
#! /usr/bin/env python
import rospy
from nav_msgs.srv import GetPlan, GetPlanRequest
import sys
rospy.init_node('service_client')
rospy.wait_for_service('/move_base/make_plan')
make_plan_service = rospy.ServiceProxy('/move_base/make_plan', GetPlan)
msg = GetPlanRequest()
msg.start.header.frame_id = 'map'
msg.start.pose.position.x = 0
msg.start.pose.position.y = 0
msg.start.pose.position.z = 0
msg.start.pose.orientation.x = 0
msg.start.pose.orientation.y = 0
msg.start.pose.orientation.z = 0
msg.start.pose.orientation.w = 1  # identity orientation; (0, 0, 0, 0) is not a valid quaternion
msg.goal.header.frame_id = 'map'
msg.goal.pose.position.x = 1
msg.goal.pose.position.y = 2
msg.goal.pose.position.z = 0
msg.goal.pose.orientation.x = 0
msg.goal.pose.orientation.y = 0
msg.goal.pose.orientation.z = 0
msg.goal.pose.orientation.w = 1  # identity orientation (unit quaternion)
result = make_plan_service(msg)
print(result)
| 2.09375 | 2 |
python3/numpy/Comparisons Masks and Boolean Logic.py | Nahid-Hassan/code-snippets | 2 | 12787952 | # Reference Book: Python Data Science Handbook (page:(70-77))
# Date(13 April, 2019) Day-3, Time = 3:25 PM
# This section covers the use of Boolean masks to examine and manipulate values
# within NumPy arrays.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn; seaborn.set() #set plot style
# use Pandas to extract rainfall inches as a NumPy array
rainfall = pd.read_csv('/media/nahid/New Volume/GitHub/Numpy/Seattle2014.csv')['PRCP'].values
# print(rainfall)
inches = rainfall / 254 #1/10mm -> inches
print(inches.shape) #(365,)
# fig = plt.figure()
# plt.hist(inches, 40)
# print(plt.show())
# fig.savefig('rainfallHistogram.png')
'''
This histogram gives us a general idea of what the data looks like: despite its
reputation, the vast majority of days in Seattle saw near zero measured rainfall
in 2014. But this doesn’t do a good job of conveying some information we’d like
to see: for example, how many rainy days were there in the year? What is the
average precipitation on those rainy days? How many days were there with more
than half an inch of rain?
Digging into the data
One approach to this would be to answer these questions by hand: loop through
the data, incrementing a counter each time we see values in some desired range.
For reasons discussed throughout this chapter, such an approach is very
inefficient, both from the standpoint of time writing code and time computing
the result. We saw in “Computation on NumPy Arrays: Universal Functions” on
page 50 that NumPy’s ufuncs can be used in place of loops to do fast
element-wise arithmetic operations on arrays; in the same way, we can use other
ufuncs to do element-wise comparisons over arrays, and we can then manipulate
the results to answer the questions we have. We’ll leave the data aside for
right now, and discuss some general tools in NumPy to use masking to quickly
answer these types of questions.
'''
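# A quick sketch of the efficiency point above (my addition, not from the book's
# listing): counting rainy days with a hand-written Python loop versus a
# vectorized boolean mask gives the same answer, but the mask is much faster.
import timeit
def count_rainy_loop(values):
    count = 0
    for v in values:
        if v > 0:
            count += 1
    return count
print(count_rainy_loop(inches) == np.sum(inches > 0))  # True, same answer
print('loop      :', timeit.timeit(lambda: count_rainy_loop(inches), number=1000))
print('vectorized:', timeit.timeit(lambda: np.sum(inches > 0), number=1000))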
a = np.array([1,2,3,4,5])
print(a < 3) # [ True True False False False]
# check this apply all of others relational operator
# like
print(a != 3) # [ True True False True True]
# It is also possible to do an element-by-element comparison of two arrays, and to
# include compound expressions:
print((2 * a) == (a ** 2)) # [False True False False False]
'''
As in the case of arithmetic operators, the comparison operators are implemented as
ufuncs in NumPy; for example, when you write x < 3 , internally NumPy uses
np.less(x, 3) . A summary of the comparison operators and their equivalent ufunc
is shown here:
Operator Equivalent ufunc
== np.equal
!= np.not_equal
< np.less
<= np.less_equal
> np.greater
>= np.greater_equal
'''
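# Small sketch of the table above (my addition): the operator form and the
# named ufunc produce identical boolean masks.
print(np.less(a, 3))                                   # [ True True False False False]
print(np.array_equal(a < 3, np.less(a, 3)))            # True
print(np.array_equal(a >= 3, np.greater_equal(a, 3)))  # True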
# Just as in the case of arithmetic ufuncs, these will work on arrays of any size and
# shape. Here is a two-dimensional example:
rng = np.random.RandomState(0)
x = rng.randint(10, size=(3,4))
print(x)
print(x < 5)
'''
[[5 0 3 3]
[7 9 3 5]
[2 4 7 6]]
[[False True True True]
[False False True False]
[ True True False False]]
'''
print(np.count_nonzero(x < 6)) # 8
print(np.sum(x < 6)) # 8
# how many values less than 6 in each row?
print(np.sum(x < 6, axis=1)) # [4 2 2]
# If we’re interested in quickly checking whether any or all the values are true, we can
# use(you guessed it) np.any() or np.all():
print(np.any(x < 8)) #True
print(np.any(x < 0)) #False
print(np.all(x < 10)) #True
print(np.all(x == 6)) # False
# np.all() and np.any() can be used along particular axes as well. For example:
print(np.all(x < 8, axis=1)) # [ True False True]
# Here all the elements in the first and third rows are less than 8, while this is not the
# case for the second row.
#Boolean operators
print(np.sum((inches > .5) & (inches < 1))) # 29
#So we see that there are 29 days with rainfall between 0.5 and 1.0 inches.
#Using the equivalence of A AND B and NOT (NOT A OR NOT B)
print(np.sum(~((inches <= 0.5) | (inches >= 1)))) # 29
'''
The following table summarizes the bitwise Boolean operators and their equivalent
ufuncs:
Operator Equivalent ufunc
& np.bitwise_and
| np.bitwise_or
^ np.bitwise_xor
~ np.bitwise_not
'''
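# Sketch (my addition): on boolean arrays the & operator is just the
# np.bitwise_and ufunc from the table above, so both forms count the same 29 days.
mask_ops = (inches > 0.5) & (inches < 1)
mask_ufunc = np.bitwise_and(inches > 0.5, inches < 1)
print(np.array_equal(mask_ops, mask_ufunc))  # True
print(np.sum(mask_ufunc))                    # 29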
print('Number of days without rain :',np.sum(inches == 0))
print('Number of days with rain :',np.sum(inches != 0))
print('Days with more than .5 inches :',np.sum(inches > 0.5))
'''
Number of days without rain : 215
Number of days with rain : 150
Days with more than .5 inches : 37
'''
print(x[x < 5]) # [0 3 3 3 2 4]
# construct a mask of all rainy days
rainy = (inches > 0)
# construct a mask of all summer days (June 21st is the 172nd day)
summer = (np.arange(365) - 172 < 90) & (np.arange(365) - 172 > 0)
print("Median precip on rainy days in 2014 (inches):", np.median(inches[rainy]))
print("Median precip on summer days in 2014 (inches): ",
np.median(inches[summer]))
print("Maximum precip on summer days in 2014 (inches): ",
np.max(inches[summer]))
print("Median precip on non-summer rainy days (inches):",
np.median(inches[rainy & ~summer]))
#Using the Keywords and/or Versus the Operators &/|
print(bool(42), bool(0)) #True False
print(bool(42 and 0)) #False
print(bool(42 or 0)) #True
print(bin(42)) # 0b101010
print(bin(59)) # 0b111011
print(bin(42 | 59)) # 0b111011
a = np.array([1, 0, 1, 0, 1, 0], dtype=bool)
b = np.array([1, 1, 1, 0, 1, 1], dtype=bool)
print(a | b) # [ True True True False True True]
#ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
# print( a or b)
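# Sketch (my addition): for a single True/False about whole boolean arrays,
# reduce them first with .any()/.all() and then use the keywords; keep &/| for
# the element-wise question.
print(a.any() and b.any())   # True -- aggregate truth of each array
print((a & b).any())         # True -- is any position True in both arrays?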
| 4 | 4 |
weatherman/sql_butler.py | aecobb53/weatherman | 1 | 12787953 | import sqlite3
import datetime
import yaml
class SQLButler:
"""
SQLButler handles data addition and extraction from the database. There is a csv
database version that is designed to be completely compatible and interchangeable,
but SQL is likely to be faster in the long run.
"""
def __init__(self, database_name):
self.headers = {
'time': 'datetime',
'city': 'integer',
'name': 'text',
'sky_id': 'integer',
'sky': 'text',
'sky_desc': 'text',
'temp': 'float',
'humidity': 'integer',
'wind': 'float',
'cover': 'integer',
'rain': 'float',
'snow': 'float',
}
# Load config file and set some parameters
self.master_config = 'etc/weatherman.yml'
with open(self.master_config) as ycf:
self.config = yaml.load(ycf, Loader=yaml.FullLoader)
if not isinstance(database_name, str):
raise TypeError('The provided database name is not a string')
self.database_name = database_name + '.sql'
def create_database(self):
"""
SQL needs to connect to the database any time it tries to do something.
I created the create function to either connect or create the database
if it does not already exist.
"""
self.conn = sqlite3.connect(self.database_name)
self.c = self.conn.cursor()
try:
self.c.execute("""CREATE TABLE weather (
time datetime,
city integer,
name text,
sky_id integer,
sky text,
sky_desc text,
temp float,
humidity integer,
wind float,
cover integer,
rain float,
snow float
)""")
except sqlite3.OperationalError:
pass
return self.c
def format_for_insert(self, data):
"""
Takes a dict and formats the proper data insert for SQL
"""
insert_data = []
try:
insert_data.append(data['time'].strftime(self.config['datetime_str']))
except:
insert_data.append('')
try:
if data['city'] is None:
raise ValueError
insert_data.append(data['city'])
except:
insert_data.append(0)
try:
if data['name'] is None:
raise ValueError
insert_data.append(data['name'])
except:
insert_data.append('')
try:
if data['sky_id'] is None:
raise ValueError
insert_data.append(data['sky_id'])
except:
insert_data.append(0)
try:
if data['sky'] is None:
raise ValueError
insert_data.append(data['sky'])
except:
insert_data.append('')
try:
if data['sky_desc'] is None:
raise ValueError
insert_data.append(data['sky_desc'])
except:
insert_data.append('')
try:
if data['temp'] is None:
raise ValueError
insert_data.append(data['temp'])
except:
insert_data.append(0)
try:
if data['humidity'] is None:
raise ValueError
insert_data.append(data['humidity'])
except:
insert_data.append(0)
try:
if data['wind'] is None:
raise ValueError
insert_data.append(data['wind'])
except:
insert_data.append(0)
try:
if data['cover'] is None:
raise ValueError
insert_data.append(data['cover'])
except:
insert_data.append(0)
try:
if data['rain'] is None:
raise ValueError
insert_data.append(data['rain'])
except:
insert_data.append(0)
try:
if data['snow'] is None:
raise ValueError
insert_data.append(data['snow'])
except:
insert_data.append(0)
return insert_data
def add_data(self, data):
"""
Add data sets up the data to be added.
I have not built out safety checks yet, but I plan to eventually in case the data
is changed in the main class and then passed on here.
"""
insert = self.format_for_insert(data)
sql = f"""INSERT INTO weather({','.join(self.headers.keys())})
VALUES(?,?,?,?,?,?,?,?,?,?,?,?)"""
self.c.execute(sql, insert)
def commit_table(self):
"""
This commits pending changes to the database; without it, inserts would not persist.
"""
self.conn.commit()
def multi_add(self, data_list):
"""
As you can imagine, having for loops everywhere just to add data is asking too
much, so I created a function to handle it all.
"""
self.c = self.create_database()
for data in data_list:
self.add_data(data)
self.commit_table()
def tuple_to_dict(self, tpl):
"""
When getting data out of the database it comes back in a list of tuples.
I wrote this to convert the tuple of data to a dict.
"""
line = list(tpl)
try:
line[0] = datetime.datetime.strptime(line[0], self.config['datetime_str'])
except ValueError:
# HERE purge the bad data eventually
line[0] = datetime.datetime.strptime(line[0], self.config['datetime_utc_str'])
dct = {k: v for k, v in zip(self.headers.keys(), line)}
return dct
def list_tuple_to_list_dict(self, lstt):
"""
This takes the list of tuples and converts it to a list of dicts
(not a list of sets; imagine how useless a list of sets would be here).
"""
lstd = []
for line_t in lstt:
lstd.append(self.tuple_to_dict(line_t))
return lstd
def query_database(self, parameters):
"""
Based on the parameters, grab data from the database and filter it.
"""
dump = []
refined = []
self.c = self.create_database()
self.c.execute("""SELECT * FROM weather""")
data = self.c.fetchall()
dump = self.list_tuple_to_list_dict(data)
for entry in dump:
if parameters['start_time'] is not None:
if entry['time'] < parameters['start_time']:
continue
if parameters['end_time'] is not None:
if entry['time'] > parameters['end_time']:
continue
if parameters['exact_list'] is not None:
if entry['sky_id'] not in parameters['exact_list']:
continue
refined.append(entry)
return refined
def get_all_data(self):
"""
This gets all data
"""
dump = []
self.c = self.create_database()
self.c.execute("""SELECT * FROM weather""")
data = self.c.fetchall()
dump = self.list_tuple_to_list_dict(data)
return dump
# def get_bad_data(self):
# """
# This gets all data that is not clear... more or less. See a better explanation of why
# 200 and 799 are important in the main module.
# """
# dump = []
# self.c = self.create_database()
# self.c.execute("""SELECT * FROM weather WHERE
# sky_id BETWEEN 200 AND 799
# """)
# data = self.c.fetchall()
# dump = self.list_tuple_to_list_dict(data)
# return dump
def get_first_and_last(self):
"""
To get the timestamps of the first and last entry, I wrote this method.
"""
dump = []
self.c = self.create_database()
data = list(self.c.execute("""SELECT * FROM weather""").fetchall())
dump.append(self.tuple_to_dict(data[0]))
dump.append(self.tuple_to_dict(data[-1]))
print(dump)
return dump
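# A minimal usage sketch (my addition, not part of the original module). It
# assumes the 'etc/weatherman.yml' config read in __init__ exists and defines
# 'datetime_str' (e.g. '%Y-%m-%d %H:%M:%S'); the field names follow self.headers,
# and the values below are hypothetical.
if __name__ == '__main__':
    butler = SQLButler('weather_demo')
    sample = {
        'time': datetime.datetime.now(),
        'city': 5419384,          # hypothetical city id
        'name': 'Denver',
        'sky_id': 800,
        'sky': 'Clear',
        'sky_desc': 'clear sky',
        'temp': 72.0,
        'humidity': 30,
        'wind': 4.5,
        'cover': 0,
        'rain': 0.0,
        'snow': 0.0,
    }
    butler.multi_add([sample])
    print(butler.get_all_data())
    # query_database expects these three keys, each of which may be None
    print(butler.query_database({'start_time': None,
                                 'end_time': None,
                                 'exact_list': [800]}))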
| 3.53125 | 4 |
src/tack/Triggers.py | jmjwozniak/tack | 0 | 12787954 |
# TRIGGERS.PY
import logging
import re
import sys
import time
defaultDefault = object()
class TriggerFactory:
def __init__(self, tack):
self.tack = tack
self.kinds = { "timer" : TimerTrigger,
"process" : ProcessTrigger,
"globus" : GlobusTrigger,
"reader" : ReaderTrigger
}
def new(self, **kwargs):
try:
t = kwargs["kind"]
except:
logging.critical("Given trigger with no kind!")
sys.exit(1)
if not t in self.kinds:
logging.critical("No such kind: " + t)
sys.exit(1)
T = self.kinds[t]
result = T(self.tack, kwargs)
self.tack.add(result)
return result
class Trigger:
def __init__(self, tack, args, kind="SUPER"):
self.constructor(tack, args, kind)
def constructor(self, tack, args, kind):
self.tack = tack
self.id = self.tack.make_id()
self.kind = kind
self.name = self.key(args, "name")
logging.info("New Trigger: %s" % str(self))
def __str__(self):
return "%s <%i>" % (self.name, self.id)
# d: a dictionary ; k: the key ; default: optional default value
def key(self, d, k, default=defaultDefault):
try:
result = d[k]
except KeyError:
if default is defaultDefault:
logging.critical("Given trigger kind=%s with no %s!" %
(self.kind, k))
sys.exit(1)
else:
return default
return result
def info(self, message):
logging.info("%s: %s" % (str(self), message))
def debug(self, message):
logging.debug("%s: %s" % (str(self), message))
''' Returns True if something happened, else False'''
def poll(self):
logging.info("Default poll(): %s" % str(self))
''' Internal use only'''
def request_shutdown(self):
self.tack.request_shutdown(self)
''' Tells this Trigger to shutdown'''
def shutdown(self):
logging.info("Default shutdown(): %s" % str(self))
class TimerTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="timer")
self.interval = self.key(args, "interval", 0)
logging.info("New TimerTrigger \"%s\" (%0.3fs)" % \
(self.name, self.interval))
self.last_poll = time.time()
self.handler = self.key(args, "handler")
def poll(self):
self.debug("poll()")
t = time.time()
if t - self.last_poll > self.interval:
self.debug("Calling handler")
self.handler(self, t)
self.last_poll = t
return True
return False
import threading
from Queue import Queue, Empty
class ProcessTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="process")
self.command = args["command"]
logging.info("New ProcessTrigger \"%s\" <%i> (%s)" %
(self.name, self.id, self.command))
self.handler = self.key(args, "handler")
self.q_down = Queue()
self.q_up = Queue()
threading.Thread(target=self.run).start()
def poll(self):
self.debug("poll()")
try:
returncode = self.q_up.get_nowait()
except Empty:
return False
self.debug("returncode: " + str(returncode))
self.handler(self, returncode)
self.tack.remove(self)
return True
def run(self):
self.debug("process thread for <%i>: %s" %
(self.id, self.command))
import subprocess
tokens = self.command.split()
# cp = subprocess.call(tokens)
process = subprocess.Popen(tokens)
self.debug("pid is %i for: %s" % (process.pid, self.command))
while True:
p = process.poll()
if not p is None:
break
try:
message = self.q_down.get(timeout=1)
except Empty:
continue
assert(message == "TERMINATE")
self.info("terminating pid: %i: %s" %
(process.pid, self.command))
try:
process.terminate()
except OSError:
self.info("process <%i> already exited.")
process.poll()
break
self.debug("run(): done")
self.q_up.put(process.returncode)
def shutdown(self):
self.q_down.put("TERMINATE")
message = self.q_up.get()
self.debug("returncode: " + str(message))
class GlobusTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="globus")
self.user = self.key(args, "user")
self.token = self.key(args, "token")
self.task = self.key(args, "task")
logging.info("New GlobusTrigger \"%s\" <%i> (%s)" %
(self.name, self.id, self.task))
self.handler = self.key(args, "handler")
self.q = Queue()
threading.Thread(target=self.run).start()
def poll(self):
self.debug("poll()")
try:
status = self.q.get_nowait()
except Empty:
return False
self.debug("status: " + status)
self.handler(self, status)
self.tack.remove(self)
return True
def run(self):
self.debug("thread for <%i>: %s" % (self.id, self.task))
from globusonline.transfer.api_client \
import TransferAPIClient
token = self.get_token()
api = TransferAPIClient(self.user, goauth=token)
while True:
code, reason, data = api.task(self.task, fields="status")
status = data["status"]
print(status)
if status in ("SUCCEEDED", "FAILED"):
break
self.debug("Globus: done " + status)
self.q.put(status)
def get_token(self):
if self.token == "ENV":
import os
v = os.getenv("TOKEN")
if v is None:
print("Globus token environment variable TOKEN is unset!")
sys.exit(1)
else:
result = v
else:
result = self.token
return result
class ReaderTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="reader")
self.filename = self.key(args, "filename")
self.eof = self.key(args, "eof")
print ("eof: " + self.eof)
self.pattern = self.key(args, "pattern", default=None)
self.eof_obj = object()
if self.pattern:
self.pc = re.compile(self.pattern)
logging.info("New ReaderTrigger \"%s\" <%i> (%s)" %
(self.name, self.id, self.filename))
self.handler = self.key(args, "handler")
self.q_up = Queue()
self.q_down = Queue()
threading.Thread(target=self.run).start()
def poll(self):
self.debug("poll()")
try:
line = self.q_up.get_nowait()
except Empty:
return False
if (not line is self.eof_obj):
self.debug("line: " + line)
self.handler(self, line)
else:
self.debug("found EOF: " + self.eof)
self.tack.remove(self)
return True
def run(self):
self.debug("thread for %s" % self.filename)
with open(self.filename, "r") as f:
delay_max = 1.0
delay_min = 0.0
delay = delay_min
while True:
line = f.readline().strip()
if len(line) > 0:
delay = delay_min
if line == self.eof:
print("found eof")
break
if (not self.pattern) or self.pc.match(line):
self.q_up.put(line)
else:
delay = delay_incr(delay, delay_max)
try:
message = self.q_down.get(timeout=delay)
assert(message == "TERMINATE")
return
except Empty:
pass
self.debug("Reader: done: " + self.filename)
self.q_up.put(self.eof_obj)
def shutdown(self):
print("putting terminate")
self.q_down.put("TERMINATE")
def delay_incr(delay_now, delay_max):
if delay_now < 1.0:
result = delay_now + 0.1
else:
result = delay_now + 1.0
if result > delay_max:
result = delay_max
return result
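# A minimal usage sketch (my addition, not part of the original module).
# 'StubTack' is a hypothetical stand-in for the real tack object; only the
# methods this file actually calls (make_id, add, remove, request_shutdown)
# are stubbed out here.
class StubTack:
    def __init__(self):
        self.counter = 0
        self.triggers = []
    def make_id(self):
        self.counter += 1
        return self.counter
    def add(self, trigger):
        self.triggers.append(trigger)
    def remove(self, trigger):
        self.triggers.remove(trigger)
    def request_shutdown(self, trigger):
        pass
def demo():
    logging.basicConfig(level=logging.INFO)
    tack = StubTack()
    factory = TriggerFactory(tack)
    factory.new(kind="timer", name="heartbeat", interval=1.0,
                handler=lambda trigger, t: trigger.info("tick"))
    for _ in range(3):
        for trigger in list(tack.triggers):
            trigger.poll()
        time.sleep(1)
if __name__ == "__main__":
    demo()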
| 2.703125 | 3 |
tools/init_db.py | yktimes/YkForm | 0 | 12787955 | <reponame>yktimes/YkForm<gh_stars>0
from apps.users.models import User
from peewee import MySQLDatabase
from YkForm.settings import database
from apps.community.models import CommunityGroup,CommunityGroupMember,PostComment,Post,CommentLike
from apps.question.models import Question,Answer
def init():
# database.create_tables([User])
# database.create_tables([CommunityGroup,CommunityGroupMember])
# database.create_tables([PostComment,Post,CommentLike])
database.create_tables([Question,Answer])
if __name__ == '__main__':
init()
| 2.046875 | 2 |
1018-Banknotes.py | OrianaCadavid/uri-oj | 0 | 12787956 | <gh_stars>0
banknotes = [100, 50, 20, 10, 5, 2, 1]
def main():
money = int(raw_input())
print money
for bn in banknotes:
print '{} nota(s) de R$ {},00'.format(money / bn, bn)
money = money % bn
if __name__ == '__main__':
main()
| 3.71875 | 4 |
quizzes/00.organize.me/Cracking the Coding Interview/18-7-checkme.py | JiniousChoi/encyclopedia-in-code | 2 | 12787957 | <reponame>JiniousChoi/encyclopedia-in-code<filename>quizzes/00.organize.me/Cracking the Coding Interview/18-7-checkme.py
#!/usr/bin/env python3
''' Given a list of words, write a program that finds the longest word
that can be built by combining other words from the list. '''
import unittest
def find_longest_compound(words):
words.sort(key=lambda w: len(w), reverse=True)
db = {}
for word in words:
db[word] = None #value is dont-care
for word in words:
c = get_compound(db, word)
if c:
return ''.join(c)
return None
def get_compound(db, word):
''' returns [sub-word], at least 2 sub-words'''
def bi_split(word):
i_min = 1 #todo: optimize this
i_max = len(word)-i_min
for i in range(i_max, i_min-1, -1):
yield word[:i], word[i:]
for w1, w2 in bi_split(word):
if w1 in db and w2 in db:
return [w1, w2]
for w1, w2 in bi_split(word):
if w1 in db:
compound = get_compound(db, w2)
if compound:
return [w1] + compound
return []
class LongestCompoundTest(unittest.TestCase):
def test_sample(self):
self.assertLongestCompound(['cat', 'banana', 'dog', 'nana', 'walk', 'walker', 'dogwalker'], 'dogwalker')
self.assertLongestCompound(['cat', 'banana', 'dog', 'nana', 'walk', 'walker'], None)
def assertLongestCompound(self, words, longest_compound):
self.assertEqual(find_longest_compound(words), longest_compound)
if __name__=="__main__":
unittest.main()
| 3.578125 | 4 |
benchmarks/SimResults/_bigLittle_hrrs_splash_tugberk_heteroFair/cmp_raytrace/power.py | TugberkArkose/MLScheduler | 0 | 12787958 | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0466431,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.239324,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.315017,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.450727,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.780497,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.447637,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.67886,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.397229,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.18764,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0595135,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0163392,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.133049,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.120839,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.192563,
'Execution Unit/Register Files/Runtime Dynamic': 0.137178,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.334279,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.85709,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.31783,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00370645,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00370645,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00323215,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00125331,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00173585,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0123809,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0354002,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.116165,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.387954,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.394549,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.94645,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0538804,
'L2/Runtime Dynamic': 0.0110246,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.63042,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.63627,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.109781,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.109781,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.15094,
'Load Store Unit/Runtime Dynamic': 2.28746,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.270702,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.541404,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0960729,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0968322,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0637469,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.724585,
'Memory Management Unit/Runtime Dynamic': 0.160579,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 25.6475,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.207629,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0255462,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.230114,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.463289,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.18663,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0231676,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.220885,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.157012,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.169059,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.272686,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.137643,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.579389,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.169282,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.41247,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0296628,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00709111,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.058654,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0524431,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0883168,
'Execution Unit/Register Files/Runtime Dynamic': 0.0595342,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.129361,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.332118,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.5973,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0017317,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0017317,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00156419,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000636084,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000753349,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00578094,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.014607,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0504149,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.20682,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.165815,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.171232,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.58097,
'Instruction Fetch Unit/Runtime Dynamic': 0.407849,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0254974,
'L2/Runtime Dynamic': 0.00518343,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.72544,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.717634,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0481506,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0481507,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.95282,
'Load Store Unit/Runtime Dynamic': 1.00325,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.118731,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.237463,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0421381,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0424991,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.199388,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0272478,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.427883,
'Memory Management Unit/Runtime Dynamic': 0.069747,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.9891,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.078029,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00857709,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0845108,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.171117,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.25445,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0260819,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.223175,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.179711,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.167767,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.270601,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.13659,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.574958,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.164325,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.43877,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0339513,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00703689,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0590703,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0520421,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0930216,
'Execution Unit/Register Files/Runtime Dynamic': 0.059079,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.130967,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.337223,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.59981,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00159734,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00159734,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00144029,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000584364,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000747589,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00538255,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0135641,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0500294,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.1823,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.160699,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.169922,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.55526,
'Instruction Fetch Unit/Runtime Dynamic': 0.399597,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0295587,
'L2/Runtime Dynamic': 0.0058385,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.7387,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.724339,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0485798,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0485797,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.96811,
'Load Store Unit/Runtime Dynamic': 1.0125,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.11979,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.239579,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0425137,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0429429,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.197864,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0263873,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.427004,
'Memory Management Unit/Runtime Dynamic': 0.0693302,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.0082,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0893109,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00865606,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0836688,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.181636,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.26871,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0261018,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.22319,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.18093,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.168754,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.272193,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.137394,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.578341,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.165265,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.44212,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0341817,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00707829,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0593315,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0523483,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0935131,
'Execution Unit/Register Files/Runtime Dynamic': 0.0594266,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.131522,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.339643,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.60598,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00158782,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00158782,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00143097,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000580191,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000751987,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0053586,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0135098,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0503237,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.20102,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.160676,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.170922,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.57489,
'Instruction Fetch Unit/Runtime Dynamic': 0.40079,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.029635,
'L2/Runtime Dynamic': 0.00597297,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.75741,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.733466,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0491852,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0491853,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.98968,
'Load Store Unit/Runtime Dynamic': 1.02522,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.121282,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.242565,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0430435,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0434738,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.199028,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0263842,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.429078,
'Memory Management Unit/Runtime Dynamic': 0.069858,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.0549,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0899166,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00870797,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0841931,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.182818,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.29063,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.7104941532526752,
'Runtime Dynamic': 0.7104941532526752,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.0732315,
'Runtime Dynamic': 0.041044,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 76.7729,
'Peak Power': 109.885,
'Runtime Dynamic': 17.0415,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 76.6996,
'Total Cores/Runtime Dynamic': 17.0004,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.0732315,
'Total L3s/Runtime Dynamic': 0.041044,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 1.53125 | 2 |
scripts/preprocessing/index_reference_genome.py | shunhuahan/mcclintock | 62 | 12787959 | <filename>scripts/preprocessing/index_reference_genome.py
import os
import sys
import subprocess
import traceback
try:
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
except Exception as e:
track = traceback.format_exc()
print(track, file=sys.stderr)
print("ERROR...unable to locate required external scripts at: "+snakemake.config['args']['mcc_path']+"/scripts/", file=sys.stderr)
sys.exit(1)
def main():
try:
log = snakemake.params.log
mccutils.log("processing","making samtools and bwa index files for reference fasta", log=log)
mccutils.run_command(["samtools", "faidx", snakemake.input.ref],log=log)
mccutils.run_command(["bwa", "index", snakemake.input.ref], log=log)
for out in snakemake.output:
mccutils.check_file_exists(out)
mccutils.log("processing","samtools and bwa index files for reference fasta created")
except Exception as e:
track = traceback.format_exc()
print(track, file=sys.stderr)
print("ERROR...unable to index (samtools, bwa) reference fasta, please check the formatting of:", snakemake.input.ref, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main() | 2.1875 | 2 |
Python/Dimensionality-Reduction/t-SNE.py | James-McNeill/Learning | 0 | 12787960 | # t-SNE is a great technique for visual exploration of high dimensional datasets
# Non-numeric features must be dropped (or encoded) before fitting, since t-SNE works on numeric data only
# Import modules (df is assumed to be a pre-loaded pandas DataFrame)
from sklearn.manifold import TSNE
import seaborn as sns
import matplotlib.pyplot as plt
# Non-numerical columns in the dataset
non_numeric = ['Branch', 'Gender', 'Component']
# Drop the non-numerical columns from df
df_numeric = df.drop(non_numeric, axis=1)
# Create a t-SNE model with learning rate 50
m = TSNE(learning_rate=50)
# Fit and transform the t-SNE model on the numeric dataset
tsne_features = m.fit_transform(df_numeric)
print(tsne_features.shape)
# Perform visualizations of the reduced dataset
# Attach the two t-SNE components to df so they can be plotted
df['x'] = tsne_features[:, 0]
df['y'] = tsne_features[:, 1]
# Color the points by Gender
sns.scatterplot(x="x", y="y", hue='Gender', data=df)
# Show the plot
plt.show()
| 3.96875 | 4 |
statelint/nodes/succeed_state.py | taro-kayo/statelint | 0 | 12787961 | from .mixins import EndMixin
from .state import State
class SucceedState(EndMixin, State):
pass
| 1.28125 | 1 |
old/mumble/doorbot-v5.py | RABCbot/DoorButler | 3 | 12787962 | import time
import alsaaudio as aa
import pymumble.pymumble_py3 as pymumble
from pymumble.pymumble_py3.constants import *
import audioop
from threading import Thread
import queue
import subprocess
import wave
AUDIO_LEN = 640 # Audio chunk size
MUMBLE_CHANNELS = 1 # Mumble hardcoded mono
MUMBLE_RATE = 48000 # Mumble hardcoded rate Hz
MUMBLE_SAMPLESIZE = 2 # Mumble hardcoded bytes per sample
ALSA_FORMAT = aa.PCM_FORMAT_S16_LE # 16 bits litle-endian, must macth mumble sample size
MIC_VOLUME = 2
MUMBLE_SLEEP = 0.01
captureQueue = queue.Queue()
playQueue = queue.Queue()
class MumbleBot(Thread):
def __init__(self, host, usr, pwd, rate):
self.mumbleHost = host
self.mumbleUsr = usr
        self.mumblePwd = pwd
self.rate = rate
self.volume = 2
try:
self.client = pymumble.Mumble(self.mumbleHost, self.mumbleUsr, 64738, self.mumblePwd, None, None, True, [], False)
self.client.set_codec_profile("audio")
#self.client.set_loop_rate(0.01)
#self.client.callbacks.set_callback(PYMUMBLE_CLBK_USERCREATED, self.user_created)
#self.client.callbacks.set_callback(PYMUMBLE_CLBK_USERREMOVED, self.user_removed)
self.client.callbacks.set_callback(PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.text_received)
self.client.start()
self.client.is_ready()
except:
print("Unable to initialize mumble client")
Thread.__init__( self )
def user_created(self, user):
return
def user_removed(self, user, *args):
return
def text_received(self, text):
msg = text.message.split(" ")
if len(msg) > 0:
key = msg[0]
val = ""
if key == "hello": print("Hello World!")
if len(msg) > 1:
val = msg[1]
else:
return
def run(self):
while True:
if self.client.sound_output.get_buffer_size() > 0:
time.sleep(MUMBLE_SLEEP)
else:
time.sleep(10 * MUMBLE_SLEEP)
data = captureQueue.get()
data, state = audioop.ratecv(data, MUMBLE_SAMPLESIZE, MUMBLE_CHANNELS, self.rate, MUMBLE_RATE, None)
#data = audioop.mul(data, MUMBLE_CHANNELS, MIC_VOLUME)
self.client.sound_output.add_sound(data)
# captureQueue.task_done()
class PlayAudio(Thread):
def __init__(self, device, rate):
self.deafened = True
self.rate = rate
try:
            self.output = aa.PCM(aa.PCM_PLAYBACK, device)
            self.output.setchannels(MUMBLE_CHANNELS)
            self.output.setrate(self.rate)
            self.output.setformat(ALSA_FORMAT)
            self.output.setperiodsize(AUDIO_LEN)
except:
print("Unable to initialize output audio")
else:
self.deafened = False
Thread.__init__( self )
def run(self):
while True:
if self.deafened != True:
data = playQueue.get()
data, state = audioop.ratecv(data, MUMBLE_SAMPLESIZE, MUMBLE_CHANNELS, MUMBLE_RATE, self.rate, None)
self.output.write(data)
playQueue.task_done()
time.sleep(0.05)
class CaptureAudio(Thread):
def __init__(self, device, rate):
self.silence = 600 # Threshold to detect sound
self.muted = True
try:
self.input = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NORMAL, device)
self.input.setchannels(MUMBLE_CHANNELS)
self.input.setrate(rate)
self.input.setformat(ALSA_FORMAT)
self.input.setperiodsize(AUDIO_LEN)
except:
print("Unable to initialize input audio")
else:
self.muted = False
# self.wav = wave.open('tone.wav', 'rb')
Thread.__init__( self )
def run(self):
i = 0
while True:
if self.muted != True:
dataLen, data = self.input.read()
#dataLen = AUDIO_LEN
#data = self.wav.readframes(AUDIO_LEN)
if dataLen == AUDIO_LEN:
smin,smax = audioop.minmax(data, MUMBLE_SAMPLESIZE)
if smax - smin > self.silence:
captureQueue.put(data)
#captureQueue.join()
if __name__ == '__main__':
rate = 8000
bot = MumbleBot("192.168.101.112", "DoorBot", "", rate)
bot.setDaemon(True)
bot.start()
mic = CaptureAudio("plughw:1,0", rate)
mic.setDaemon(True)
mic.start()
# spk = PlayAudio(0, 8000)
# spk.setDaemon(True)
# spk.start()
print("DoorBot running...")
while True: time.sleep(0.1)
| 2.15625 | 2 |
rules.py | turnerdj95/supreme-meme | 0 | 12787963 | <reponame>turnerdj95/supreme-meme<gh_stars>0
""""
Author: <NAME>
Purpose: Set win conditions and rules for player behavior that don't initiate
"""
# Create class for checking board game for win conditions
class diaglysis:
def __init__(self,
                 board_array, # the board is the only thing that needs to be checked for win conditions
player_value, # The integer or float that corresponds to this player's checker marker
last_play, # The board coordinate for the last play position, we only need to check the last position
num_check_win = 4 # The number of adjacent tiles that create a win condition
):
self.array = board_array
self.play_v = player_value
self.win_c = num_check_win
# Interpreted as a two item list indexable iterable
self.position = last_play
def down_right(self):
# Execution function for down and right check
dr = lambda d, x, y:tuple([x+d, y+d])
# Trying is necessary because we will often index outside of allowable limits
try:
value_point = [self.array[dr(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
def down_left(self):
        # Execution function for down and left check
dl = lambda d, x, y:tuple([x-d, y+d])
# Trying is necessary because we will often index outside of allowable limits
try:
value_point = [self.array[dl(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
def up_left(self):
        # Execution function for up and left check
ul = lambda d, x, y:tuple([x-d, y-d])
# Trying is necessary because we will often index outside of allowable limits
try:
value_point = [self.array[ul(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
def up_right(self):
        # Execution function for up and right check
ur = lambda d, x, y:tuple([x+d, y-d])
# Trying is necessary because we will often index outside of allowable limits
try:
value_point = [self.array[ur(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
def just_right(self):
jr = lambda d, x, y: tuple([x, y+d])
try:
value_point = [self.array[jr(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
def just_left(self):
jl = lambda d, x, y: tuple([x, y-d])
try:
value_point = [self.array[jl(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
def just_down(self):
jd = lambda d, x, y: tuple([x+d, y])
try:
value_point = [self.array[jd(n, self.position[0], self.position[1])] for n in range(self.win_c)]
except:
return False
        # Condition for win is calculated by getting the player's value multiplied by the number of spaces he/she has in this row
if (self.win_c * self.play_v) == sum(value_point):
return True
else:
return False
# We don't need a just_up function because it won't ever flag true
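# --- Hypothetical usage sketch (not part of the original module) ---
# The class indexes the board with coordinate tuples, so a NumPy array is
# assumed here; 0 marks an empty cell and each player's checkers store that
# player's value.
if __name__ == "__main__":
    import numpy as np
    board = np.zeros((6, 7))
    for step in range(4):
        board[2 + step, 3 + step] = 1  # player 1 lays a down-right diagonal from (2, 3)
    checker = diaglysis(board_array=board, player_value=1, last_play=(2, 3))
    print(checker.down_right())  # True: four adjacent checkers found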
| 3.875 | 4 |
projects/python-desk/python_pq_bridge/alembic/versions/392e1a7038a5_set_default_to_string.py | zaqwes8811/coordinator-tasks | 0 | 12787964 | """set default to string
Revision ID: 392e1a7038a5
Revises: <PASSWORD>
Create Date: 2014-11-17 17:41:47.983000
"""
# revision identifiers, used by Alembic.
revision = '392e1a7038a5'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# default ignored
op.drop_column('task_entity', 'feature')
op.add_column('task_entity', sa.Column('feature', sa.String, server_default=""))
def downgrade():
# default ignored
op.drop_column('task_entity', 'feature')
op.add_column('task_entity', sa.Column('feature', sa.String, server_default='')) | 1.359375 | 1 |
cmd_parser.py | Play-Werewolf/werewolf-discord-bindings | 0 | 12787965 |
class Parser:
def __init__(self, discord_bot, context, command, args):
self.discord_bot = discord_bot
self.context = context
self.command = command
self.args = args
self.parse()
    def parse(self):
        commands = {
            "roles": self.print_roles,
            "new": self.start_new_game,
            "join": self.join_game
        }
        # Look up and invoke the handler registered for this command, if any
        handler = commands.get(self.command)
        if handler is not None:
            handler()
def print_roles(self):
self.context.channel.send("The roles are")
def start_new_game(self):
"""The args can be like that"""
pass
def check_channels(self):
pass
def set_channel_state(self):
pass
def join_game(self):
pass
| 2.96875 | 3 |
pyFixedFlatFile/exceptions.py | anderson89marques/PyFixedFlatFile | 5 | 12787966 | class ParamsException(Exception):
"""Exception raised when tp, fmt and size values are wrongs"""
pass
class LineSizeException(Exception):
    """Exception raised when the line size is bigger than specified"""
    pass
class LineIdentifierException(Exception):
    """Exception raised when the line identifier read from the
    file is different from the line identifier used in the specification.
    Note: the line identifier is defined using the .eq() function
    """
pass
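# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how a caller might raise one of these exceptions; the line and size
# values are made up.
def _example_check_line_size(line, expected_size):
    if len(line) > expected_size:
        raise LineSizeException(
            "line has {} characters, the spec allows {}".format(len(line), expected_size)
        )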
| 2.625 | 3 |
apps/dcl/nnm/dcl_util.py | yt7589/cvep | 0 | 12787967 | #
import datetime
class DclUtil(object):
@staticmethod
def datetime_format():
return datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S") | 2.953125 | 3 |
scripts/configuration.py | TheLokin/Kabalayn | 0 | 12787968 | import os
import json
from utils import Colors, clamp
from .controllers import Keyboard
from .managers.sprite_font import SpriteFontManager
from pygame.locals import K_UP, K_w, K_DOWN, K_s, K_LEFT, K_a, K_RIGHT, K_d, K_SPACE, K_e, K_RETURN, K_ESCAPE
def read_float(options, key, default):
try:
option = options[key]
return option if isinstance(option, float) else default
except KeyError:
return default
def read_int(options, key, default):
try:
option = options[key]
return option if isinstance(option, int) else default
except KeyError:
return default
def read_string(options, key, default):
try:
option = options[key]
return option if isinstance(option, (str)) else default
except KeyError:
return default
class Configuration(object):
lang = 'english.json'
master_volume = 1
sound_volume = 1
music_volume = 1
level = 1
@classmethod
def load(cls):
# Load options
if os.path.isfile('datafiles/options.json'):
with open('datafiles/options.json', 'r', encoding='utf-8') as file:
options = json.load(file)
# Language options
cls.lang = read_string(options, 'lang', cls.lang)
if not os.path.isfile(os.path.join('datafiles', 'lang', cls.lang)):
cls.lang = 'english.json'
# Volume options
cls.master_volume = read_float(options, 'master_volume', cls.master_volume)
cls.master_volume = clamp(cls.master_volume, 0, 1)
cls.sound_volume = read_float(options, 'sound_volume', cls.sound_volume)
cls.sound_volume = clamp(cls.sound_volume, 0, 1)
cls.music_volume = read_float(options, 'music_volume', cls.music_volume)
cls.music_volume = clamp(cls.music_volume, 0, 1)
# Init sprite font
SpriteFontManager.load_font('spr_font.png', '0123456789')
# Init keyboard
Keyboard.add('up', (K_UP, K_w))
Keyboard.add('down', (K_DOWN, K_s))
Keyboard.add('left', (K_LEFT, K_a))
Keyboard.add('right', (K_RIGHT, K_d))
Keyboard.add('interact', (K_e, K_RETURN, K_SPACE))
Keyboard.add('any', (K_LEFT, K_a, K_RIGHT, K_d, K_UP, K_w))
Keyboard.add('key_left', (K_LEFT, K_a))
Keyboard.add('key_right', (K_RIGHT, K_d))
Keyboard.add('key_shoot', (K_UP, K_w))
Keyboard.add('key1_left', K_a)
Keyboard.add('key1_right', K_d)
Keyboard.add('key1_shoot', K_w)
Keyboard.add('key2_left', K_RIGHT)
Keyboard.add('key2_right', K_LEFT)
Keyboard.add('key2_shoot', K_UP)
Keyboard.add('pause', K_ESCAPE)
@classmethod
def save(cls):
options = {}
# Language options
options['lang'] = cls.lang
# Volume options
options['master_volume'] = cls.master_volume
options['sound_volume'] = cls.sound_volume
options['music_volume'] = cls.music_volume
if not os.path.exists('datafiles'):
os.mkdir('datafiles')
with open('datafiles/options.json', 'w', encoding='utf-8') as file:
json.dump(options, file)
@classmethod
def state(cls):
if cls.level == 1:
return {
'color': Colors.ENEMY,
'shoot_delay': (50, 60),
'balls': [
(125, 175, 10)
],
'max_score': 3,
'max_balls': 6,
'music': 'bgm_stage2.ogg'
}
elif cls.level == 2:
return {
'color': Colors.KYRGOS,
'shoot_delay': (40, 60),
'balls': [
(125, 175, 10)
],
'max_score': 3,
'max_balls': 6,
'music': 'bgm_stage3.ogg'
}
elif cls.level == 3:
return {
'color': Colors.QUADROPA,
'shoot_delay': (30, 60),
'balls': [
(125, 175, 10)
],
'max_score': 3,
'max_balls': 6,
'music': 'bgm_stage3.ogg'
}
elif cls.level == 4:
return {
'color': Colors.LANDIUS,
'shoot_delay': (30, 40),
'balls': [
(125, 175, 10),
(65, 175, 10),
(185, 175, 10)
],
'max_score': 3,
'max_balls': 10,
'music': 'bgm_stage6.ogg'
}
elif cls.level == 5:
return {
'color': Colors.ALOHA,
'shoot_delay': (20, 40),
'balls': [
(125, 175, 10),
(65, 205, 10),
(185, 145, 10),
(65, 115, 10),
(185, 235, 10)
],
'max_score': 5,
'max_balls': 10,
'music': 'bgm_stage4.ogg'
}
elif cls.level == 6:
return {
'color': Colors.KEETESH,
'shoot_delay': (10, 30),
'balls': [
(125, 175, 10),
(65, 235, 10),
(185, 115, 10),
(65, 145, 10),
(185, 205, 10)
],
'max_score': 5,
'max_balls': 10,
'music': 'bgm_stage4.ogg'
}
elif cls.level == 7:
return {
'color': Colors.KROTIK,
'shoot_delay': (10, 20),
'balls': [
(125, 175, 10),
(65, 205, 10),
(185, 145, 10),
(65, 115, 10),
(185, 235, 10)
],
'max_score': 5,
'max_balls': 10,
'music': 'bgm_stage5.ogg'
} | 2.59375 | 3 |
statarb/src/python/lib/data_handlers/barra_test.py | mikimaus78/ml_monorepo | 51 | 12787969 | import os
import string
import util
import datetime
import newdb
import datafiles
#import config
database = newdb.get_db()
__CAPITALIZATION_UPDATE_THRESHOLD = 0.03 # 3%
def __capEquals(previous, new):
if previous == 0:
if new == 0: return True
else: return False
else:
return abs(new / previous) <= 1 + __CAPITALIZATION_UPDATE_THRESHOLD and abs(new / previous) >= 1 - __CAPITALIZATION_UPDATE_THRESHOLD
def __barraDateToCompact(barraDate):
return datetime.datetime.strptime(barraDate, "%d%b%Y")
#comma separated, string attributes enclosed in double quotes. floats always contain . , otherwise integers
def __getListFromBarraLine(line):
tokens = line.strip().split(",")
data = []
try:
for token in tokens:
            if token[0] == '"' or token[-1] == '"': #string: 'or' (rather than 'and') guards against quoted Compustat values that get split on an embedded comma
data.append(token.strip('"').strip())
elif token.find(".") < 0: #integer
data.append(int(token))
else: #double
data.append(float(token))
return data
except ValueError, e:
util.error("Error processing line: {}".format(line))
util.error(str(e))
return []
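# --- Hypothetical worked example (not from the original file) ---
# __getListFromBarraLine('"USADWH1","ACME CORP",25,1234.5')
#   -> ['USADWH1', 'ACME CORP', 25, 1234.5]
# A comma embedded in a quoted value would still be split into two tokens; the
# 'or' in the quote check above keeps both pieces as strings instead of
# crashing on int()/float().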
def __removeUnwantedAttributes(data):
if "BARRID" in data: del data["BARRID"]
#del data["TICKER"]
#del data["CUSIP"]
#del data["NAME"]
if "INTRA_MONTH_ADDITION" in data: del data["INTRA_MONTH_ADDITION"]
def insertBarraAttribute(datatype, barraid, date, source, attributeName, attributeValue, born, backfill=0, compareWithRecent=False, valueEquals=(lambda x, y: x == y)):
assert date.__class__ is long and born.__class__ is long
assert len(barraid) == 7
assert datatype in ("n", "s")
table = database.BARRA + datatype
attrType = database.getAttributeType(attributeName, source, datatype, table)
if datatype == 'n':
value = float(attributeValue)
elif datatype == 's':
value = str(attributeValue)[0:database.MAX_STR_LEN]
if not compareWithRecent:
updates = database.insertTimelineRow(table, {"barraid":barraid, "type":attrType, "date":date}, {"value":value, "backfill":backfill}, born)
database.updateAttributeStats(attrType, *updates)
else:
sqlWhere = "barraid=%(barraid)s AND type=%(type)s AND date<=%(date)s"
if born is None:
sqlWhere = sqlWhere + " AND died IS NULL"
else:
sqlWhere = sqlWhere + " AND born<=%(born)s AND (died IS NULL OR died>%(born)s)"
params = {"barraid": barraid, "type": attrType, "date":date, "born":born}
row = database.execute("SELECT value FROM {} WHERE {} ORDER BY date DESC,born DESC LIMIT 1".format(table, sqlWhere), params).fetchone()
if row is None or not valueEquals(row["value"], value):
updates = database.insertTimelineRow(table, {"barraid":barraid, "type":attrType, "date":date}, {"value":value, "backfill":backfill}, born)
database.updateAttributeStats(attrType, *updates)
#extra processing for TICKER,CUSIP
if attributeName == "TICKER":
database.killOrDeleteTimelineRow(database.BARRA + "xref", {"xref_type":2, "value":attributeValue}, date)
database.insertTimelineRow(database.BARRA + "xref", {"barraid":barraid, "xref_type":2}, {"value":attributeValue}, date)
elif attributeName == "CUSIP":
database.killOrDeleteTimelineRow(database.BARRA + "xref", {"xref_type":1, "value":util.cusip8to9(attributeValue)}, date)
database.insertTimelineRow(database.BARRA + "xref", {"barraid":barraid, "xref_type":1}, {"value":util.cusip8to9(attributeValue)}, date)
def updateBarraRef(source, barraid, cusip, timestamp, historical):
#get existing barraref
refTable = database.BARRA + "ref"
refTable = refTable + "_hist" if historical else refTable
code = database.getAttributeType("BARRAID", source, "s", refTable)
row = database.getTimelineRow(refTable, {"barraid":barraid}, timestamp)
barraSecid = None if row is None else row["secid"]
#get the implied mapping based on cusip
cusipSecid = database.getSecidFromXref("CUSIP", cusip, timestamp, "compustat_idhist", newdb.xrefsolve.preferUS)
if barraSecid is None and cusipSecid is None:
return None
elif barraSecid is None and cusipSecid is not None:
#database.insertTimelineRow(refTable, {"secid":cusipSecid}, {"barraid":barraid}, timestamp)
updates = database.killOrDeleteTimelineRow(refTable, {"secid":cusipSecid}, timestamp)
database.updateAttributeStats(code, *updates)
updates = database.insertTimelineRow(refTable, {"barraid":barraid}, {"secid":cusipSecid}, timestamp)
database.updateAttributeStats(code, *updates)
return cusipSecid
elif barraSecid is not None and cusipSecid is not None and barraSecid == cusipSecid:
return barraSecid
elif barraSecid is not None and cusipSecid is not None and barraSecid != cusipSecid:
updates = database.killOrDeleteTimelineRow(refTable, {"secid":cusipSecid}, timestamp)
database.updateAttributeStats(code, *updates)
updates = database.insertTimelineRow(refTable, {"barraid":barraid}, {"secid":cusipSecid}, timestamp)
database.updateAttributeStats(code, *updates)
return cusipSecid
else: #barraSecid is not None and cusipSecid is None
updates = database.killOrDeleteTimelineRow(refTable, {"barraid":barraid}, timestamp) #only one should be needed
database.updateAttributeStats(code, *updates)
return None
#remove non printable characters that can have creeped in name
def __printableString(name):
#check first if it is printable
printable = reduce(lambda x, y: x and (y in string.printable), name, True)
if printable:
return name
else:
newName = [c for c in name if c in string.printable]
newName = ''.join(newName).strip()
return newName
def verifyMappings(filePath, source):
return process(filePath, source, True)
def process(filePath, source, verifyOnly=False):
#process the RSK files for now
if filePath.find(".RSK.") < 0:
return
file = open(filePath, "r")
#The first 2 lines should be the pricedate and the modeldate for daily files
#For the monthly files it is just the model date
#check if it is a daily file or a monthly file. Check if the first line contains PriceDate
firstLine = file.readline()
if "PriceDate" in firstLine:
daily = True
file.seek(0) #get to the first line again
tokens = file.readline().strip().split(":")
if tokens[0] != "PriceDate":
util.error("It doesn't seem like a barra daily format")
raise Exception
else:
priceDate = __barraDateToCompact(tokens[1].strip())
tokens = file.readline().strip().split(":")
if tokens[0] != "ModelDate":
util.error("It doesn't seem like a barra daily format")
raise Exception
else:
modelDate = __barraDateToCompact(tokens[1].strip())
else:
daily = False
file.seek(0) #get to the first line again
token = file.readline().strip()
priceDate = __barraDateToCompact(token)
modelDate = __barraDateToCompact(token)
# If we have acquisition times, use these for real born time.
# Else, use the priceDate + 1 day
fileInfo = datafiles.read_info_file(filePath)
if fileInfo['date_last_absent'] is not None:
timestamp = util.convert_date_to_millis(fileInfo['date_first_present'])
backfill = 0;
else:
if daily:
date = priceDate + datetime.timedelta(days=1)
else:
date = priceDate + datetime.timedelta(days=2)
timestamp = util.convert_date_to_millis(date.strftime("%Y%m%d"))
backfill = 1
database.setAttributeAutoCreate(True)
priceDate = util.convert_date_to_millis(priceDate)
modelDate = util.convert_date_to_millis(modelDate)
#get the header names. comma separated, surrounded by double quotes
line = file.readline()
headers = __getListFromBarraLine(line)
for line in file:
data = __getListFromBarraLine(line)
if len(data) != len(headers):
util.warning("Skipping bad line: {}".format(line))
continue
data = dict(zip(headers, data))
barraid = data["BARRID"]
cusip = util.cusip8to9(data["CUSIP"])
#updateBarraRef(barraid, cusip, timestamp, False)
updateBarraRef(source, barraid, cusip, priceDate, True)
#Now, insert barra attributes and attribute values
__removeUnwantedAttributes(data)
for attributeName, attributeValue in data.iteritems():
if isinstance(attributeValue, str):
table = "s"
elif isinstance(attributeValue, int):
table = "n"
elif isinstance(attributeValue, float):
table = "n"
else:
util.error("Dude, attribute values should be either int,float or str")
raise
            #With the exception of capitalization and price, the other barra attributes
            #are attributes that are evaluated monthly. For them, the date should be the
            #model date. Price we ignore, while for capitalization we only create a new tuple
            #if the capitalization has changed by more than a threshold since the last date
            #for which we have a tuple
if attributeName == "PRICE":
continue
elif attributeName == "CAPITALIZATION":
insertBarraAttribute("n", barraid, priceDate, source, attributeName, attributeValue, timestamp, backfill, True, __capEquals)
elif attributeName in ("TICKER", "CUSIP", "NAME"):
#protect against crappy names:
if attributeName == "NAME": attributeValue = __printableString(attributeValue)
insertBarraAttribute("s", barraid, priceDate, source, attributeName, attributeValue, timestamp, backfill, True)
else:
insertBarraAttribute(table, barraid, modelDate, source, attributeName, attributeValue, timestamp, backfill)
file.close()
def regenerateMappings():
#get the cusips
rows = database.execute("SELECT * FROM {} WHERE type={} ORDER BY born,barraid".format(database.BARRA + "s", database.getAttributeType("CUSIP", "barra", None, None))).fetchall()
for row in rows:
#kill whoever owned the cusip
database.killOrDeleteTimelineRow("barra_xref", {"xref_type":1, "value":util.cusip8to9(row["value"])}, row["date"])
database.insertTimelineRow("barra_xref", {"barraid":row["barraid"], "xref_type":1}, {"value":util.cusip8to9(row["value"])}, row["date"])
#get the tickers
rows = database.execute("SELECT * FROM {} WHERE type={} ORDER BY born,barraid".format(database.BARRA + "s", database.getAttributeType("TICKER", "barra", None, None))).fetchall()
for row in rows:
#kill whoever owned the cusip
database.killOrDeleteTimelineRow("barra_xref", {"xref_type":2, "value":row["value"]}, row["date"])
database.insertTimelineRow("barra_xref", {"barraid":row["barraid"], "xref_type":2}, {"value":row["value"]}, row["date"])
if __name__ == "__main__":
#ammend barra data and add the INDNAME values
newdb.init_db(os.environ["DB_CONFIG_FILE"])
database = newdb.get_db()
#collect all the files processed so far
processedFiles = database.getProcessedFilesTimeOrdered("barra")
#database.start_transaction()
try:
database.start_transaction()
regenerateMappings();
database.commit()
# i = 0
# for file in processedFiles:
# if file=="20100401/USE3S1003.RSK.439dbb03":
# continue
# path="/".join((os.environ["DATA_DIR"], "barra","use3s_init_load", file))
# print datetime.datetime.now(), file
# if not os.path.exists(path):
# print "Not found, looking in other directory"
# path="/".join((os.environ["DATA_DIR"], "barra","use3s_daily", file))
# if not os.path.exists(path):
# print "Not found, looking in other directory"
# path="/".join((os.environ["DATA_DIR"], "barra","use3s_monthly", file))
# if not os.path.exists(path):
# print "Not found"
# continue
# database.start_transaction()
# process(path, "barra")
# database.commit()
except Exception, e:
print e
database.rollback()
# else:
# database.commit()
| 2.78125 | 3 |
instagram_api/interfaces/api_response.py | Yuego/instagram_api | 13 | 12787970 | <reponame>Yuego/instagram_api<gh_stars>10-100
from abc import ABCMeta, abstractmethod, abstractproperty
__all__ = ['ApiResponseInterface']
class ApiResponseInterface(metaclass=ABCMeta):
@abstractproperty
def is_ok(self): ...
@abstractmethod
def get_message(self): ...
@abstractproperty
def http_response(self): ...
@abstractproperty
def has_http_response(self): ...
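# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal concrete implementation; the constructor arguments are illustrative
# assumptions, not part of the real library.
class _ExampleApiResponse(ApiResponseInterface):
    def __init__(self, ok, message, http_response=None):
        self._ok = ok
        self._message = message
        self._http_response = http_response
    @property
    def is_ok(self):
        return self._ok
    def get_message(self):
        return self._message
    @property
    def http_response(self):
        return self._http_response
    @property
    def has_http_response(self):
        return self._http_response is not None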
| 2.375 | 2 |
cards/deck.py | doctoryes/card-play | 0 | 12787971 | """
Deck of cards.
"""
import itertools
import json
import random
SUITS = (
'Hearts',
'Diamonds',
'Clubs',
'Spades'
)
RANKS = [str(x) for x in range(2, 11)] + ['Jack', 'Queen', 'King', 'Ace']
class Card(object):
"""
A single card in a classic card deck.
"""
def __init__(self, suit=None, rank=None):
self.suit = suit
self.rank = rank
def to_json(self):
"""
Return a string containing a JSON representation of a card.
"""
return '{{"suit":"{}", "rank":"{}"}}'.format(self.suit, self.rank)
def from_json(self, json_str):
"""
Loads a card from a valid JSON representation.
"""
card = json.loads(json_str)
self.suit = card['suit']
self.rank = card['rank']
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.suit == other.suit and self.rank == other.rank
return False
def __unicode__(self):
return "{} of {}".format(self.rank, self.suit)
def __repr__(self):
return self.__unicode__()
class CardList(object):
"""
An ordered list of an arbitrary number of Card objects.
"""
    def __init__(self, card_list=None):
        """
        Create a card list from the given Card objects, or a full standard
        52-card deck when no list is supplied.
        """
if card_list is None:
self.reset_to_standard()
else:
self.cards = card_list
def reset_to_standard(self):
"""
Form a full 52 card deck, ordered.
"""
self.cards = [Card(s, r) for s, r in list(itertools.product(SUITS, RANKS))]
def shuffle(self):
"""
Shuffle the remaining cards in the deck.
"""
for idx, card in enumerate(self.cards):
new_idx = random.randrange(0, len(self.cards))
tmp_card = self.cards[new_idx]
self.cards[new_idx] = self.cards[idx]
self.cards[idx] = tmp_card
def give(self, num_cards):
"""
Give out the specified number of cards, removing them from the deck.
Cards are given from the top of the deck.
"""
if num_cards < 0:
return None
given_cards = self.cards[:num_cards]
self.cards = self.cards[num_cards:]
return given_cards
def take(self, cards):
"""
Takes a list of Card objects into this hand. Appends them to the end of the card list.
"""
if isinstance(cards, list):
for card in cards:
if isinstance(card, Card):
self.cards.append(card)
@property
def count(self):
"""
Returns the number of cards in the list.
"""
return len(self.cards)
def to_json(self):
"""
Return a string containing a JSON representation of a card list.
"""
return '[{}]'.format(','.join([card.to_json() for card in self.cards]))
def from_json(self, json_str):
"""
Loads a card list from a valid JSON representation.
WARNING: Erases all existing cards in the list!
"""
card_list = json.loads(json_str)
self.cards = []
for card in card_list:
self.cards.append(Card(card['suit'], card['rank']))
def __eq__(self, other):
# if isinstance(other, self.__class__):
if other.count == self.count:
for idx in range(0, other.count):
if self.cards[idx] != other.cards[idx]:
return False
return True
return False
def __unicode__(self):
return ':'.join(['{}{}'.format(c.suit[0], c.rank[0]) for c in self.cards])
def __repr__(self):
return self.__unicode__()
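# --- Hypothetical usage sketch (not part of the original module) ---
# Builds a standard deck, deals a five-card hand, and round-trips the hand
# through the JSON helpers defined above.
if __name__ == "__main__":
    deck = CardList()
    deck.shuffle()
    hand = CardList(deck.give(5))
    print(hand)              # e.g. H4:SK:D9:C2:HA (suit/rank initials)
    print(deck.count)        # 47 cards remain after dealing
    restored = CardList([])
    restored.from_json(hand.to_json())
    print(restored == hand)  # True: serialization preserves order and values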
| 4 | 4 |
solutions/Task Scheduler/solution.py | nilax97/leetcode-solutions | 3 | 12787972 | class Solution:
def leastInterval(self, tasks: List[str], n: int) -> int:
tasks_count = list(collections.Counter(tasks).values())
max_count = max(tasks_count)
max_count_tasks = tasks_count.count(max_count)
return max(len(tasks), (max_count-1)*(n+1)+max_count_tasks)
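# Worked example (added for clarity; `List` and `collections` are assumed to be
# pre-imported by the judge environment, as is typical for LeetCode stubs):
#   tasks = ["A", "A", "A", "B", "B", "B"], n = 2
#   Counter -> {A: 3, B: 3}, so max_count = 3 and max_count_tasks = 2
#   (3 - 1) * (2 + 1) + 2 = 8  ->  A B idle A B idle A B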
| 3.09375 | 3 |
src/py/analysis_lib/video_creator/colors.py | LandonFuhr/aseen | 0 | 12787973 | from matplotlib import cm
# (B, G, R)
mouse_colors = [
(83, 255, 18), # green
(0, 139, 232), # orange
(255, 136, 0), # blue
(0, 196, 255), # yellow
(0, 25, 255) # red
]
outline_colormap = cm.get_cmap('plasma', 100)
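# --- Hypothetical usage sketch (not part of the original module) ---
# matplotlib colormaps return RGBA floats in [0, 1]; OpenCV-style drawing (as
# the BGR tuples above suggest) expects 0-255 BGR, so convert like this.
def outline_color_bgr(value):
    r, g, b, _ = outline_colormap(value)
    return (int(b * 255), int(g * 255), int(r * 255))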
| 2.34375 | 2 |
nsd1805/python/day10/mysql_tedu.py | MrWangwf/nsd1806 | 0 | 12787974 | <gh_stars>0
import pymysql
conn = pymysql.connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='<PASSWORD>',
db='tedu1805',
charset='utf8'
)
cursor = conn.cursor()
insert1 = 'INSERT INTO departments(dep_id, dep_name) VALUES (%s, %s)'
# result1 = cursor.execute(insert1, (1, '人事部'))
# result2 = cursor.executemany(insert1, [(2, '运维部'), (3, '开发部')])
# result3 = cursor.executemany(insert1, [(4, '财务部'), (5, '市场部'), (6, '销售部')])
#############################################
# query1 = "SELECT * FROM departments"
# result4 = cursor.execute(query1)
# print(cursor.fetchone())
# print('-' * 30)
# print(cursor.fetchmany(2))
# print('-' * 30)
# print(cursor.fetchall())
#############################################
# query1 = "SELECT * FROM departments"
# result4 = cursor.execute(query1)
# print(cursor.fetchone())
# print('-' * 30)
# cursor.scroll(0, mode='absolute')    # move back to the beginning
# print(cursor.fetchone())
# print('-' * 30)
# cursor.scroll(2)   # mode defaults to 'relative'
# print(cursor.fetchone())
#############################################
# update1 = 'UPDATE departments SET dep_name=%s WHERE dep_name=%s'
# result5 = cursor.execute(update1, ('人力资源部', '人事部'))
# print(result5)   # the return value is 1, meaning one row was affected
#############################################
delete1 = 'DELETE FROM departments WHERE dep_id=%s'
result6 = cursor.execute(delete1, (5,))
conn.commit()  # without committing, the changes are not actually written to the database table
cursor.close()
conn.close()
| 2.546875 | 3 |
project/file_upload/models.py | Tanukium/msemi | 9 | 12787975 | from django.db import models
import os
from django.conf import settings
from django.core.exceptions import ValidationError
# Create your models here.
# Define user directory path
def file_size(value):
    limit = 512000  # 500 KiB, matching the error message below
if value.size > limit:
raise ValidationError('File too large. Size should not exceed 500KiB.')
def user_directory_path(instance, filename):
return os.path.join("files", filename)
class File(models.Model):
    file = models.FileField(upload_to=user_directory_path, null=True, validators=[file_size])
def abspath_file(self):
root = settings.MEDIA_ROOT
path = os.path.dirname(self.file.name)
file = os.path.basename(self.file.name)
return os.path.join(root, path, file)
| 2.40625 | 2 |
snmpv3_example1.py | maddula55/python_test | 0 | 12787976 | if True:
    IP = "172.16.31.10"
a_user="pysnmp"
auth_key = "galileo1"
encrypt_key= "galileo1"
snmp_user = (a_user, auth_key, encrypt_key)
pynet_rtr1 = (IP, 7961)
pynet_rtr2 = (IP, 8061)
snmp_data = snmp_helper.snmp_get_oid_v3(pynet_rtr1, snmp_user, oid="1.3.6.1.2.1.1.5.0")
# function snmp_get_oid_v3 is part of snmp helper module
output = snmp_helper.snmp_extract(snmp_data)
    print(output)
| 2.421875 | 2 |
app/utils/mail.py | jshwi/jss | 1 | 12787977 | """
app.utils.mail
==============
Setup app's mailer.
"""
import typing as t
from threading import Thread
from flask import Flask, current_app
from flask_mail import Message
from app.extensions import mail
def _send_async_email(app: Flask, msg: Message) -> None:
with app.app_context():
mail.send(msg)
def send_email(
attachments: t.Optional[t.Iterable[t.Dict[str, str]]] = None,
sync: bool = False,
**kwargs: t.Any,
) -> None:
"""Send a threaded email.
Without threading the app will wait until the email has been sent
before continuing.
In order to access the application context for this function a
protected ``werkzeug`` attribute has to be accessed.
From https://blog.miguelgrinberg.com/post/
``the-flask-mega-tutorial-part-xv-a-better-application-structure``:
Using current_app directly in the send_async_email() function
that runs as a background thread would not have worked, because
current_app is a context-aware variable that is tied to the
thread that is handling the client request. In a different
thread, current_app would not have a value assigned.
Passing current_app directly as an argument to the thread object
would not have worked either, because current_app is really a
proxy object that is dynamically mapped to the application
instance. So passing the proxy object would be the same as using
current_app directly in the thread.
What I needed to do is access the real application instance that
is stored inside the proxy object, and pass that as the app
argument. The current_app._get_current_object() expression
extracts the actual application instance from inside the proxy
object, so that is what I passed to the thread as an argument.
Note: Keyword args (dict) to pass to ``attachments``:
See ``flask_mail.Message.attach``.
* filename: filename of attachment
* content_type: file mimetype
* data: the raw file data
:param attachments: Iterable of kwargs to construct attachment.
    :param sync: Don't thread if True; defaults to False.
:param kwargs: Keyword args to pass to ``Message``:
See ``flask_mail.Message``.
"""
# noinspection PyProtectedMember
# pylint: disable=protected-access
app = current_app._get_current_object() # type: ignore
subject_prefix = app.config["MAIL_SUBJECT_PREFIX"]
subject = kwargs.get("subject", "")
kwargs["subject"] = f"{subject_prefix}{subject}"
kwargs["sender"] = kwargs.get("sender", app.config["DEFAULT_MAIL_SENDER"])
message = Message(**kwargs)
if attachments:
for attachment in attachments:
message.attach(**attachment)
if sync:
mail.send(message)
else:
thread = Thread(target=_send_async_email, args=(app, message))
thread.start()
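

# Minimal usage sketch (commentary added; not part of the original module).
# The subject, recipient and attachment values are hypothetical and only
# illustrate the keyword arguments documented in send_email's docstring;
# ``recipients`` and ``html`` are standard ``flask_mail.Message`` keywords:
#
#     send_email(
#         subject="Password reset",
#         recipients=["user@example.com"],
#         html="<p>Follow the link to reset your password.</p>",
#         attachments=[{"filename": "notes.txt",
#                       "content_type": "text/plain",
#                       "data": b"attachment bytes"}],
#         sync=False,
#     )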
| 2.703125 | 3 |
venv/lib/python3.6/site-packages/matchbook/endpoints/betting.py | slarkjm0803/autobets | 1 | 12787978 | <gh_stars>1-10
import datetime
from matchbook.endpoints.baseendpoint import BaseEndpoint
from matchbook import resources
from matchbook.enums import Side, Status, AggregationType
from matchbook.utils import clean_locals
class Betting(BaseEndpoint):
def get_orders(self, event_ids=None, market_ids=None, runner_ids=None, offer_id=None, offset=0, per_page=500,
interval=None, side=Side.Default, status=Status.Default, session=None):
"""
Get all orders which fit the argument filters.
:param event_ids: operate only on orders on specified events.
:type event_ids: comma separated string
:param market_ids: operate only on orders on specified markets.
:type market_ids: comma separated string
:param runner_ids: operate only on orders on specified runners.
:type runner_ids: comma separated string
:param offer_id: specific order id to use.
:param offset: starting point of results. Default 0.
:type offset: int
:param per_page: no. of offers returned in a single response, Max 500. Default 20.
:type per_page: int
:param interval: check for orders updated/created in last x seconds, status param must be 'matched'.
:type interval: int
:param side: filter results by side (dependent on exchange-type). Default None.
:type side: MatchbookAPI.bin.enums.Side
:param status: operate only on orders with specified status. Default None.
:type status: MatchbookAPI.bin.enums.Status
:param session: requests session to be used.
:type session: requests.Session
:returns: Orders data
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
params = clean_locals(locals())
params['exchange-type'] = self.client.exchange_type
method = 'offers'
date_time_sent = datetime.datetime.utcnow()
if offer_id:
method = 'offers/{0}'.format(offer_id)
params = {'odds-type': self.client.odds_type}
response = self.request("GET", self.client.urn_edge, method, params=params, session=session).json()
else:
response = self.request(
"GET", self.client.urn_edge, method, params=params, target='offers', session=session
)
date_time_received = datetime.datetime.utcnow()
return self.process_response(response, resources.Order, date_time_sent, date_time_received)
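
    # Hypothetical usage sketch (commentary added; not part of the original
    # client). `betting` is assumed to be an already-constructed Betting
    # endpoint and the market id is a placeholder:
    #
    #     open_orders = betting.get_orders(market_ids='12345', status=Status.Default)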
def send_orders(self, runner_id, odds, side, stake, temp_id=None, session=None):
"""
        Place one or more orders on a runner; multiple orders can be placed by providing lists of the required arguments.
:param runner_id: runner(s) on which to place bets.
:type runner_id: int
:param odds: odds at which we wish to place the bet.
:type odds: float
:param side: The type of bet to place, dependent on exchange.
:type side: MatchbookAPI.bin.enums.Side
:param stake: amount in account currency to place on the bet.
:type stake: float
:param temp_id: A helper ID generated by the client to help understand the correlation between multiple submitted offers and their responses.
:type temp_id: str
:param session: requests session to be used.
:type session: requests.Session
:returns: Orders responses, i.e. filled or at exchange or errors.
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
date_time_sent = datetime.datetime.utcnow()
params = {
'offers': [],
'odds-type': self.client.odds_type,
'exchange-type': self.client.exchange_type,
'currency': self.client.currency,
}
if isinstance(runner_id, list):
if isinstance(temp_id, list):
for i, _ in enumerate(runner_id):
params['offers'].append({'runner-id': runner_id[i], 'side': side[i], 'stake': stake[i],
'odds': odds[i], 'temp-id': temp_id[i]})
else:
for i, _ in enumerate(runner_id):
params['offers'].append({'runner-id': runner_id[i], 'side': side[i], 'stake': stake[i],
'odds': odds[i]})
else:
params['offers'].append(
{'runner-id': runner_id, 'side': side, 'stake': stake, 'odds': odds, 'temp-id': temp_id}
)
method = 'offers'
response = self.request("POST", self.client.urn_edge, method, data=params, session=session)
date_time_received = datetime.datetime.utcnow()
return self.process_response(
response.json().get('offers', []), resources.Order, date_time_sent, date_time_received
)
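
    # Hypothetical usage sketch (commentary added; not part of the original
    # client): submitting two orders in one call by passing parallel lists, as
    # the docstring above describes. Runner ids, odds, stakes and the Side
    # values are placeholders:
    #
    #     reports = betting.send_orders(runner_id=[111, 222],
    #                                   odds=[2.5, 3.0],
    #                                   side=[Side.Default, Side.Default],
    #                                   stake=[10.0, 10.0])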
def get_agg_matched_bets(self, event_ids=None, market_ids=None, runner_ids=None, side=None, offset=0, per_page=500,
aggregation_type=AggregationType.Default, session=None):
# TODO: Make aggregate matched bets resource
"""
Get matched bets aggregated.
:param event_ids: operate only on orders on specified events.
:type event_ids: comma separated string
:param market_ids: operate only on orders on specified markets.
:type market_ids: comma separated string
:param runner_ids: operate only on orders on specified runners.
:type runner_ids: comma separated string
:param offset: starting point of results. Default 0.
:type offset: int
:param per_page: no. of offers returned in a single response, Max 500. Default 20.
:type per_page: int
:param side: filter results by side (dependent on exchange-type). Default None.
:type side: MatchbookAPI.bin.enums.Side
:param aggregation_type: how to aggregate bets
:type aggregation_type: matchbook.enums.AggregationType
:param session: requests session to be used.
:type session: requests.Session
:returns: Orders data
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
method = 'bets/matched/aggregated'
response = self.request("GET", self.client.urn_edge, method, params=params, target='bets', session=session)
date_time_received = datetime.datetime.utcnow()
return self.process_response(
response, resources.MatchedBets, date_time_sent, date_time_received
)
def get_positions(self, event_ids=None, market_ids=None, runner_ids=None, offset=0, per_page=500, session=None):
#TODO: Make positions resource
"""
Get potential profit or loss on each runner.
:param event_ids: operate only on orders on specified events.
:type event_ids: comma separated string
:param market_ids: operate only on orders on specified markets.
:type market_ids: comma separated string
:param runner_ids: operate only on orders on specified runners.
:type runner_ids: comma separated string
:param offset: starting point of results. Default 0.
:type offset: int
:param per_page: no. of offers returned in a single response, Max 500. Default 20.
:type per_page: int
:param session: requests session to be used.
:type session: requests.Session
:returns: Orders data
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
method = 'account/positions'
response = self.request("GET", self.client.urn_edge, method, params=params, session=session)
date_time_received = datetime.datetime.utcnow()
return self.process_response(
response.json().get('bets', []), resources.Order, date_time_sent, date_time_received
)
def amend_orders(self, order_id, odds, side, stake, session=None):
"""
        Adjust/update one or more orders; multiple orders can be adjusted by providing lists of the required arguments.
:param order_id: order id to adjust.
:type order_id: int
:param odds: odds at which we wish to place the bet.
:type odds: float
:param side: back,lay|win,lose side to place bets on.
:type side: MatchbookAPI.bin.enums.Side
:param stake: amount in account currency to place on the bet.
:type stake: float
:param session: requests session to be used.
:type session: requests.Session
:returns: Orders responses, i.e. filled or at exchange or errors.
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
date_time_sent = datetime.datetime.utcnow()
params = {
'offers': [],
'odds-type': self.client.odds_type,
'exchange-type': self.client.exchange_type,
'currency': self.client.currency,
}
if isinstance(order_id, list):
method = 'offers'
for i, _ in enumerate(order_id):
params['offers'].append({'id': order_id[i], 'side': side[i], 'stake': stake[i], 'odds': odds[i]})
else:
method = 'offers/{}'.format(order_id)
del params['offers']
params['stake'] = stake
params['odds'] = odds
response = self.request('PUT', self.client.urn_edge, method, data=params, session=session)
date_time_received = datetime.datetime.utcnow()
return self.process_response(
response.json().get('offers', response.json()), resources.Order, date_time_sent, date_time_received
)
def delete_bulk_orders(self, event_ids=None, market_ids=None, runner_ids=None, offer_ids=None, session=None):
"""
Delete all orders which fit the argument filters.
:param event_ids: bulk delete orders on specified events.
:type event_ids: comma separated string
:param market_ids: bulk delete orders on specified markets.
:type market_ids: comma separated string
:param runner_ids: bulk delete orders on specified runners.
:type runner_ids: comma separated string
:param offer_ids: delete specific order id. Max offerids in one delete request is 25
:type offer_ids: comma separated string
:param session: requests session to be used.
:type session: requests.Session
:returns: orders deletion report.
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
method = 'offers'
response = self.request('DELETE', self.client.urn_edge, method, data=params, session=session)
date_time_received = datetime.datetime.utcnow()
return self.process_response(
response.json().get('offers', []), resources.Order, date_time_sent, date_time_received
)
def delete_order(self, offer_id, session=None):
"""
        Delete a single order identified by its offer id.
:param offer_id: delete specific order id.
:type offer_id: int
:param session: requests session to be used.
:type session: requests.Session
:returns: order deletion report.
:raises: MatchbookAPI.bin.exceptions.ApiError
"""
date_time_sent = datetime.datetime.utcnow()
method = 'offers/{}'.format(offer_id)
response = self.request('DELETE', self.client.urn_edge, method, session=session)
date_time_received = datetime.datetime.utcnow()
return self.process_response(
response.json(), resources.Order, date_time_sent, date_time_received
)
| 2.359375 | 2 |
tests/test_mpain.py | raeoks/mpain | 0 | 12787979 | from mpain import MPain
class TestMPain(object):
def test_sanity(self):
assert MPain
| 1.734375 | 2 |
DB/mysite/tables_daniel/migrations/0003_auto_20200629_1331.py | stancld/MSc-Project | 2 | 12787980 | <reponame>stancld/MSc-Project<filename>DB/mysite/tables_daniel/migrations/0003_auto_20200629_1331.py
# Generated by Django 3.0.6 on 2020-06-29 13:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tables_daniel', '0002_auto_20200629_1254'),
]
atomic = False
operations = [
migrations.RenameModel(
old_name='CompanyTable',
new_name='Companies',
),
migrations.RenameModel(
old_name='GlassdoorTable',
new_name='Reviews',
),
]
| 1.546875 | 2 |
MEG_SPC/spyking_circus_updates/clustering.py | tommytommy81/MEG-SPC | 1 | 12787981 | from .shared.utils import *
import circus.shared.algorithms as algo
from .shared import plot
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
from circus.shared.probes import get_nodes_and_edges
from .shared.files import get_dead_times
from circus.shared.messages import print_and_log, init_logging
from circus.shared.utils import get_parallel_hdf5_flag
def main(params, nb_cpu, nb_gpu, use_gpu):
numpy.random.seed(520)
parallel_hdf5 = get_parallel_hdf5_flag(params)
#params = detect_memory(params)
logger = init_logging(params.logfile)
logger = logging.getLogger('circus.clustering')
#################################################################
data_file = params.data_file
data_file.open()
N_e = params.getint('data', 'N_e')
N_total = params.nb_channels
N_t = params.getint('detection', 'N_t')
dist_peaks = params.getint('detection', 'dist_peaks')
template_shift = params.getint('detection', 'template_shift')
file_out_suff = params.get('data', 'file_out_suff')
sign_peaks = params.get('detection', 'peaks')
alignment = params.getboolean('detection', 'alignment')
smoothing = params.getboolean('detection', 'smoothing')
isolation = params.getboolean('detection', 'isolation')
over_factor = float(params.getint('detection', 'oversampling_factor'))
matched_filter = params.getboolean('detection', 'matched-filter')
spike_thresh = params.getfloat('detection', 'spike_thresh')
smoothing_factor = params.getfloat('detection', 'smoothing_factor')
if params.getboolean('data', 'global_tmp'):
tmp_path_loc = os.path.join(os.path.abspath(params.get('data', 'file_out_suff')), 'tmp')
else:
tmp_path_loc = tempfile.gettempdir()
plot_path = os.path.join(params.get('data', 'file_out_suff'), 'plots')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
safety_time = params.getint('clustering', 'safety_time')
safety_space = params.getboolean('clustering', 'safety_space')
comp_templates = params.getboolean('clustering', 'compress')
dispersion = params.get('clustering', 'dispersion').replace('(', '').replace(')', '').split(',')
dispersion = list(map(float, dispersion))
nodes, edges = get_nodes_and_edges(params)
chunk_size = params.getint('data', 'chunk_size')
max_elts_elec = params.getint('clustering', 'max_elts')
if sign_peaks == 'both':
max_elts_elec *= 2
nb_elts = int(params.getfloat('clustering', 'nb_elts')*N_e*max_elts_elec)
nb_repeats = params.getint('clustering', 'nb_repeats')
nclus_min = params.getfloat('clustering', 'nclus_min')
make_plots = params.get('clustering', 'make_plots')
sim_same_elec = params.getfloat('clustering', 'sim_same_elec')
noise_thr = params.getfloat('clustering', 'noise_thr')
remove_mixture = params.getboolean('clustering', 'remove_mixture')
extraction = params.get('clustering', 'extraction')
smart_search = params.getboolean('clustering', 'smart_search')
n_abs_min = params.getint('clustering', 'n_abs_min')
sensitivity = params.getfloat('clustering', 'sensitivity')
hdf5_compress = params.getboolean('data', 'hdf5_compress')
blosc_compress = params.getboolean('data', 'blosc_compress')
test_clusters = params.getboolean('clustering', 'test_clusters')
tmp_limits = params.get('fitting', 'amp_limits').replace('(', '').replace(')', '').split(',')
amp_limits = list(map(float, tmp_limits))
elt_count = 0
m_ratio = nclus_min
sub_output_dim = params.getint('clustering', 'sub_dim')
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.argsort(nodes)
to_write = ['clusters_', 'times_', 'data_', 'peaks_']
ignore_dead_times = params.getboolean('triggers', 'ignore_times')
jitter_range = params.getint('detection', 'jitter_range')
template_shift_2 = template_shift + jitter_range
nb_ss_bins = 50
use_hanning = params.getboolean('detection', 'hanning')
#################################################################
if sign_peaks == 'negative':
search_peaks = ['neg']
elif sign_peaks == 'positive':
search_peaks = ['pos']
elif sign_peaks == 'both':
search_peaks = ['neg', 'pos']
smart_searches = {}
for p in search_peaks:
smart_searches[p] = numpy.ones(N_e, dtype=numpy.float32)*int(smart_search)
basis = {}
if use_hanning:
hanning_filter = numpy.hanning(N_t)
if sign_peaks in ['negative', 'both']:
basis['proj_neg'], basis['rec_neg'] = io.load_data(params, 'basis')
if sign_peaks in ['positive', 'both']:
basis['proj_pos'], basis['rec_pos'] = io.load_data(params, 'basis-pos')
thresholds = io.load_data(params, 'thresholds')
mads = io.load_data(params, 'mads')
if do_spatial_whitening:
spatial_whitening = io.load_data(params, 'spatial_whitening')
if do_temporal_whitening:
temporal_whitening = io.load_data(params, 'temporal_whitening')
if matched_filter:
if sign_peaks in ['negative', 'both']:
waveform_neg = io.load_data(params, 'waveform')
waveform_neg /= (numpy.abs(numpy.sum(waveform_neg))* len(waveform_neg))
matched_tresholds_neg = io.load_data(params, 'matched-thresholds')
if sign_peaks in ['positive', 'both']:
waveform_pos = io.load_data(params, 'waveform-pos')
waveform_pos /= (numpy.abs(numpy.sum(waveform_pos))* len(waveform_pos))
matched_tresholds_pos = io.load_data(params, 'matched-thresholds-pos')
if ignore_dead_times:
all_dead_times = get_dead_times(params)
result = {}
if use_gpu:
import cudamat as cmt
## Need to properly handle multi GPU per MPI nodes?
if nb_gpu > nb_cpu:
gpu_id = int(comm.rank//nb_cpu)
else:
gpu_id = 0
cmt.cuda_set_device(gpu_id)
cmt.init()
cmt.cuda_sync_threads()
if test_clusters:
injected_spikes = io.load_data(params, 'injected_spikes')
if comm.rank == 0:
if not os.path.exists(tmp_path_loc):
os.makedirs(tmp_path_loc)
if alignment:
cdata = numpy.linspace(-jitter_range, jitter_range, int(over_factor*2*jitter_range))
xdata = numpy.arange(-template_shift_2, template_shift_2 + 1)
xoff = len(cdata)/2.
if isolation:
yoff = numpy.array(list(range(0, N_t//4)) + list(range(3*N_t//4, N_t)))
comm.Barrier()
if use_gpu and do_spatial_whitening:
spatial_whitening = cmt.CUDAMatrix(spatial_whitening, copy_on_host=False)
elec_positions = {}
for i in range(N_e):
result['loc_times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
result['all_times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
result['times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
result['clusters_' + str(i)] = numpy.zeros(0, dtype=numpy.int32)
result['peaks_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
for p in search_peaks:
result['pca_%s_' %p + str(i)] = None
indices = numpy.take(inv_nodes, edges[nodes[i]])
elec_positions[i] = numpy.where(indices == i)[0]
max_elts_elec //= comm.size
nb_elts //= comm.size
few_elts = False
nb_chunks, _ = data_file.analyze(chunk_size)
if nb_chunks < comm.size:
res = io.data_stats(params, show=False)
chunk_size = int(res*params.rate//comm.size)
if comm.rank == 0:
            print_and_log(["Too many cores, automatically resizing the data chunks"], 'debug', logger)
nb_chunks, last_chunk_len = data_file.analyze(chunk_size)
if smart_search is False:
gpass = 1
else:
gpass = 0
## We will perform several passes to enhance the quality of the clustering
while gpass < (nb_repeats + 1):
comm.Barrier()
if gpass == 1:
sdata = all_gather_array(smart_searches[search_peaks[0]][comm.rank::comm.size], comm, 0)
if comm.rank == 0:
if gpass == 0:
print_and_log(["Searching random spikes to sample amplitudes..."], 'default', logger)
elif gpass == 1:
if not numpy.all(sdata > 0):
lines = ["Smart Search disabled on %d electrodes" %(numpy.sum(sdata == 0))]
print_and_log(lines, 'debug', logger)
if numpy.any(sdata > 0):
print_and_log(["Smart Search of good spikes for the clustering (%d/%d)..." %(gpass, nb_repeats)], 'default', logger)
else:
print_and_log(["Searching random spikes for the clustering (%d/%d) (no smart search)" %(gpass, nb_repeats)], 'default', logger)
else:
print_and_log(["Searching random spikes to refine the clustering (%d/%d)..." %(gpass, nb_repeats)], 'default', logger)
for i in range(N_e):
if gpass == 0:
for p in search_peaks:
result['tmp_%s_' %p + str(i)] = numpy.zeros(0, dtype=numpy.float32)
result['nb_chunks_%s_' %p + str(i)] = 1
else:
n_neighb = len(edges[nodes[i]])
for p in search_peaks:
result['tmp_%s_' %p + str(i)] = numpy.zeros((0, basis['proj_%s' %p].shape[1] * n_neighb), dtype=numpy.float32)
# If not the first pass, we sync all the detected times among nodes and give all nodes the w/pca
result['all_times_' + str(i)] = numpy.concatenate((result['all_times_' + str(i)], all_gather_array(result['loc_times_' + str(i)], comm, dtype='uint32', compress=blosc_compress)))
result['loc_times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
if gpass == 1:
for p in search_peaks:
result['pca_%s_' %p + str(i)] = comm.bcast(result['pca_%s_' %p + str(i)], root=numpy.mod(i, comm.size))
result['data_%s_' %p + str(i)] = numpy.zeros((0, basis['proj_%s' %p].shape[1] * n_neighb), dtype=numpy.float32)
result['data_' + str(i)] = numpy.zeros((0, sub_output_dim), dtype=numpy.float32)
# I guess this is more relevant, to take signals from all over the recordings
numpy.random.seed(gpass)
all_chunks = numpy.random.permutation(numpy.arange(nb_chunks, dtype=numpy.int64))
rejected = 0
elt_count = 0
    ## This is not easy to read, but during the smart search pass, we need to loop over all chunks, and every node should
    ## search for spikes on a subset of electrodes, to avoid too much communication.
if gpass <= 1:
nb_elecs = numpy.sum(comm.rank == numpy.mod(numpy.arange(N_e), comm.size))
loop_max_elts_elec = params.getint('clustering', 'max_elts')
if sign_peaks == 'both':
loop_max_elts_elec *= 2
loop_nb_elts = numpy.int64(params.getfloat('clustering', 'nb_elts') * nb_elecs * loop_max_elts_elec)
to_explore = range(nb_chunks)
else:
loop_max_elts_elec = max_elts_elec
loop_nb_elts = nb_elts
to_explore = range(comm.rank, nb_chunks, comm.size)
if comm.rank == 0:
to_explore = get_tqdm_progressbar(to_explore)
comm.Barrier()
## Random selection of spikes
for gcount, gidx in enumerate(to_explore):
gidx = all_chunks[gidx]
if (elt_count < loop_nb_elts):
#print "Node", comm.rank, "is analyzing chunk", gidx, "/", nb_chunks, " ..."
local_chunk, t_offset = data_file.get_data(gidx, chunk_size, nodes=nodes)
local_shape = len(local_chunk)
if do_spatial_whitening:
if use_gpu:
local_chunk = cmt.CUDAMatrix(local_chunk, copy_on_host=False)
local_chunk = local_chunk.dot(spatial_whitening).asarray()
else:
local_chunk = numpy.dot(local_chunk, spatial_whitening)
if do_temporal_whitening:
local_chunk = scipy.ndimage.filters.convolve1d(local_chunk, temporal_whitening, axis=0, mode='constant')
#print "Extracting the peaks..."
all_peaktimes = numpy.zeros(0, dtype=numpy.uint32)
all_extremas = numpy.zeros(0, dtype=numpy.uint32)
if matched_filter:
if sign_peaks in ['positive', 'both']:
filter_chunk = scipy.ndimage.filters.convolve1d(local_chunk, waveform_pos, axis=0, mode='constant')
for i in range(N_e):
peaktimes = algo.detect_peaks(filter_chunk[:, i], matched_tresholds_pos[i], mpd=dist_peaks)
all_peaktimes = numpy.concatenate((all_peaktimes, peaktimes))
all_extremas = numpy.concatenate((all_extremas, i*numpy.ones(len(peaktimes), dtype=numpy.uint32)))
if sign_peaks in ['negative', 'both']:
filter_chunk = scipy.ndimage.filters.convolve1d(local_chunk, waveform_neg, axis=0, mode='constant')
for i in range(N_e):
peaktimes = algo.detect_peaks(filter_chunk[:, i], matched_tresholds_neg[i], mpd=dist_peaks)
all_peaktimes = numpy.concatenate((all_peaktimes, peaktimes))
all_extremas = numpy.concatenate((all_extremas, i*numpy.ones(len(peaktimes), dtype=numpy.uint32)))
else:
for i in range(N_e):
if sign_peaks == 'negative':
peaktimes = algo.detect_peaks(local_chunk[:, i], thresholds[i], valley=True, mpd=1) #mpd=dist_peaks
elif sign_peaks == 'positive':
peaktimes = algo.detect_peaks(local_chunk[:, i], thresholds[i], valley=False, mpd=20)
elif sign_peaks == 'both':
#------------------------------------MEG-SPIKES--------------------------------------------------#
if i == 144:
plot_ts = False
else: plot_ts = False
peaktimes = algo.detect_peaks(local_chunk[:, i], thresholds[i], valley=False, mpd=dist_peaks, show=plot_ts, chunk_number=gidx) #numpy.abs(local_chunk[:, i])
#------------------------------------MEG-SPIKES--------------------------------------------------#
all_peaktimes = numpy.concatenate((all_peaktimes, peaktimes))
all_extremas = numpy.concatenate((all_extremas, i*numpy.ones(len(peaktimes), dtype=numpy.uint32)))
#print "Removing the useless borders..."
if alignment:
local_borders = (template_shift_2, local_shape - template_shift_2)
else:
local_borders = (template_shift, local_shape - template_shift)
idx = (all_peaktimes >= local_borders[0]) & (all_peaktimes < local_borders[1])
all_peaktimes = numpy.compress(idx, all_peaktimes)
all_extremas = numpy.compress(idx, all_extremas)
local_peaktimes = numpy.unique(all_peaktimes)
local_offset = t_offset
if ignore_dead_times:
indices = numpy.searchsorted(all_dead_times, [t_offset, t_offset + local_shape])
if indices[0] != indices[1]:
local_peaktimes = numpy.array(list(set(local_peaktimes + t_offset).difference(all_dead_times[indices[0]:indices[1]])), dtype=numpy.uint32) - t_offset
local_peaktimes = numpy.sort(local_peaktimes)
if len(local_peaktimes) > 0:
diff_times = local_peaktimes[-1]-local_peaktimes[0]
all_times = numpy.zeros((N_e, diff_times+1), dtype=numpy.bool)
min_times = numpy.maximum(local_peaktimes - local_peaktimes[0] - safety_time, 0)
max_times = numpy.minimum(local_peaktimes - local_peaktimes[0] + safety_time + 1, diff_times)
n_times = len(local_peaktimes)
argmax_peak = numpy.random.permutation(numpy.arange(n_times))
all_idx = numpy.take(local_peaktimes, argmax_peak)
if gpass > 1:
for elec in range(N_e):
subset = result['all_times_' + str(elec)] - local_offset
peaks = numpy.compress((subset >= 0) & (subset < (local_shape)), subset)
inter = numpy.in1d(local_peaktimes, peaks)
indices = numpy.take(inv_nodes, edges[nodes[elec]])
remove = numpy.where(inter == True)[0]
for t in remove:
if safety_space:
all_times[indices, min_times[t]:max_times[t]] = True
else:
all_times[elec, min_times[t]:max_times[t]] = True
#print "Selection of the peaks with spatio-temporal masks..."
for midx, peak in zip(argmax_peak, all_idx):
if elt_count == loop_nb_elts:
break
if sign_peaks == 'negative':
elec = numpy.argmin(local_chunk[peak])
negative_peak = True
loc_peak = 'neg'
elif sign_peaks == 'positive':
elec = numpy.argmax(local_chunk[peak])
negative_peak = False
loc_peak = 'pos'
elif sign_peaks == 'both':
if N_e == 1:
if local_chunk[peak] < 0:
negative_peak = True
loc_peak = 'neg'
elif local_chunk[peak] > 0:
negative_peak = False
loc_peak = 'pos'
elec = 0
else:
if numpy.abs(numpy.max(local_chunk[peak])) > numpy.abs(numpy.min(local_chunk[peak])):
elec = numpy.argmax(local_chunk[peak])
negative_peak = False
loc_peak = 'pos'
else:
elec = numpy.argmin(local_chunk[peak])
negative_peak = True
loc_peak = 'neg'
if ((gpass > 1) or (numpy.mod(elec, comm.size) == comm.rank)):
indices = numpy.take(inv_nodes, edges[nodes[elec]])
if safety_space:
myslice = all_times[indices, min_times[midx]:max_times[midx]]
else:
myslice = all_times[elec, min_times[midx]:max_times[midx]]
is_local_extrema = elec in all_extremas[all_peaktimes == peak]
if is_local_extrema and not myslice.any():
to_accept = False
if gpass == 1:
to_update = result['data_%s_' %loc_peak + str(elec)]
else:
to_update = result['tmp_%s_' %loc_peak + str(elec)]
if len(to_update) < loop_max_elts_elec:
if alignment:
idx = elec_positions[elec]
zdata = numpy.take(local_chunk[peak - template_shift_2:peak + template_shift_2 + 1], indices, axis=1)
ydata = numpy.arange(len(indices))
if len(ydata) == 1:
#if False:
# smoothing_factor = smoothing_factor*xdata.size*mads[elec]**2
# f = scipy.interpolate.UnivariateSpline(xdata, zdata, s=smoothing_factor, k=3)
#else:
f = scipy.interpolate.UnivariateSpline(xdata, zdata, k=3, s=0)
if negative_peak:
rmin = (numpy.argmin(f(cdata)) - xoff)/over_factor
else:
rmin = (numpy.argmax(f(cdata)) - xoff)/over_factor
ddata = numpy.linspace(rmin - template_shift, rmin + template_shift, N_t)
sub_mat = f(ddata).astype(numpy.float32).reshape(N_t, 1)
else:
#if False:
# smoothing_factor = smoothing_factor*zdata.size*numpy.median(mads[indices])**2
# f = scipy.interpolate.RectBivariateSpline(xdata, ydata, zdata, s=smoothing_factor, kx=3, ky=1)
#else:
f = scipy.interpolate.RectBivariateSpline(xdata, ydata, zdata, kx=3, ky=1, s=0)
if negative_peak:
rmin = (numpy.argmin(f(cdata, idx)[:, 0]) - xoff)/over_factor
else:
rmin = (numpy.argmax(f(cdata, idx)[:, 0]) - xoff)/over_factor
ddata = numpy.linspace(rmin - template_shift, rmin + template_shift, N_t)
sub_mat = f(ddata, ydata).astype(numpy.float32)
else:
sub_mat = numpy.take(local_chunk[peak - template_shift:peak + template_shift+1], indices, axis=1)
if use_hanning:
sub_mat = (sub_mat.T*hanning_filter).T
if isolation:
is_isolated = numpy.all(numpy.max(numpy.abs(sub_mat[yoff]), 0) <= thresholds[indices])
to_accept = False
else:
is_isolated = True
if is_isolated:
if gpass == 0:
to_accept = True
idx = elec_positions[elec]
ext_amp = sub_mat[template_shift, idx]
result['tmp_%s_' %loc_peak + str(elec)] = numpy.concatenate((result['tmp_%s_' %loc_peak + str(elec)], ext_amp))
elif gpass == 1:
if smart_searches[loc_peak][elec] > 0:
idx = elec_positions[elec]
ext_amp = sub_mat[template_shift, idx]
idx = numpy.searchsorted(result['bounds_%s_' %loc_peak + str(elec)], ext_amp, 'right') - 1
to_keep = result['hist_%s_' %loc_peak + str(elec)][idx] < numpy.random.rand()
if to_keep:
to_accept = True
else:
rejected += 1
else:
to_accept = True
if to_accept:
sub_mat = numpy.dot(basis['rec_%s' %loc_peak], sub_mat)
nx, ny = sub_mat.shape
sub_mat = sub_mat.reshape((1, nx * ny))
result['data_%s_' %loc_peak + str(elec)] = numpy.vstack((result['data_%s_' %loc_peak + str(elec)], sub_mat))
else:
sub_mat = numpy.dot(basis['rec_%s' %loc_peak], sub_mat)
nx, ny = sub_mat.shape
sub_mat = sub_mat.reshape((1, nx * ny))
to_accept = True
result['tmp_%s_' %loc_peak + str(elec)] = numpy.vstack((result['tmp_%s_' %loc_peak + str(elec)], sub_mat))
if to_accept:
elt_count += 1
if gpass >= 1:
to_add = numpy.array([peak + local_offset], dtype=numpy.uint32)
result['loc_times_' + str(elec)] = numpy.concatenate((result['loc_times_' + str(elec)], to_add))
if gpass == 1:
result['peaks_' + str(elec)] = numpy.concatenate((result['peaks_' + str(elec)], [int(negative_peak)]))
if safety_space:
all_times[indices, min_times[midx]:max_times[midx]] = True
else:
all_times[elec, min_times[midx]:max_times[midx]] = True
if gpass == 0:
for i in range(comm.rank, N_e, comm.size):
for p in search_peaks:
if len(result['tmp_%s_' %p + str(i)]) < loop_max_elts_elec:
result['nb_chunks_%s_' %p + str(i)] += 1
comm.Barrier()
sys.stderr.flush()
print_and_log(['Node %d has collected %d spikes and rejected %d spikes' % (comm.rank, elt_count, rejected)], 'debug', logger)
gdata = all_gather_array(numpy.array([elt_count], dtype=numpy.float32), comm, 0)
gdata2 = gather_array(numpy.array([rejected], dtype=numpy.float32), comm, 0)
nb_elements = numpy.int64(numpy.sum(gdata))
nb_rejected = numpy.int64(numpy.sum(gdata2))
nb_total = numpy.int64(nb_elts*comm.size)
if ((smart_search and (gpass == 0)) or (not smart_search and (gpass == 1))) and nb_elements == 0:
if comm.rank == 0:
print_and_log(['No waveforms found! Are the data properly loaded??'], 'error', logger)
sys.exit(0)
if nb_elements == 0:
gpass = nb_repeats
if comm.rank == 0:
if gpass != 1:
if isolation:
print_and_log(["We found %d isolated spikes over %d requested" %(nb_elements, nb_total)], 'default', logger)
else:
print_and_log(["We found %d spikes over %d requested" %(nb_elements, nb_total)], 'default', logger)
if nb_elements == 0:
print_and_log(["No more isolated spikes in the recording, stop searching"], 'info', logger)
else:
if isolation:
print_and_log(["We found %d isolated spikes over %d requested (%d rejected)" %(nb_elements, nb_total, nb_rejected)], 'default', logger)
else:
print_and_log(["We found %d spikes over %d requested (%d rejected)" %(nb_elements, nb_total, nb_rejected)], 'default', logger)
if nb_elements < 0.2*nb_total:
few_elts = True
#CLUSTERING: once we have been through enough chunks (we don't need all of them), we run a clustering for each electrode.
#print "Clustering the data..."
local_nb_clusters = 0
local_hits = 0
local_mergings = 0
cluster_results = {}
for p in search_peaks:
cluster_results[p] = {}
if gpass > 1:
for ielec in range(N_e):
for p in search_peaks:
result['tmp_%s_' %p + str(ielec)] = gather_array(result['tmp_%s_' %p + str(ielec)], comm, numpy.mod(ielec, comm.size), 1, compress=blosc_compress)
elif gpass == 1:
for ielec in range(comm.rank, N_e, comm.size):
result['times_' + str(ielec)] = numpy.copy(result['loc_times_' + str(ielec)])
if comm.rank == 0:
if gpass == 0:
print_and_log(["Estimating amplitudes distributions..."], 'default', logger)
elif gpass == 1:
print_and_log(["Computing density estimations..."], 'default', logger)
else:
print_and_log(["Refining density estimations..."], 'default', logger)
if not os.path.exists(plot_path):
os.makedirs(plot_path)
if gpass == 1:
dist_file = tempfile.NamedTemporaryFile()
tmp_file = os.path.join(tmp_path_loc, os.path.basename(dist_file.name)) + '.hdf5'
dist_file.close()
result['dist_file'] = tmp_file
tmp_h5py = h5py.File(result['dist_file'], 'w', libver='earliest')
print_and_log(["Node %d will use temp file %s" %(comm.rank, tmp_file)], 'debug', logger)
elif gpass > 1:
tmp_h5py = h5py.File(result['dist_file'], 'r', libver='earliest')
to_explore = list(range(comm.rank, N_e, comm.size))
sys.stderr.flush()
if (comm.rank == 0) and gpass == nb_repeats:
print_and_log(["Running density-based clustering..."], 'default', logger)
to_explore = get_tqdm_progressbar(to_explore)
for ielec in to_explore:
for p in search_peaks:
cluster_results[p][ielec] = {}
if gpass == 0:
if len(result['tmp_%s_' %p + str(ielec)]) > 1:
# Need to estimate the number of spikes
ratio = nb_chunks / float(result['nb_chunks_%s_' %p + str(ielec)])
ampmin, ampmax = numpy.min(result['tmp_%s_' %p + str(ielec)]), numpy.max(result['tmp_%s_' %p + str(ielec)])
if p == 'pos':
if matched_filter:
bound = matched_tresholds_pos[ielec]
else:
bound = thresholds[ielec]
if bound < ampmax:
bins = [-numpy.inf] + numpy.linspace(bound, ampmax, nb_ss_bins - 1).tolist() + [numpy.inf]
else:
bins = [-numpy.inf] + numpy.linspace(bound, bound*5, nb_ss_bins - 1).tolist() + [numpy.inf]
elif p == 'neg':
if matched_filter:
bound = -matched_tresholds_neg[ielec]
else:
bound = -thresholds[ielec]
if ampmin < bound:
bins = [-numpy.inf] + numpy.linspace(ampmin, bound, nb_ss_bins - 1).tolist() + [numpy.inf]
else:
bins = [-numpy.inf] + numpy.linspace(5*bound, bound, nb_ss_bins - 1).tolist() + [numpy.inf]
a, b = numpy.histogram(result['tmp_%s_' %p + str(ielec)], bins)
nb_spikes = numpy.sum(a)
a = a/float(nb_spikes)
z = a[a > 0]
c = 1./numpy.min(z)
d = (1./(c*a))
d = numpy.minimum(1, d)
d /= numpy.sum(d)
twist = numpy.sum(a*d)
factor = twist*c
rejection_curve = numpy.minimum(0.95, factor*a)
if ratio > 1:
target_max = 1 - (1 - rejection_curve.max())/ratio
rejection_curve *= target_max/(rejection_curve.max())
result['hist_%s_'%p + str(ielec) ] = rejection_curve
result['bounds_%s_' %p + str(ielec)] = b
# if make_plots not in ['None', '']:
# save = [plot_path, '%s_%d.%s' %(p, ielec, make_plots)]
# plot.view_rejection(a, b[1:], result['hist_%s_'%p + str(ielec)], save=save)
else:
smart_searches[p][ielec] = 0
if smart_searches[p][ielec] > 0:
                    print_and_log(['Smart search is activated on channel %d' % ielec], 'debug', logger)
elif gpass == 1:
if len(result['data_%s_' %p + str(ielec)]) >= 1:
if result['pca_%s_' %p + str(ielec)] is None:
pca = PCA(sub_output_dim)
pca.fit(result['data_%s_' %p + str(ielec)])
result['pca_%s_' %p + str(ielec)] = pca.components_.T.astype(numpy.float32)
print_and_log(["The percentage of variance explained by local PCA on electrode %d is %s"
%(ielec, numpy.sum(pca.explained_variance_ratio_))], 'debug', logger)
if result['pca_%s_' %p + str(ielec)].shape[1] < sub_output_dim:
zeros = numpy.zeros((result['pca_%s_' %p + str(ielec)].shape[0], sub_output_dim - result['pca_%s_' %p + str(ielec)].shape[1]))
result['pca_%s_' %p + str(ielec)] = numpy.hstack((result['pca_%s_' %p + str(ielec)], zeros))
result['sub_%s_' %p + str(ielec)] = numpy.dot(result['data_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
rho, dist, sdist = algo.compute_rho(result['sub_%s_' %p + str(ielec)], mratio=m_ratio)
result['rho_%s_' %p + str(ielec)] = rho
result['sdist_%s_' %p + str(ielec)] = sdist
if hdf5_compress:
tmp_h5py.create_dataset('dist_%s_' %p + str(ielec), data=dist.distances, chunks=True, compression='gzip')
else:
tmp_h5py.create_dataset('dist_%s_' %p + str(ielec), data=dist.distances, chunks=True)
del dist, rho
else:
if result['pca_%s_' %p + str(ielec)] is None:
n_neighb = len(edges[nodes[ielec]])
dimension = basis['proj_%s' %p].shape[1] * n_neighb
result['pca_%s_' %p + str(ielec)] = numpy.zeros((dimension, sub_output_dim), dtype=numpy.float32)
result['pca_%s_' %p + str(ielec)][numpy.arange(sub_output_dim), numpy.arange(sub_output_dim)] = 1
result['rho_%s_' %p + str(ielec)] = numpy.zeros((0), dtype=numpy.float32)
result['sub_%s_' %p + str(ielec)] = numpy.zeros((0, sub_output_dim), dtype=numpy.float32)
result['sdist_%s_' %p + str(ielec)] = numpy.zeros((0), dtype=numpy.float32)
else:
if len(result['tmp_%s_' %p + str(ielec)]) > 1:
data = numpy.dot(result['tmp_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
rho, sdist = algo.compute_rho(result['sub_%s_' %p + str(ielec)], update=(data, result['sdist_%s_' %p + str(ielec)]), mratio=m_ratio)
result['rho_%s_' %p + str(ielec)] = rho
result['sdist_%s_' %p + str(ielec)] = sdist
del rho
if gpass == nb_repeats:
result.pop('tmp_%s_' %p + str(ielec))
n_data = len(result['data_%s_' %p + str(ielec)])
n_min = numpy.maximum(n_abs_min, int(nclus_min*n_data))
if p == 'pos':
flag = 'positive'
elif p == 'neg':
flag = 'negative'
if (n_data > 1):
dist = tmp_h5py.get('dist_%s_' %p + str(ielec))[:]
result['rho_%s_' %p + str(ielec)] = -result['rho_%s_' %p + str(ielec)] + result['rho_%s_' %p + str(ielec)].max()
cluster_results[p][ielec]['groups'], r, d, c = algo.clustering_by_density(result['rho_%s_' %p + str(ielec)], dist,
n_min=n_min, alpha=sensitivity)
# Now we perform a merging step, for clusters that look too similar
data = result['sub_%s_' %p + str(ielec)]
cluster_results[p][ielec]['groups'], merged = algo.merging(cluster_results[p][ielec]['groups'],
sim_same_elec,
data)
idx_clusters, counts = numpy.unique(cluster_results[p][ielec]['groups'], return_counts=True)
for label, cluster_size in zip(idx_clusters, counts):
if cluster_size < n_min:
tmp = cluster_results[p][ielec]['groups'] == label
cluster_results[p][ielec]['groups'][tmp] = -1
if make_plots not in ['None', '']:
save = [plot_path, '%s_%d.%s' %(p, ielec, make_plots)]
injected = None
if test_clusters:
injected = numpy.zeros(len(result['data_%s_' %p + str(ielec)]), dtype=numpy.bool)
key = 'spikes_' + str(ielec)
thresh = 2
if key in injected_spikes:
for icount, spike in enumerate(result['loc_times_' + str(ielec)]):
idx = numpy.where(numpy.abs(spike - injected_spikes['spikes_' + str(ielec)]) < thresh)[0]
if len(idx) > 0:
if icount < (len(injected) - 1):
injected[icount] = True
data = numpy.dot(result['data_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
plot.view_clusters(data, r, d, c,
cluster_results[p][ielec]['groups'], injected=injected,
save=save, alpha=sensitivity)
keys = ['loc_times_' + str(ielec), 'all_times_' + str(ielec), 'rho_%s_' %p + str(ielec)]
for key in keys:
if key in result:
result.pop(key)
mask = numpy.where(cluster_results[p][ielec]['groups'] > -1)[0]
cluster_results[p][ielec]['n_clus'] = len(numpy.unique(cluster_results[p][ielec]['groups'][mask]))
n_clusters = []
result['clusters_%s_' %p + str(ielec)] = cluster_results[p][ielec]['groups']
for i in numpy.unique(cluster_results[p][ielec]['groups'][mask]):
n_clusters += [numpy.sum(cluster_results[p][ielec]['groups'][mask] == i)]
line = ["Node %d: %d-%d %s templates on channel %d from %d spikes: %s" %(comm.rank, merged[0], merged[1], flag, ielec, n_data, str(n_clusters))]
print_and_log(line, 'debug', logger)
local_mergings += merged[1]
del dist, r, d, c
else:
cluster_results[p][ielec]['groups'] = numpy.zeros(0, dtype=numpy.int32)
cluster_results[p][ielec]['n_clus'] = 0
result['clusters_%s_' %p + str(ielec)] = numpy.zeros(0, dtype=numpy.int32)
line = ["Node %d: not enough %s spikes on channel %d" %(comm.rank, flag, ielec)]
print_and_log(line, 'debug', logger)
local_nb_clusters += cluster_results[p][ielec]['n_clus']
if gpass >= 1:
tmp_h5py.close()
gpass += 1
sys.stderr.flush()
try:
os.remove(result['dist_file'])
except Exception:
pass
comm.Barrier()
gdata = gather_array(numpy.array([local_hits], dtype=numpy.float32), comm, 0)
gdata2 = gather_array(numpy.array([local_mergings], dtype=numpy.float32), comm, 0)
gdata3 = gather_array(numpy.array([local_nb_clusters], dtype=numpy.float32), comm, 0)
mean_channels = 0
if comm.rank == 0:
total_hits = int(numpy.sum(gdata))
total_mergings = int(numpy.sum(gdata2))
total_nb_clusters = int(numpy.sum(gdata3))
lines = ["Number of clusters found : %d" %total_nb_clusters,
"Number of local merges : %d" %total_mergings]
if few_elts:
lines += ["Not enough spikes gathered: -put safety_space=False?"]
if numpy.any(sdata > 0):
lines += [" -remove smart_search?"]
if isolation:
lines += [" -remove isolation mode?"]
print_and_log(lines, 'info', logger)
print_and_log(["Estimating the templates with the %s procedure ..." %extraction], 'default', logger)
if extraction in ['median-raw', 'median-pca', 'mean-raw', 'mean-pca']:
total_nb_clusters = int(comm.bcast(numpy.array([int(numpy.sum(gdata3))], dtype=numpy.int32), root=0)[0])
offsets = numpy.zeros(comm.size, dtype=numpy.int32)
for i in range(comm.size-1):
offsets[i+1] = comm.bcast(numpy.array([local_nb_clusters], dtype=numpy.int32), root=i)
node_pad = numpy.sum(offsets[:comm.rank+1])
if parallel_hdf5:
hfile = h5py.File(file_out_suff + '.templates.hdf5', 'w', driver='mpio', comm=comm, libver='earliest')
norms = hfile.create_dataset('norms', shape=(2*total_nb_clusters, ), dtype=numpy.float32, chunks=True)
electrodes = hfile.create_dataset('electrodes', shape=(total_nb_clusters, ), dtype=numpy.int32, chunks=True)
amps_lims = hfile.create_dataset('limits', shape=(total_nb_clusters, 2), dtype=numpy.float32, chunks=True)
g_count = node_pad
g_offset = total_nb_clusters
else:
hfile = h5py.File(file_out_suff + '.templates-%d.hdf5' %comm.rank, 'w', libver='earliest')
electrodes = hfile.create_dataset('electrodes', shape=(local_nb_clusters, ), dtype=numpy.int32, chunks=True)
norms = hfile.create_dataset('norms', shape=(2*local_nb_clusters, ), dtype=numpy.float32, chunks=True)
amps_lims = hfile.create_dataset('limits', shape=(local_nb_clusters, 2), dtype=numpy.float32, chunks=True)
g_count = 0
g_offset = local_nb_clusters
temp_x = numpy.zeros(0, dtype=numpy.uint32)
temp_y = numpy.zeros(0, dtype=numpy.uint32)
temp_data = numpy.zeros(0, dtype=numpy.float32)
shifted_templates = numpy.zeros(0, dtype=numpy.int32)
comm.Barrier()
cfile = h5py.File(file_out_suff + '.clusters-%d.hdf5' %comm.rank, 'w', libver='earliest')
count_templates = node_pad
data_file.close()
to_explore = range(comm.rank, N_e, comm.size)
if (comm.rank == 0):
to_explore = get_tqdm_progressbar(to_explore)
for ielec in to_explore:
n_neighb = len(edges[nodes[ielec]])
indices = inv_nodes[edges[nodes[ielec]]]
for p in search_peaks:
#print "Dealing with cluster", ielec
n_data = len(result['data_%s_' %p + str(ielec)])
data = result['data_%s_' %p + str(ielec)].reshape(n_data, basis['proj_%s' %p].shape[1], n_neighb)
loc_pad = count_templates
myamps = []
mask = numpy.where(cluster_results[p][ielec]['groups'] > -1)[0]
if p == 'pos':
myslice2 = numpy.where(result['peaks_' + str(ielec)] == 0)[0]
elif p == 'neg':
myslice2 = numpy.where(result['peaks_' + str(ielec)] == 1)[0]
loc_times = numpy.take(result['times_' + str(ielec)], myslice2)
loc_clusters = numpy.take(cluster_results[p][ielec]['groups'], mask)
for group in numpy.unique(loc_clusters):
electrodes[g_count] = ielec
myslice = numpy.where(cluster_results[p][ielec]['groups'] == group)[0]
if extraction == 'median-pca':
sub_data = numpy.take(data, myslice, axis=0)
first_component = numpy.median(sub_data, axis=0)
tmp_templates = numpy.dot(first_component.T, basis['rec_%s' %p])
elif extraction == 'mean-pca':
sub_data = numpy.take(data, myslice, axis=0)
first_component = numpy.mean(sub_data, axis=0)
tmp_templates = numpy.dot(first_component.T, basis['rec_%s' %p])
elif extraction == 'median-raw':
labels_i = numpy.random.permutation(myslice)[:min(len(myslice), 250)]
times_i = numpy.take(loc_times, labels_i)
sub_data = io.get_stas(params, times_i, labels_i, ielec, neighs=indices, nodes=nodes, pos=p)
first_component = numpy.median(sub_data, 0)
tmp_templates = first_component
elif extraction == 'mean-raw':
labels_i = numpy.random.permutation(myslice)[:min(len(myslice), 250)]
times_i = numpy.take(loc_times, labels_i)
sub_data = io.get_stas(params, times_i, labels_i, ielec, neighs=indices, nodes=nodes, pos=p) #io.get_stas(sub_data, times_i, labels_i, ielec, neighs=indices, nodes=nodes, pos=p)
first_component = numpy.mean(sub_data, 0)
tmp_templates = first_component
if p == 'neg':
tmpidx = divmod(tmp_templates.argmin(), tmp_templates.shape[1])
elif p == 'pos':
tmpidx = divmod(tmp_templates.argmax(), tmp_templates.shape[1])
shift = template_shift - tmpidx[1]
                    if numpy.abs(shift) > template_shift / 4:
shifted_templates = numpy.concatenate((shifted_templates, numpy.array([count_templates], dtype='int32')))
myamps += [[0, 10]]
else:
templates = numpy.zeros((N_e, N_t), dtype=numpy.float32)
if shift > 0:
templates[indices, shift:] = tmp_templates[:, :-shift]
elif shift < 0:
templates[indices, :shift] = tmp_templates[:, -shift:]
else:
templates[indices, :] = tmp_templates
mean_channels += len(indices)
if comp_templates:
to_delete = []
for i in indices:
if (numpy.abs(templates[i, :]).max() < 0.5*(thresholds[i]/spike_thresh)):
templates[i, :] = 0
to_delete += [i]
mean_channels -= len(to_delete)
templates = templates.ravel()
dx = templates.nonzero()[0].astype(numpy.uint32)
temp_x = numpy.concatenate((temp_x, dx))
temp_y = numpy.concatenate((temp_y, count_templates*numpy.ones(len(dx), dtype=numpy.uint32)))
temp_data = numpy.concatenate((temp_data, templates[dx]))
norms[g_count] = numpy.sqrt(numpy.sum(templates.ravel()**2)/(N_e*N_t))
x, y, z = sub_data.shape
sub_data_flat = sub_data.reshape(x, y*z)
first_flat = first_component.reshape(y*z, 1)
amplitudes = numpy.dot(sub_data_flat, first_flat)
amplitudes /= numpy.sum(first_flat**2)
variation = numpy.median(numpy.abs(amplitudes - numpy.median(amplitudes)))
physical_limit = noise_thr*(-thresholds[indices[tmpidx[0]]])/tmp_templates.min()
amp_min = min(0.8, max(physical_limit, numpy.median(amplitudes) - dispersion[0]*variation))
amp_max = max(1.2, numpy.median(amplitudes) + dispersion[1]*variation)
amps_lims[g_count] = [amp_min, amp_max]
myamps += [[amp_min, amp_max]]
for i in range(x):
sub_data_flat[i, :] -= amplitudes[i]*first_flat[:, 0]
if len(sub_data_flat) > 1:
pca = PCA(1)
pca.fit(sub_data_flat)
second_component = pca.components_.T.astype(numpy.float32).reshape(y, z)
else:
second_component = sub_data_flat.reshape(y, z)/numpy.sum(sub_data_flat**2)
if extraction in ['median-pca', 'mean-pca']:
tmp_templates = numpy.dot(second_component.T, basis['rec_%s' %p])
elif extraction in ['median-raw', 'mean-raw']:
tmp_templates = second_component
offset = total_nb_clusters + count_templates
sub_templates = numpy.zeros((N_e, N_t), dtype=numpy.float32)
if shift > 0:
sub_templates[indices, shift:] = tmp_templates[:, :-shift]
elif shift < 0:
sub_templates[indices, :shift] = tmp_templates[:, -shift:]
else:
sub_templates[indices, :] = tmp_templates
if comp_templates:
for i in to_delete:
sub_templates[i, :] = 0
sub_templates = sub_templates.ravel()
dx = sub_templates.nonzero()[0].astype(numpy.uint32)
temp_x = numpy.concatenate((temp_x, dx))
temp_y = numpy.concatenate((temp_y, offset*numpy.ones(len(dx), dtype=numpy.uint32)))
temp_data = numpy.concatenate((temp_data, sub_templates[dx]))
norms[g_count + g_offset] = numpy.sqrt(numpy.sum(sub_templates.ravel()**2)/(N_e*N_t))
count_templates += 1
g_count += 1
if make_plots not in ['None', '']:
if n_data > 1:
save = [plot_path, '%s_%d.%s' %(p, ielec, make_plots)]
idx = numpy.where(indices == ielec)[0][0]
sub_data = numpy.take(data, idx, axis=2)
nb_temp = cluster_results[p][ielec]['n_clus']
vidx = numpy.where((temp_y >= loc_pad) & (temp_y < loc_pad+nb_temp))[0]
sub_tmp = scipy.sparse.csr_matrix((temp_data[vidx], (temp_x[vidx], temp_y[vidx]-loc_pad)), shape=(N_e*N_t, nb_temp))
sub_tmp = sub_tmp.toarray().reshape(N_e, N_t, nb_temp)
sub_tmp = sub_tmp[ielec, :, :]
plot.view_waveforms_clusters(numpy.dot(sub_data, basis['rec_%s' %p]), cluster_results[p][ielec]['groups'],
thresholds[ielec], sub_tmp,
numpy.array(myamps), save=save)
data = numpy.dot(result['data_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
result['data_' + str(ielec)] = numpy.concatenate((result['data_' + str(ielec)], data))
if len(result['clusters_' + str(ielec)]) > 0:
max_offset = numpy.int32(numpy.max(result['clusters_' + str(ielec)]) + 1)
else:
max_offset = numpy.int32(0)
mask = result['clusters_%s_' %p + str(ielec)] > -1
result['clusters_%s_' %p + str(ielec)][mask] += max_offset
result['clusters_' + str(ielec)] = numpy.concatenate((result['clusters_' + str(ielec)], result['clusters_%s_' %p + str(ielec)]))
del data
all_indices = numpy.zeros(0, dtype=numpy.uint32)
for p in search_peaks:
if p == 'pos':
target = 0
elif p == 'neg':
target = 1
all_indices = numpy.concatenate((all_indices, numpy.where(result['peaks_' + str(ielec)] == target)[0]))
result['times_' + str(ielec)] = result['times_' + str(ielec)][all_indices]
result['peaks_' + str(ielec)] = result['peaks_' + str(ielec)][all_indices]
io.write_datasets(cfile, to_write, result, ielec, compression=hdf5_compress)
#At the end we should have a templates variable to store.
cfile.close()
del result, amps_lims
sys.stderr.flush()
comm.Barrier()
if local_nb_clusters > 0:
mean_channels /= local_nb_clusters
gdata4 = gather_array(numpy.array([mean_channels], dtype=numpy.float32), comm)
shifted_templates = all_gather_array(shifted_templates, comm, 0, dtype='int32')
if comm.rank == 0:
idx = numpy.where(gdata4 != 0)[0]
mean_channels = numpy.mean(gdata4[idx])
if mean_channels < 3 and params.getfloat('clustering', 'cc_merge') != 1:
print_and_log(["Templates on few channels only, cc_merge should be 1"], 'info', logger)
#We need to gather the sparse arrays
temp_x = gather_array(temp_x, comm, dtype='uint32', compress=blosc_compress)
temp_y = gather_array(temp_y, comm, dtype='uint32', compress=blosc_compress)
temp_data = gather_array(temp_data, comm, compress=blosc_compress)
if parallel_hdf5:
if comm.rank == 0:
rs = [h5py.File(file_out_suff + '.clusters-%d.hdf5' %i, 'r', libver='earliest') for i in range(comm.size)]
cfile = h5py.File(file_out_suff + '.clusters.hdf5', 'w', libver='earliest')
io.write_datasets(cfile, ['electrodes'], {'electrodes' : electrodes[:]})
for i in range(comm.size):
for j in range(i, N_e, comm.size):
io.write_datasets(cfile, to_write, rs[i], j, compression=hdf5_compress)
rs[i].close()
os.remove(file_out_suff + '.clusters-%d.hdf5' %i)
cfile.close()
hfile.close()
else:
hfile.close()
comm.Barrier()
if comm.rank == 0:
ts = [h5py.File(file_out_suff + '.templates-%d.hdf5' %i, 'r', libver='earliest') for i in range(comm.size)]
rs = [h5py.File(file_out_suff + '.clusters-%d.hdf5' %i, 'r', libver='earliest') for i in range(comm.size)]
result = {}
hfile = h5py.File(file_out_suff + '.templates.hdf5', 'w', libver='earliest')
cfile = h5py.File(file_out_suff + '.clusters.hdf5', 'w', libver='earliest')
electrodes = hfile.create_dataset('electrodes', shape=(total_nb_clusters, ), dtype=numpy.int32, chunks=True)
norms = hfile.create_dataset('norms', shape=(2*total_nb_clusters, ), dtype=numpy.float32, chunks=True)
amplitudes = hfile.create_dataset('limits', shape=(total_nb_clusters, 2), dtype=numpy.float32, chunks=True)
count = 0
for i in range(comm.size):
loc_norms = ts[i].get('norms')
middle = len(loc_norms)//2
norms[count:count+middle] = loc_norms[:middle]
norms[total_nb_clusters+count:total_nb_clusters+count+middle] = loc_norms[middle:]
electrodes[count:count+middle] = ts[i].get('electrodes')
amplitudes[count:count+middle] = ts[i].get('limits')
count += middle
for j in range(i, N_e, comm.size):
io.write_datasets(cfile, to_write, rs[i], j, compression=hdf5_compress)
ts[i].close()
rs[i].close()
os.remove(file_out_suff + '.templates-%d.hdf5' %i)
os.remove(file_out_suff + '.clusters-%d.hdf5' %i)
hfile.flush() # we need to flush otherwise electrodes[:] refers to zeros and not the real values
io.write_datasets(cfile, ['electrodes'], {'electrodes' : electrodes[:]})
hfile.close()
cfile.close()
if comm.rank == 0:
hfile = h5py.File(file_out_suff + '.templates.hdf5', 'r+', libver='earliest')
if hdf5_compress:
hfile.create_dataset('temp_x', data=temp_x, compression='gzip')
hfile.create_dataset('temp_y', data=temp_y, compression='gzip')
hfile.create_dataset('temp_data', data=temp_data, compression='gzip')
else:
hfile.create_dataset('temp_x', data=temp_x)
hfile.create_dataset('temp_y', data=temp_y)
hfile.create_dataset('temp_data', data=temp_data)
hfile.create_dataset('temp_shape', data=numpy.array([N_e, N_t, 2*total_nb_clusters], dtype=numpy.int32))
hfile.close()
del temp_x, temp_y, temp_data
import gc
gc.collect()
comm.Barrier()
if len(shifted_templates) > 0:
if comm.rank == 0:
print_and_log(["Removing %d strongly shifted templates..." %len(shifted_templates)], 'default', logger)
if comm.rank == 0:
result = io.load_data(params, 'clusters')
else:
result = []
algo.slice_templates(params, to_remove=shifted_templates)
algo.slice_clusters(params, to_remove=shifted_templates, result=result)
del result
comm.Barrier()
total_nb_clusters = int(io.load_data(params, 'nb_templates') // 2)
if total_nb_clusters > 0:
if comm.rank == 0:
print_and_log(["Merging similar templates..."], 'default', logger)
merged1 = algo.merging_cc(params, nb_cpu=nb_cpu, nb_gpu=nb_gpu, use_gpu=use_gpu)
comm.Barrier()
if remove_mixture:
if comm.rank == 0:
print_and_log(["Removing mixtures of templates..."], 'default', logger)
merged2 = algo.delete_mixtures(params, nb_cpu=nb_cpu, nb_gpu=nb_gpu, use_gpu=use_gpu)
else:
merged2 = [0, 0]
else:
merged1 = [0, 0]
merged2 = [0, 0]
if comm.rank == 0:
print_and_log(["Number of global merges : %d" %merged1[1],
"Number of mixtures removed : %d" %merged2[1]], 'info', logger)
comm.Barrier()
io.get_overlaps(params, erase=True, nb_cpu=nb_cpu, nb_gpu=nb_gpu, use_gpu=use_gpu)
| 1.796875 | 2 |
code/replicate.py | tedunderwood/fiction | 21 | 12787982 | #!/usr/bin/env python3
# replicate.py
# This master script gives me a way to record the settings I used for
# various aspects of the article "The Life Cycles of Genres," and
# (I hope) allows other scholars to reproduce the same tests.
# Generally, I've defined a separate function for each part of the
# article that needs replication, and generally, they're listed
# in article order.
# Running this script gives you a menu and allows you to choose
# a test to replicate. Or you could just use this code as a guide
# in order to build your own script that calls logisticpredict with
# settings of your own devising.
# I have to admit that those "settings" are bloody complicated.
# This is a consequence of trying to build a single
# script that can do a whole bunch of different things.
# The "paths" are somewhat straightforward. They tell the script where
# to look for metadata, data files, and a vocabulary of features to
# use in classification (if the vocabulary file isn't found, it will be created).
#
# The "exclusions" can focus the model on a particular segment of the timeline.
# The fuss about nonegatives is less important than it seems; in most
# circumstances logisticpredict will automatically exclude tags in the
# positive set from the negative contrast set. The only situation where
# you need to explicitly exclude particular tags is elucidated in the
# function ghastly_stew below. In that case, it's achieved by putting
# a special key in excludeif which excludes tags from the *negative* set
# (whereas excludeif would ordinarily exclude from the positives).
#
# The testconditions variable is very important for one key move made
# in the article: extrapolating from a model to another set of volumes.
# This is achieved by defining a set of volumes that are only ever allowed
# to be in the test set; they never appear in the training set.
#
# Unfortunately, the logic I've used is confusing.
# When I provide a pair of dates in testconditions, this actually sets
# the range of dates within which volumes *are* allowed in the training
# set. On the other hand, when I provide a tag in testconditions, this
# defines a tag that *is not* allowed into the training set (unless volumes
# bearing that tag are also qualified as a member of the positive set
# by some other positive tag). This is all done by the function
# get_donttrainset in metafilter.py. Sorry the logic is a bit
# convoluted.
#
# Those are, I think, the gnarliest aspects of this code.
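#
# For illustration only (hypothetical values, not the settings used in the
# article):
#
#   testconditions = {'1700', '1899'}
#       -> only volumes first published 1700-1899 may enter the training set;
#          volumes outside that range can appear only in the test set.
#
#   testconditions = {'det100'}
#       -> volumes bearing the tag 'det100' are kept out of the training set
#          unless another positive tag qualifies them.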
import logisticpredict, comparemodels
import datetime, sys
def ghastly_stew():
## PATHS.
sourcefolder = '../newdata/'
extension = '.fic.tsv'
metadatapath = '../meta/finalmeta.csv'
vocabpath = '../lexicon/new10k.csv'
modelname = 'ghastlystew'
outputpath = '../results/' + modelname + str(datetime.date.today()) + '.csv'
# We can simply exclude volumes from consideration on the basis on any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
excludeif['negatives'] = allstewgenres
sizecap = 250
# CLASSIFY CONDITIONS
# We ask the user for a list of categories to be included in the positive
# set, as well as a list for the negative set. Default for the negative set
# is to include all the "random"ly selected categories. Note that random volumes
# can also be tagged with various specific genre tags; they are included in the
# negative set only if they lack tags from the positive set.
positive_tags = ['stew']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
def make_paths(modelname):
'''
Makes a pathtuple using a model name and a default set of
paths to feature-vocab and metadata files.
'''
sourcefolder = '../newdata/'
extension = '.fic.tsv'
metadatapath = '../meta/finalmeta.csv'
vocabpath = '../lexicon/new10k.csv'
# These words will be used as features
outputpath = '../results/' + modelname + str(datetime.date.today()) + '.csv'
return (sourcefolder, extension, metadatapath, outputpath, vocabpath)
def make_exclusions(startdate, enddate, sizecap, negatives):
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = startdate
excludeabove['firstpub'] = enddate
if negatives != 'nonegatives':
excludeif['negatives'] = set(negatives)
# This is a way to exclude certain tags from the negative contrast set.
return (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
def model_taglist(positive_tags, modelname):
print('We are modeling these positive tags:')
for tag in positive_tags:
print(tag)
sizecap = 1000
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, 'nonegatives')
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
return allvolumes
def model_taglist_within_dates(positive_tags, modelname, mindate, maxdate):
print('We are modeling these positive tags:')
for tag in positive_tags:
print(tag)
sizecap = 1000
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath, vocabpath = paths
exclusions = make_exclusions(mindate, maxdate, sizecap, 'nonegatives')
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
return allvolumes
def project_detective_beyond_date(dividedate):
print('First we create a model of detective fiction only after ' + str(dividedate))
sizecap = 300
modelname = 'detectivejustpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(dividedate, 2000, sizecap, 'nonegatives')
positive_tags = ['locdetective', 'locdetmyst', 'chimyst', 'det100']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of detective fiction blindly predicting after ' + str(dividedate))
modelname = 'detectivepredictpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2001, sizecap, 'nonegatives')
testconditions = {'1700', str(dividedate)}
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
def project_tag_to_another(tagtoproject, tagtarget):
print('First we create a model of ' + tagtarget)
sizecap = 400
modelname = tagtarget + 'byitself'
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, tagtoproject)
# Note that we exclude tagtoproject from the negative contrast set, so the
# contrast sets for the two models will be identical.
positive_tags = [tagtarget]
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of ' + tagtoproject + ' and use it to predict ' + tagtarget)
modelname = tagtoproject + 'predicts' + tagtarget
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2001, sizecap, 'nonegatives')
positive_tags = [tagtarget, tagtoproject]
testconditions = {tagtarget}
# That's the line that actually excludes tagtarget from training.
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
def project_tags(tagstoproject, tagtargets):
targetstring = ','.join(tagtargets)
projectstring = ','.join(tagstoproject)
print('First we create a model of ' + targetstring)
sizecap = 400
modelname = targetstring + 'byitself'
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, tagstoproject)
# Note that we exclude tagstoproject from the negative contrast set, so the
# contrast sets for the two models will be identical.
positive_tags = tagtargets
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of ' + projectstring + ' and use it to predict ' + targetstring)
modelname = projectstring + 'predicts' + targetstring
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, 'nonegatives')
positive_tags = list(tagtargets)
positive_tags.extend(tagstoproject)
testconditions = set(tagtargets)
# That's the line that actually excludes tagtarget from training.
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
def the_red_and_the_black():
sizecap = 140
modelname = 'blackandthered'
paths = make_paths(modelname)
exclusions = make_exclusions(1700, 2001, sizecap, 'nonegatives')
positive_tags = ['teamred']
negative_tags = ['teamblack']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracies = []
for i in range(40):
modelname = 'redandtheblack' + str(i)
paths = make_paths(modelname)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print(rawaccuracy)
accuracies.append(rawaccuracy)
with open('finalaccuracies.csv', mode = 'w', encoding = 'utf-8') as f:
for accuracy in accuracies:
f.write(str(accuracy) + '\n')
def replicate_stew():
sizecap = 140
modelname = 'replicatestew'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
excludeif['negatives'] = allstewgenres
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
positive_tags = ['stew']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracies = []
for i in range(20):
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print(rawaccuracy)
accuracies.append(rawaccuracy)
with open('stewaccuracies.csv', mode = 'a', encoding = 'utf-8') as f:
for accuracy in accuracies:
f.write(str(accuracy) + '\n')
def replicate_detective():
sizecap = 140
modelname = 'replicatedet'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
positive_tags = ['locdetective', 'locdetmyst', 'chimyst', 'det100']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracies = []
for i in range(20):
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print(rawaccuracy)
accuracies.append(rawaccuracy)
with open('detaccuracies.csv', mode = 'a', encoding = 'utf-8') as f:
for accuracy in accuracies:
f.write(str(accuracy) + '\n')
def calibrate_detective():
'''
Tests accuracy of classification for detective fiction at different sample
sizes.
'''
modelname = 'calibratedet'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
positive_tags = ['locdetective', 'locdetmyst', 'chimyst', 'det100']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
sizes = [5,6,7,8,9,11,13,15,17,18,21,27,29,32,34,36,40,45,50,55,60,65,70,75,80,85,90,100]
# with open('../results/collateddetectiveaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
# f.write('sizecap\tavgsize\trawaccuracy\n')
accuracies = []
for sizecap in sizes:
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
trainsizes = []
for vol in allvolumes:
trainsizes.append(vol[11])
# this is unfortunately dependent on the exact way
# logisticpredict formats its output
avgsize = sum(trainsizes) / len(trainsizes)
print(sizecap, avgsize, rawaccuracy)
with open('../final/collateddetaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
f.write(str(sizecap) + '\t' + str(avgsize) + '\t' + str(rawaccuracy) + '\n')
return None
def calibrate_stew():
'''
Tests accuracy of classification for ghastly stew at different sample
sizes.
'''
modelname = 'calibratestew'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
excludeif['negatives'] = allstewgenres
positive_tags = ['stew']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
sizes = [5,6,7,8,9,11,13,15,17,18,21,27,29,32,34,36,40,45,50,55,60,65,70,75,80,85,90,100]
# with open('../results/collatedstewaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
# f.write('sizecap\tavgsize\trawaccuracy\n')
accuracies = []
for sizecap in sizes:
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
trainsizes = []
for vol in allvolumes:
trainsizes.append(vol[11])
# this is unfortunately dependent on the exact way
# logisticpredict formats its output
avgsize = sum(trainsizes) / len(trainsizes)
print(sizecap, avgsize, rawaccuracy)
with open('../final/collatedstewaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
f.write(str(sizecap) + '\t' + str(avgsize) + '\t' + str(rawaccuracy) + '\n')
return None
def project_gothic_beyond_date(dividedate):
print('First we create a model of gothic fiction only after ' + str(dividedate))
sizecap = 300
modelname = 'gothicjustpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(dividedate, 2000, sizecap, 'nonegatives')
positive_tags = ['lochorror', 'pbgothic', 'locghost', 'stangothic', 'chihorror']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of gothic fiction blindly predicting after ' + str(dividedate))
modelname = 'gothicpredictpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2001, sizecap, 'nonegatives')
testconditions = {'1700', str(dividedate)}
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
if __name__ == '__main__':
# args = sys.argv
print('Your options include: ')
print(' 1) Model Indiana detective fiction by itself.')
print(' 2) Model LOC detective-esque categories by themselves.')
print(' 3) Model LOC and Indiana together.')
print(' 4) Extrapolate a model of LoC detective fiction to the Indiana exhibition.')
print(' 5) Extrapolate a model of detective fiction beyond a particular date.')
print(' 6) Extrapolate a model of one arbitrary genre tag to another.')
print(' 7) Extrapolate a model of gothic fiction beyond a particular date.')
print(' 8) Extrapolate a model of several tags to several others.')
print(' 9) Run detective prediction at many different sizes.')
print(' 10) Run ghastly stew prediction at many different sizes.')
print(' 11) Try to use detective fiction to predict scifi (fails).')
print(' 12) Model an arbitrary tag against random control set.')
print(' 13) Model all early gothic 1760-1840.')
print(' 14) Model all gothic.')
print(' 15) Model all SF.')
userchoice = int(input('\nyour choice: '))
if userchoice == 1:
tagstomodel = ['det100']
modelname = 'IndianaDetective'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 2:
tagstomodel = ['locdetmyst', 'locdetective', 'chimyst']
modelname = 'LOCdetective'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 3:
tagstomodel = ['det100', 'locdetmyst', 'locdetective', 'chimyst']
modelname = 'AllDetective'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 4:
tagtoproject = ['locdetmyst', 'locdetective', 'chimyst']
tagtarget = ['det100']
project_tags(tagtoproject, tagtarget)
elif userchoice == 5:
dividedate = int(input('date beyond which to project: '))
project_detective_beyond_date(dividedate)
elif userchoice == 6:
tagtoproject = input('tag to project from: ')
tagtarget = input('tag to project onto: ')
project_tag_to_another(tagtoproject, tagtarget)
elif userchoice == 7:
dividedate = int(input('date beyond which to project: '))
project_gothic_beyond_date(dividedate)
elif userchoice == 8:
tagstoproject = input('comma-separated list of tags to model and project from: ')
tagstoproject = [x.strip() for x in tagstoproject.split(',')]
tagtargets = input('comma-separated list of tags to project onto: ')
tagtargets = [x.strip() for x in tagtargets.split(',')]
project_tags(tagstoproject, tagtargets)
elif userchoice == 9:
calibrate_detective()
elif userchoice == 10:
calibrate_stew()
elif userchoice == 11:
projectfrom = 'chimyst'
projectonto = 'chiscifi'
project_tag_to_another(projectfrom, projectonto)
elif userchoice == 12:
tagtomodel = input('tag to model (must be in metadata)? ')
tagstomodel = [tagtomodel]
allvolumes = model_taglist(tagstomodel, tagtomodel)
elif userchoice == 13:
tagstomodel = ['stangothic', 'pbgothic', 'lochorror', 'locghost']
allvolumes = model_taglist_within_dates(tagstomodel, 'EarlyGothic', 1760, 1840)
elif userchoice == 14:
tagstomodel = ['stangothic', 'pbgothic', 'lochorror', 'locghost', 'chihorror']
modelname = 'AllGothic'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 15:
tagstomodel = ['locscifi', 'femscifi', 'anatscifi', 'chiscifi']
modelname = 'AllSF'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
print('Done.')
| 2.015625 | 2 |
test/conftest.py | BvB93/noodles | 22 | 12787983 | <reponame>BvB93/noodles
import pytest
from test.workflows import workflows
from test.backends import backends
def pytest_addoption(parser):
# parser.addoption("--all", action="store_true",
# help="run all combinations")
parser.addoption(
"--workflow",
help="run test only on specified workflow")
parser.addoption(
"--backend",
help="run test only using specified backend")
def pytest_generate_tests(metafunc):
if 'workflow' in metafunc.fixturenames:
selection = metafunc.config.getoption('workflow')
if selection is None:
metafunc.parametrize(
"workflow", list(workflows.values()),
ids=list(workflows.keys()))
else:
metafunc.parametrize(
"workflow", [workflows[selection]],
ids=[selection])
if 'backend' in metafunc.fixturenames:
selection = metafunc.config.getoption('backend')
if selection is None:
metafunc.parametrize(
"backend", list(backends.values()),
ids=list(backends.keys()))
else:
metafunc.parametrize(
"backend", [backends[selection]],
ids=[selection])
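# Example invocation (the names below are placeholders; valid keys come from
# test.workflows.workflows and test.backends.backends):
#   pytest --workflow=<workflow-key> --backend=<backend-key>
# Omitting either option parametrizes the tests over every registered value.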
try:
import xenon
except ImportError:
pass
else:
@pytest.fixture(scope="session")
def xenon_server(request):
print("============== Starting Xenon-GRPC server ================")
m = xenon.init(do_not_exit=True, disable_tls=False, log_level='INFO')
yield m
print("============== Closing Xenon-GRPC server =================")
for scheduler in xenon.Scheduler.list_schedulers():
jobs = list(scheduler.get_jobs())
statuses = scheduler.get_job_statuses(jobs)
for status in statuses:
if status.running:
print("xenon job {} still running, cancelling ... "
.format(status.job.id), end='')
try:
status = scheduler.cancel_job(status.job)
if not status.done:
scheduler.wait_until_done(status.job)
except xenon.XenonException:
print("not Ok")
else:
print("Ok")
m.__exit__(None, None, None)
@pytest.fixture
def local_filesystem(request, xenon_server):
fs = xenon.FileSystem.create(adaptor='file')
yield fs
fs.close()
@pytest.fixture
def local_scheduler(request, xenon_server):
scheduler = xenon.Scheduler.create(adaptor='local')
yield scheduler
scheduler.close()
| 2.046875 | 2 |
src/ruv_dl/organize.py | HaukurPall/ruv_dl | 2 | 12787984 | <gh_stars>1-10
import hashlib
import logging
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from ruv_dl.storage import EpisodeDownload
log = logging.getLogger(__name__)
ROMAN_NUMERALS_TO_INT = {
"I": 1,
"II": 2,
"III": 3,
"IV": 4,
"V": 5,
"VI": 6,
"VII": 7,
"VIII": 8,
"IX": 9,
"X": 10,
}
ROMAN_NUMERALS = set(ROMAN_NUMERALS_TO_INT.keys())
ROMAN_NUMERALS_REGEX = "("
for roman_numeral in ROMAN_NUMERALS:
ROMAN_NUMERALS_REGEX += roman_numeral + "|"
ROMAN_NUMERALS_REGEX = ROMAN_NUMERALS_REGEX[:-1] + ")$"
def _format_show_name(
show_name: str, season_num: int, show_num_tuple: Optional[Tuple[int, int]], original_title: str, quality: str
) -> str:
"""
Format the show name as 'Show Name - SxxEyy' when show_num_tuple holds two equal episode numbers,
as 'Show Name - SxxEyy-Ezz' when the two episode numbers differ,
and as 'Show Name - Sxx - Original title' when show_num_tuple is None.
The quality suffix is appended in every case.
"""
if show_num_tuple is not None:
show_num_start, show_num_end = show_num_tuple
if show_num_start == show_num_end:
return f"{show_name} - S{season_num:02}E{show_num_start:02}{quality}"
else:
return f"{show_name} - S{season_num:02}E{show_num_start:02}-E{show_num_end:02}{quality}"
else:
return f"{show_name} - S{season_num:02} - {original_title}{quality}"
def _guess_show_num(episode_name: str) -> Optional[Tuple[int, int]]:
"""
Guess the show number from the episode name.
Supports:
Exx-Eyy
Exx
YYYYY show_num ZZZZZZZZ ...
Returns:
(show_num_start, show_num_end), where show_num_end equals show_num_start if only one show number is present.
If no show number can be guessed, returns None.
"""
one_episode_regexp = re.compile(r"^E(?P<show_num_start>\d+)$")
two_episode_regexp = re.compile(r"^E(?P<show_num_start>\d+)-E(?P<show_num_end>\d+)$")
match = two_episode_regexp.match(episode_name)
if match:
return (
int(match.group("show_num_start")),
int(match.group("show_num_end")),
)
match = one_episode_regexp.match(episode_name)
if match:
return (int(match.group("show_num_start")), int(match.group("show_num_start")))
try:
show_num = int(episode_name.split(" ")[1])
return (show_num, show_num)
except (IndexError, ValueError):
return None
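# Illustrative behaviour of the helpers above (episode names and the quality
# string are made up):
#   _guess_show_num("E03")        -> (3, 3)
#   _guess_show_num("E01-E02")    -> (1, 2)
#   _guess_show_num("Þáttur 7")   -> (7, 7)   (falls through to the split-on-space case)
#   _guess_show_num("Lokaþáttur") -> None
#   _format_show_name("Show", 2, (3, 3), "E03", " (1080p)") -> "Show - S02E03 (1080p)"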
def organize(episodes_to_organize: List[Path], destination_dir: Path, translations: Dict[str, str], dry_run=True):
"""
Organize shows into seasons and directories.
"""
def map_path(show, destination_dir: Path, translations: Dict[str, str]) -> Optional[Path]:
log.info(f"Processing {show}")
show_regex_match = re.match(EpisodeDownload.file_name_regexp("mp4"), show.name)
if show_regex_match is None:
log.warning(f"Skipping {show} (no regex match)")
return None
program_name_is, episode_name, program_name_en, quality = show_regex_match.groups()
if program_name_en == "None":
if program_name_is in translations:
program_name_en = translations[program_name_is]
log.info(f"Foreign title translation {program_name_en}")
else:
log.warning(f"Skipping {show} (no foreign title)")
return None
season_num = 1
roman_numeral_regex_match = re.search(ROMAN_NUMERALS_REGEX, program_name_en)
if roman_numeral_regex_match is not None:
roman_numeral = roman_numeral_regex_match.group(0)
season_num = ROMAN_NUMERALS_TO_INT[roman_numeral]
program_name_en = program_name_en.replace(roman_numeral_regex_match.group(0), "").strip()
season_dir = destination_dir / program_name_en / f"Season {season_num:02}"
show_name = _format_show_name(program_name_en, season_num, _guess_show_num(episode_name), episode_name, quality)
return season_dir / (show_name + ".mp4")
path_mapping = {
episode_to_organize: map_path(episode_to_organize, destination_dir, translations)
for episode_to_organize in episodes_to_organize
}
for old_path, new_path in path_mapping.items():
if new_path is None:
continue
if new_path.exists():
log.warning(f"{new_path} is already in {destination_dir}. Not moving.")
if hashlib.md5(new_path.read_bytes()).hexdigest() == hashlib.md5(old_path.read_bytes()).hexdigest():
log.warning(f"{new_path} has same checksum as {old_path}")
else:
log.warning(f"{new_path} does not have the same checksum as {old_path}")
continue
if dry_run:
log.warning(f"Would move {old_path} to {new_path}")
else:
new_path.parent.mkdir(parents=True, exist_ok=True)
log.info(f"Moving {old_path} to {new_path}")
old_path.rename(new_path)
| 2.671875 | 3 |
src/data/Alynize.py | xiaoyuehe/TFFinance | 0 | 12787985 | <gh_stars>0
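# Splits the hard-coded toy series below into runs of consecutive rising or
# falling values (ties count as rising) and prints the values of each run.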
if __name__ == '__main__':
df = [1,2,3,5,7,6,4,6,8,9]
mid = []
cur = []
cur_dir = 1
cur_value = df[0]
cur.append(cur_value)
mid.append(cur)
for i in range(len(df)-1):
new_value = df[i+1]
new_dir = 1 if new_value >= cur_value else -1
cur_value = new_value
if new_dir * cur_dir < 0:
cur = []
mid.append(cur)
cur_dir = new_dir
cur.append(cur_value)
for i in range(len(mid)):
print("-"*20)
for j in range(len(mid[i])):
print(mid[i][j])
| 2.875 | 3 |
recipes/Python/576587_Sort_sections_keys_ini/recipe-576587.py | tdiprima/code | 2,023 | 12787986 | #!/usr/bin/python
# -*- coding: cp1250 -*-
__version__ = '$Id: sort_ini.py 543 2008-12-19 13:44:59Z mn $'
# author: <NAME>
import sys
USAGE = 'USAGE:\n\tsort_ini.py file.ini'
def sort_ini(fname):
"""sort .ini file: sorts sections and in each section sorts keys"""
f = file(fname)
lines = f.readlines()
f.close()
section = ''
sections = {}
for line in lines:
line = line.strip()
if line:
if line.startswith('['):
section = line
continue
if section:
try:
sections[section].append(line)
except KeyError:
sections[section] = [line, ]
if sections:
sk = sections.keys()
sk.sort()
for k in sk:
vals = sections[k]
vals.sort()
print k
print '\n'.join(vals)
print
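# Example (illustrative): given an ini file containing
#   [beta]
#   z = 1
#   a = 2
#   [alpha]
#   key = 3
# the script prints [alpha] (with its keys sorted) before [beta]; the input
# file itself is not modified.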
if '--version' in sys.argv:
print __version__
elif len(sys.argv) < 2:
print USAGE
else:
sort_ini(sys.argv[1])
| 2.8125 | 3 |
tally_ho/apps/tally/models/quarantine_check.py | onaio/tally-ho | 12 | 12787987 | from django.db import models
from django.utils.translation import ugettext as _
import reversion
from tally_ho.libs.models.base_model import BaseModel
from tally_ho.apps.tally.models.user_profile import UserProfile
class QuarantineCheck(BaseModel):
class Meta:
app_label = 'tally'
user = models.ForeignKey(UserProfile, null=True, on_delete=models.PROTECT)
name = models.CharField(max_length=256, unique=True)
method = models.CharField(max_length=256, unique=True)
value = models.FloatField(default=0)
percentage = models.FloatField(default=0)
active = models.BooleanField(default=False)
description = models.TextField(null=True, blank=True)
def local_name(self):
return _(self.name)
reversion.register(QuarantineCheck)
| 1.984375 | 2 |
alvi/tests/resources/client/local_python_client/__init__.py | alviproject/alvi | 10 | 12787988 | import logging
import importlib
import multiprocessing
from alvi.tests.resources.base import Resource
logger = logging.getLogger(__name__)
class LocalPythonClient(Resource):
def __init__(self):
logger.info("setting up clients")
self._clients = []
for scene in self.scenes:
module_name, class_name = scene.rsplit(".", 1)
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
process = multiprocessing.Process(target=class_.start)
process.start()
self._clients.append(process)
def destroy(self):
logger.info("terminating clients")
for client in self._clients:
client.terminate()
@property
def scenes(self):
#TODO following scenes could be autodiscovered
PREFIX = 'alvi.tests.resources.client.local_python_client.scenes.'
return (
PREFIX + 'graph.create_node.GraphCreateNode',
PREFIX + 'graph.update_node.GraphUpdateNode',
PREFIX + 'graph.remove_node.GraphRemoveNode',
PREFIX + 'graph.add_multi_marker.GraphAddMultiMarker',
PREFIX + 'graph.marker.GraphMarker',
PREFIX + 'array.create_node.ArrayCreateNode',
PREFIX + 'array.update_node.ArrayUpdateNode',
PREFIX + 'tree.create_node.TreeCreateNode',
PREFIX + 'tree.append_and_insert.TreeAppendAndInsert',
PREFIX + 'tree.marker.TreeMarker',
PREFIX + 'tree.multi_marker.TreeMultiMarker',
)
| 2.3125 | 2 |
bindsnet_master/bindsnet/preprocessing/__init__.py | Singular-Brain/ProjectBrain | 6 | 12787989 | <reponame>Singular-Brain/ProjectBrain
from .preprocessing import AbstractPreprocessor
| 0.960938 | 1 |
results/reorganize.py | UoB-HPC/everythingsreduced | 0 | 12787990 | import pandas as pd
import glob
import csv
files = [
"a100-results.csv",
"clx-1S-results.csv",
"clx-results.csv",
"gen9-results.csv",
"mi100-results.csv",
# "rome-results-aocc.csv",
"rome-results-cce.csv"]
csv_frames = []
for f in files:
csv_frames.append(pd.read_csv(f, skipinitialspace=True))
df = pd.concat(csv_frames, axis=0, ignore_index=True)
df.loc[df['model'] == 'kokkos-sycl',['model']] = 'kokkos'
df.set_index(["kernel", "model", "arch", "compiler"], inplace=True)
df.sort_index(inplace=True)
avg = df.groupby(level=["kernel", "model", "arch", "compiler"]).mean()
peaks = pd.read_csv("peaks.csv", skipinitialspace=True)
peaks= pd.Series(peaks.bandwidth.values, index=peaks.arch).to_dict()
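# peakmap maps an arch label to (socket-count multiplier, key into peaks.csv);
# measured bandwidth below is divided by multiplier * peak so results are
# expressed as a fraction of the full node's peak bandwidth.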
peakmap= {'rome': (2, 'EPYC 7742'),
'clx_1S': (1, 'Xeon 6230'),
'clx': (2, 'Xeon 6230'),
'gen9': (1, 'Core 6770HQ')
}
arches = avg.index.unique(level='arch')
for arch in arches:
try:
mul, key = peakmap[arch]
except KeyError:
mul, key = 1, arch
avg.loc[(slice(None), slice(None), arch), 'bandwidth'] /= (mul*peaks[key])
app_name_map = {
"openmp": "OpenMP",
"kokkos-sycl" : "Kokkos (SYCL)",
"omp-target": "OpenMP (target)",
"onedpl": "oneDPL",
"raja": "Raja",
"kokkos": "Kokkos",
"sycl": "SYCL",
}
app_order = ['openmp', 'kokkos', 'raja', 'sycl', 'onedpl']
subapp_map = {
'openmp' : 'openmp',
'omp-target' : 'openmp',
'kokkos' : 'kokkos',
'kokkos-sycl' : 'kokkos',
'raja' : 'raja',
'sycl' : 'sycl',
'onedpl' : 'onedpl',
}
platform_name_map = {
'clx' : "2 x Intel® Xeon® Gold 6230",
'clx_1S' : "1 x Intel® Xeon® Gold 6230",
'a100' : "NVIDIA A100",
'mi100' : "AMD MI100",
'rome' : '2 x AMD EPYC 7742',
'rome_cce' : '2 x AMD EPYC 7742',
'rome_aocc' : '2 x AMD EPYC 7742',
'gen9' : 'Intel® Iris® Pro 580'
}
for kernel in avg.index.unique(level='kernel'):
with open(f"{kernel}.csv", "w") as fp:
ocsv = csv.writer(fp)
kslice = avg.loc[kernel]
kslice.index = kslice.index.remove_unused_levels()
models = kslice.index.unique(level='model')
ocsv.writerow(["Device"] + list([app_name_map[x] for x in models]))
for arch in arches:
res = [platform_name_map[arch]]
for m in models:
try:
v = avg.loc[(kernel, m, arch),'bandwidth'][0]*100
except KeyError:
v = 'X'
res.append(v)
ocsv.writerow(res)
| 2.3125 | 2 |
scripts/slave/recipe_modules/gatekeeper/api.py | bopopescu/chromium-build | 0 | 12787991 | <reponame>bopopescu/chromium-build
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
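# Illustrative shape of one entry in gatekeeper_trees_json (keys inferred from
# the option handling below; the values are placeholders, not a real config):
#   "chromium": {
#     "config": "chromium_gatekeeper.json",
#     "status-url": "https://example-status.appspot.com",
#     "set-status": true,
#     "open-tree": true,
#     "masters": {"master.chromium": ["*"]}
#   }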
class Gatekeeper(recipe_api.RecipeApi):
"""Module for Gatekeeper NG."""
def __call__(self, gatekeeper_default_json, gatekeeper_trees_json):
config = self.m.json.read(
'reading %s' % self.m.path.basename(gatekeeper_trees_json),
gatekeeper_trees_json,
).json.output
for tree_name, tree_args in config.iteritems():
# TODO(martiniss): create a creds recipe module to make a nice path
# reference to /creds
# Use tree-specific config if specified, otherwise use default.
# Tree-specific configs must be relative to the trees file.
gatekeeper_json = gatekeeper_default_json
if tree_args.get('config'):
assert '..' not in tree_args['config'].split('/')
gatekeeper_json = self.m.path.join(
self.m.path.dirname(gatekeeper_trees_json),
*tree_args['config'].split('/'))
args = [
'--json', gatekeeper_json, '--email-app-secret-file',
'/creds/gatekeeper/mailer_password',
'--milo-creds',
self.m.service_account.get_json_path('gatekeeper'),
]
if tree_args.get('status-url'):
args.extend(['--status-url', tree_args['status-url']])
if tree_args.get('sheriff-url'):
args.extend(['--sheriff-url', tree_args['sheriff-url']])
if tree_args.get('set-status'):
args.append('--set-status')
if tree_args.get('open-tree'):
args.append('--open-tree')
if tree_args.get('track-revisions'):
args.append('--track-revisions')
if tree_args.get('revision-properties'):
args.extend(['--revision-properties', tree_args['revision-properties']])
if tree_args.get('build-db'):
args.extend(['--build-db', tree_args['build-db']])
if tree_args.get('password-file'):
args.extend(['--password-file', tree_args['password-file']])
if tree_args.get('use-project-email-address'):
args.extend(['--default-from-email',
<EMAIL>' % tree_name])
elif tree_args.get('default-from-email'): # pragma: nocover
args.extend(['--default-from-email', tree_args['default-from-email']])
if tree_args.get('filter-domain'):
args.extend(['--filter-domain', tree_args['filter-domain']])
if tree_args.get('status-user'):
args.extend(['--status-user', tree_args['status-user']])
if tree_args.get('masters'):
if self.c and self.c.use_new_logic:
valid_masters = []
modifies_tree = False
if tree_args.get('set-status') or tree_args.get('open-tree'):
modifies_tree = True
for master, allowed in tree_args['masters'].items():
if '*' in allowed:
valid_masters.append(master)
elif allowed:
valid_masters.append(master + ':' + ','.join(allowed))
args.extend(valid_masters)
else: #pragma: no cover
args.extend(tree_args['masters'])
try:
self.m.build.python(
'gatekeeper: %s' % str(tree_name),
self.package_repo_resource('scripts', 'slave', 'gatekeeper_ng.py'),
args,
)
except self.m.step.StepFailure:
pass
| 1.945313 | 2 |
worker/setup.py | unbalancedparentheses/indielangs | 4 | 12787992 | <filename>worker/setup.py
"""setup.py controls the build, testing, and distribution of the egg"""
from setuptools import setup, find_packages
import os.path
PROJECT = "indielangs"
setup(
name=PROJECT,
version="0.1",
description="Store list of languages detected by github in database",
keywords='programming languages indie',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/unbalancedparentheses/indielangs',
license='MIT License',
packages=find_packages(exclude=['ez_setup']),
zip_safe=True,
entry_points={
'console_scripts': [
'indielangs = indielangs.worker:main'
]
},
install_requires=[
'schedule==0.3.2',
'PyYAML==3.11',
'rethinkdb'
]
)
| 1.507813 | 2 |
tests/gdb/complete.py | cohortfsllc/cohort-cocl2-sandbox | 2,151 | 12787993 | # -*- python -*-
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gdb_test
class CompleteTest(gdb_test.GdbTest):
def test_complete(self):
# Test that continue causes the debugged program to run to completion.
self.gdb.ResumeCommand('continue')
def tearDown(self):
# Test program should run to completion and return a special value.
# Intentionally bypass superclass's tearDown as it assumes gdb exits first.
self.AssertSelLdrExits(expected_returncode=123)
self.gdb.Quit()
self.gdb.Wait()
if __name__ == '__main__':
gdb_test.Main()
| 2.5625 | 3 |
scheduler/apps/authentication/migrations/0003_merge_20191106_1150.py | bryan-munene/scheduler-api | 0 | 12787994 | <filename>scheduler/apps/authentication/migrations/0003_merge_20191106_1150.py
# Generated by Django 2.2.6 on 2019-11-06 08:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0002_auto_20191106_1018'),
('authentication', '0002_auto_20191106_1148'),
]
operations = [
]
| 1.304688 | 1 |
tests/tests.py | leomaurodesenv/multiple-nfe-reader | 0 | 12787995 | #--------------------------------------------
#-- Libraries
#--------------------------------------------
# system imports
import os
import sys
# add parent path to python paths
parentdir = os.path.join(os.path.dirname(__file__), '../')
sys.path.insert(0,parentdir)
# imports
import cv2
import datasets
from nfeReader import barcode, qrcode, ocr
#--------------------------------------------
#-- Testing
#--------------------------------------------
def showImage(imageArray, imageName="Without name"):
cv2.imshow('Image - %s' % imageName,imageArray)
cv2.waitKey(0)
cv2.destroyAllWindows()
def checkAllImages(imagesPath, decoder, display=False):
for image in imagesPath:
# decoding
decoding, imgArray = decoder(image)
for decodedItem in decoding:
print("-[INFO] Found {} code: {}".format(decodedItem['type'], decodedItem['data']))
# display
if display:
showImage(imgArray, image)
def checkOCRFromImages(imagesPath, decoder, display=False):
for image in imagesPath:
# decoding
decoding = decoder(image)
print("-[OCR] Found '{}':".format(image))
print(decoding)
# display
if display:
imgArray = cv2.imread(image,0)
showImage(imgArray, image)
#--------------------------------------------
#-- Main
#--------------------------------------------
if __name__ == '__main__':
# barcode
print('\n-- Testing: barcode')
checkAllImages(datasets.barcodeImages, barcode.decode, display=True)
# QR code
print('\n-- Testing: qrcode')
checkAllImages(datasets.qrcodeImages, qrcode.decode, display=True)
# OCR
print('\n-- Testing: OCR - Optical Character Recognition')
checkOCRFromImages(datasets.ocrImages, ocr.decode, display=True) | 2.65625 | 3 |
src/connectors/airwatch_devices.py | sfc-gh-kmaurya/SnowAlert | 144 | 12787996 | <gh_stars>100-1000
"""Airwatch
Collect Device information using API Key, Host, and CMSURL Authentication
"""
from runners.helpers import log
from runners.helpers import db
from runners.helpers.dbconfig import ROLE as SA_ROLE
from datetime import datetime
import requests
from requests.exceptions import HTTPError
from .utils import yaml_dump
PAGE_SIZE = 500
CONNECTION_OPTIONS = [
{
'name': 'api_key',
'title': "Airwatch API Key",
'prompt': "Your Airwatch API Key",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'host_airwatch',
'title': "Airwatch Host",
'prompt': "Your Airwatch Host",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'device_auth',
'title': "Device URL",
'prompt': "Your Airwatch CMS Auth for Device URL",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'custom_attributes_auth',
'title': "Custom Attributes URL",
'prompt': "Your Airwatch CMS Auth for Custom Attributes URL",
'type': 'str',
'secret': True,
'required': True,
},
]
LANDING_TABLE_COLUMNS_DEVICE = [
('INSERT_ID', 'NUMBER IDENTITY START 1 INCREMENT 1'),
('SNAPSHOT_AT', 'TIMESTAMP_LTZ(9)'),
('RAW', 'VARIANT'),
('EAS_IDS', 'VARIANT'),
('UDID', 'VARCHAR(256)'),
('SERIAL_NUMBER', 'VARCHAR(256)'),
('MAC_ADDRESS', 'VARCHAR(256)'),
('IMEI', 'VARCHAR(256)'),
('EAS_ID', 'VARCHAR(256)'),
('ASSET_NUMBER', 'VARCHAR(256)'),
('DEVICE_FRIENDLY_NAME', 'VARCHAR(256)'),
('LOCATION_GROUP_ID', 'VARIANT'),
('LOCATION_GROUP_NAME', 'VARCHAR(256)'),
('USER_ID', 'VARIANT'),
('USER_NAME', 'VARCHAR(256)'),
('DATA_PROTECTION_STATUS', 'NUMBER(38,0)'),
('USER_EMAIL_ADDRESS', 'VARCHAR(256)'),
('OWNERSHIP', 'VARCHAR(256)'),
('PLATFORM_ID', 'VARIANT'),
('PLATFORM', 'VARCHAR(256)'),
('MODEL_ID', 'VARIANT'),
('MODEL', 'VARCHAR(256)'),
('OPERATING_SYSTEM', 'VARCHAR(256)'),
('PHONE_NUMBER', 'VARCHAR(256)'),
('LAST_SEEN', 'TIMESTAMP_LTZ(9)'),
('ENROLLMENT_STATUS', 'VARCHAR(256)'),
('COMPLIANCE_STATUS', 'VARCHAR(256)'),
('COMPROMISED_STATUS', 'BOOLEAN'),
('LAST_ENROLLED_ON', 'TIMESTAMP_LTZ(9)'),
('LAST_COMPLIANCE_CHECK_ON', 'TIMESTAMP_LTZ(9)'),
('LAST_COMPROMISED_CHECK_ON', 'TIMESTAMP_LTZ(9)'),
('IS_SUPERVISED', 'BOOLEAN'),
('VIRTUAL_MEMORY', 'NUMBER(38,0)'),
('DEVICE_CAPACITY', 'FLOAT'),
('AVAILABLE_DEVICE_CAPACITY', 'FLOAT'),
('IS_DEVICE_DND_ENABLED', 'BOOLEAN'),
('IS_DEVICE_LOCATOR_ENABLED', 'BOOLEAN'),
('IS_CLOUD_BACKUP_ENABLED', 'BOOLEAN'),
('IS_ACTIVATION_LOCK_ENABLED', 'BOOLEAN'),
('IS_NETWORKTETHERED', 'BOOLEAN'),
('BATTERY_LEVEL', 'VARCHAR(256)'),
('IS_ROAMING', 'BOOLEAN'),
('SYSTEM_INTEGRITY_PROTECTION_ENABLED', 'BOOLEAN'),
('PROCESSOR_ARCHITECTURE', 'NUMBER(38,0)'),
('TOTAL_PHYSICAL_MEMORY', 'NUMBER(38,0)'),
('AVAILABLE_PHYSICAL_MEMORY', 'NUMBER(38,0)'),
('DEVICE_CELLULAR_NETWORK_INFO', 'VARIANT'),
('ENROLLMENT_USER_UUID', 'VARCHAR(256)'),
('ID', 'VARIANT'),
('UUID', 'VARCHAR(256)'),
]
LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES = [
('INSERT_ID', 'NUMBER IDENTITY START 1 INCREMENT 1'),
('SNAPSHOT_AT', 'TIMESTAMP_LTZ(9)'),
('RAW', 'VARIANT'),
('DEVICE_ID', 'INT'),
('UDID', 'VARCHAR(256)'),
('SERIAL_NUMBER', 'VARCHAR(256)'),
('ENROLLMENT_USER_NAME', 'VARCHAR(256)'),
('ASSET_NUMBER', 'VARCHAR(256)'),
('CUSTOM_ATTRIBUTES', 'VARIANT'),
]
def get_data(url: str, cms_auth: str, api_key: str, params: dict = {}) -> dict:
headers: dict = {
'Content-Type': 'application/json',
'aw-tenant-code': api_key,
'Accept': 'application/json',
'Authorization': cms_auth,
}
try:
log.debug(f"Preparing GET: url={url} with params={params}")
req = requests.get(url, params=params, headers=headers)
req.raise_for_status()
except HTTPError as http_err:
log.error(f"Error GET: url={url}")
log.error(f"HTTP error occurred: {http_err}")
raise
return req.json()
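# Example call (hypothetical host and credentials), mirroring how ingest()
# pages through the device search endpoint:
#   page = get_data('https://airwatch.example.com/api/mdm/devices/search',
#                   device_auth, api_key, {'PageSize': PAGE_SIZE, 'Page': 0})
#   devices = page['Devices']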
def connect(connection_name, options):
landing_table_device = f'data.airwatch_devices_{connection_name}_device_connection'
landing_table_custom_attributes = (
f'data.airwatch_devices_{connection_name}_custom_attributes_connection'
)
comment = yaml_dump(module='airwatch_devices', **options)
db.create_table(
name=landing_table_device, cols=LANDING_TABLE_COLUMNS_DEVICE, comment=comment, rw_role=SA_ROLE
)
db.create_table(
name=landing_table_custom_attributes,
cols=LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES,
comment=comment,
rw_role=SA_ROLE
)
return {'newStage': 'finalized', 'newMessage': "Airwatch ingestion tables created!"}
def ingest(table_name, options):
host_airwatch = options['host_airwatch']
api_key = options['api_key']
device_auth = options['device_auth']
custom_attributes_auth = options['custom_attributes_auth']
ingest_type = (
'device' if table_name.endswith('_DEVICE_CONNECTION') else 'custom_attributes'
)
timestamp = datetime.utcnow()
landing_table = f'data.{table_name}'
if ingest_type == 'device':
device_params: dict = {'PageSize': PAGE_SIZE, 'Page': 0}
url = f'https://{host_airwatch}/api/mdm/devices/search'
while 1:
result: dict = get_data(url, device_auth, api_key, device_params)
devices = result['Devices']
db.insert(
landing_table,
values=[
(
timestamp,
device,
device.get('EasIds'),
device.get('Udid'),
device.get('SerialNumber'),
device.get('MacAddress'),
device.get('Imei'),
device.get('EasId'),
device.get('AssetNumber'),
device.get('DeviceFriendlyName'),
device.get('LocationGroupId'),
device.get('LocationGroupName'),
device.get('UserId'),
device.get('UserName'),
device.get('DataProtectionStatus'),
device.get('UserEmailAddress'),
device.get('Ownership'),
device.get('PlatformId'),
device.get('Platform'),
device.get('ModelId'),
device.get('Model'),
device.get('OperatingSystem'),
device.get('PhoneNumber'),
device.get('LastSeen'),
device.get('EnrollmentStatus'),
device.get('ComplianceStatus'),
device.get('CompromisedStatus'),
device.get('LastEnrolledOn'),
device.get('LastComplianceCheckOn'),
device.get('LastCompromisedCheckOn'),
device.get('IsSupervised'),
device.get('VirtualMemory'),
device.get('DeviceCapacity'),
device.get('AvailableDeviceCapacity'),
device.get('IsDeviceDNDEnabled'),
device.get('IsDeviceLocatorEnabled'),
device.get('IsCloudBackupEnabled'),
device.get('IsActivationLockEnabled'),
device.get('IsNetworkTethered'),
device.get('BatteryLevel'),
device.get('IsRoaming'),
device.get('SystemIntegrityProtectionEnabled'),
device.get('ProcessorArchitecture'),
device.get('TotalPhysicalMemory'),
device.get('AvailablePhysicalMemory'),
device.get('DeviceCellularNetworkInfo'),
device.get('EnrollmentUserUuid'),
device.get('Id'),
device.get('Uuid'),
)
for device in devices
],
select=db.derive_insert_select(LANDING_TABLE_COLUMNS_DEVICE),
columns=db.derive_insert_columns(LANDING_TABLE_COLUMNS_DEVICE),
)
log.info(f'Inserted {len(devices)} rows ({landing_table}).')
yield len(devices)
processed_total = (result['Page'] + 1) * result['PageSize']
if processed_total >= result['Total']:
break
device_params['Page'] += 1
else:
custom_device_params: dict = {'PageSize': PAGE_SIZE, 'Page': 0}
url = f'https://{host_airwatch}/api/mdm/devices/customattribute/search'
while 1:
result: dict = get_data(
url, custom_attributes_auth, api_key, custom_device_params
)
device_attributes = result['Devices']
db.insert(
landing_table,
values=[
(
timestamp,
device_attr,
device_attr.get('DeviceId'),
device_attr.get('Udid'),
device_attr.get('SerialNumber'),
device_attr.get('EnrollmentUserName'),
device_attr.get('AssetNumber'),
device_attr.get('CustomAttributes'),
)
for device_attr in device_attributes
],
select=db.derive_insert_select(LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES),
columns=db.derive_insert_columns(
LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES
),
)
log.info(f'Inserted {len(device_attributes)} rows ({landing_table}).')
yield len(device_attributes)
processed_total = (result['Page'] + 1) * result['PageSize']
if processed_total >= result['Total']:
break
custom_device_params['Page'] += 1
| 2.234375 | 2 |
custom_components/idasen-desk-controller/const.py | Xlinx64/idasen-desk-controller | 1 | 12787997 | """
Constants for Idasen Desk Controller Integration
"""
import logging
LOGGER = logging.getLogger(__package__)
DOMAIN = 'idasen-desk-controller'
PLATFORMS = ["cover", "sensor", "switch"]
MIN_HEIGHT = 620
MAX_HEIGHT = 1270 # 6500
HEIGHT_TOLERANCE = 2.0
ADAPTER_NAME = 'hci0'
SCAN_TIMEOUT = 5
CONNECTION_TIMEOUT = 20
MOVEMENT_TIMEOUT = 30
| 1.84375 | 2 |
04 - String variables/code_challenge.py | geezerP/workshop | 0 | 12787998 | <reponame>geezerP/workshop
# ask a user to enter their first
# name and store it in a variable
# ask a user to enter their
# last name and store it in a variable
# print their full name
# Make sure you have a space
# between first and last name
# Make sure the first letter
# of first name and last name is uppercase
# Make sure the rest of the name is lowercase
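# One possible solution sketch (variable names are just examples):
# first_name = input("Enter your first name: ")
# last_name = input("Enter your last name: ")
# print(first_name.capitalize() + " " + last_name.capitalize())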
| 2.953125 | 3 |
gwaripper/migrations/0001_normalize.py | nilfoer/gwaripper | 6 | 12787999 | <filename>gwaripper/migrations/0001_normalize.py<gh_stars>1-10
import os
import datetime
import sqlite3
import logging
import re
from .. import config
date = '2020-11-08'
logger = logging.getLogger(__name__)
FILENAME_MAX_LEN = 185
DELETED_USR_FOLDER = "deleted_users"
UNKNOWN_USR_FOLDER = "_unknown_user_files"
def sanitize_filename(subpath: str, filename: str):
# folder names must not start or end with spaces
assert subpath.strip() == subpath
# [^\w\-_\.,\[\] ] -> match any char that is not (^) one of \w \- _ . , [ ] or whitespace,
# replace any that isn't in that class with _
chars_remaining = FILENAME_MAX_LEN - len(subpath)
assert chars_remaining >= 30
return re.sub(r"[^\w\-_.,\[\] ]", "_", filename.strip()[:chars_remaining].strip())
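# Illustrative behaviour (made-up inputs): characters outside word chars,
# space, '-', '_', '.', ',', '[' and ']' become underscores, and the filename
# is truncated so subpath + filename fits FILENAME_MAX_LEN:
#   sanitize_filename("sub", "Title: a/b?") -> "Title_ a_b_"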
def date_str_to_date(date_str: str) -> datetime.date:
try:
# important that all dates are of type date and not datetime since
# you can't compare the two
date = datetime.date(year=2000, month=1, day=1) if date_str is None else (
datetime.datetime.strptime(date_str, '%Y-%m-%d').date())
except ValueError:
date = datetime.date(year=2000, month=1, day=1)
return date
def combine_duplicate_url(c):
# get duplicate urls
# some (from very old version) rows only have url_file
rows = c.execute("""
--SELECT url FROM Downloads WHERE url IS NOT NULL GROUP BY url HAVING count(*) > 1
--UNION
--SELECT url_file FROM Downloads WHERE url IS NULL GROUP BY url_file HAVING count(*) > 1
--ORDER BY url
-- could either select all url or url_file and then query for that
-- or use the group by that we're doing anyway to get all the ids
-- using aggregate funciton group_concat and then later just
-- getting the rows by id
SELECT group_concat(id, ',') as ids FROM Downloads WHERE url IS NOT NULL
GROUP BY url HAVING count(*) > 1
UNION
SELECT group_concat(id, ',') as ids FROM Downloads WHERE url IS NULL
GROUP BY url_file HAVING count(*) > 1
""").fetchall()
#print("\n".join(f"{url[0]}" for url in rows))
for r in rows:
duplicate_row_ids = [int(i) for i in r[0].split(',')]
c.execute(f"""
SELECT * FROM Downloads
WHERE id IN ({','.join('?' for _ in range(len(duplicate_row_ids)))})
ORDER BY id""", duplicate_row_ids)
duplicate_rows = c.fetchall()
assert len(duplicate_rows) > 1
# keep latest / greatest id
keep = duplicate_rows[-1]
del duplicate_rows[-1]
diffs = []
update_dict = {}
for row in duplicate_rows:
for k in keep.keys():
if k == 'id':
continue
# for logging diffs
if row[k] and k not in ('date', 'time') and keep[k] != row[k]:
diffs.append(f"Keep[{k}] = {keep[k]}")
diffs.append(f"Del[{k}] = {row[k]}")
# the row we want to keep has no value set but this one has
if not keep[k] and row[k]:
# later rows might overwrite this but we prioritize rows that
# were added later (and thats the order of our iteration)
update_dict[k] = row[k]
# DEL ROW!!
c.execute("DELETE FROM Downloads WHERE id = ?", (row['id'],))
if diffs:
logger.info("Title: %s by %s\n%s\n\n", keep['title'], keep['author_page'],
'\n'.join(diffs))
if update_dict:
upd_cols = [f"{col} = :{col}" for col in update_dict]
update_dict['id'] = keep['id']
c.execute(f"""
UPDATE Downloads SET
{', '.join(col_set_stmt for col_set_stmt in upd_cols)}
WHERE id = :id""", update_dict)
def upgrade(db_con):
# NOTE: don't use imported code here otherwise changes to that code
# might break the migration!
rf = db_con.row_factory
db_con.row_factory = sqlite3.Row
c = db_con.cursor()
db_con.row_factory = rf
combine_duplicate_url(c)
c.execute("ALTER TABLE Downloads RENAME TO temp")
c.execute("DROP TABLE Downloads_fts_idx")
c.execute("DROP TRIGGER Downloads_ai")
c.execute("DROP TRIGGER Downloads_ad")
c.execute("DROP TRIGGER Downloads_au")
create_tables(c)
rows = c.execute("SELECT * FROM temp").fetchall()
# keep track of rowids for inserted filecollections and aliases so we don't
# have to query for them
    # reddit is the only filecol currently tracked in the db since it's the only
    # one that can contain audio files
reddit_id_collection_id = {}
alias_name_rowid_artist_set = {}
c.execute("INSERT OR IGNORE INTO Alias(name) VALUES (?)", (DELETED_USR_FOLDER,))
alias_name_rowid_artist_set[DELETED_USR_FOLDER] = (c.lastrowid, True)
c.execute("INSERT OR IGNORE INTO Alias(name) VALUES (?)", (UNKNOWN_USR_FOLDER,))
alias_name_rowid_artist_set[UNKNOWN_USR_FOLDER] = (c.lastrowid, True)
for r in rows:
date = date_str_to_date(r['date'])
reddit_user = r['reddit_user']
reddit_id = r['reddit_id']
alias = r['author_page']
alias = alias.strip() if alias is not None else alias
reddit_user_set = False
if (not reddit_user or reddit_user == 'None') and reddit_id:
# reddit info but we have a deleted user
reddit_user = DELETED_USR_FOLDER
elif reddit_user:
c.execute("INSERT OR IGNORE INTO Artist(name) VALUES (?)", (reddit_user,))
reddit_user_set = True
        # use a function since we have to do this twice: once for author_page
        # and once for reddit_user
        # remember whether we set an artist so we can update an alias' artist if we
        # later do get the reddit info for it
def get_or_create_alias(name):
try:
alias_id, artist_set = alias_name_rowid_artist_set[name]
if not artist_set and reddit_user_set:
c.execute("""UPDATE Alias SET
artist_id = (SELECT Artist.id FROM Artist WHERE name = ?)
WHERE Alias.id = ?""", (reddit_user, alias_id))
except KeyError:
c.execute("""INSERT INTO Alias(artist_id, name) VALUES (
(SELECT id FROM Artist WHERE name = ?), ?)""",
(reddit_user, name))
alias_id = c.lastrowid
alias_name_rowid_artist_set[name] = (c.lastrowid, reddit_user_set)
return alias_id
if not alias:
if reddit_user:
alias = reddit_user
else:
alias = UNKNOWN_USR_FOLDER
alias_id = get_or_create_alias(alias)
collection_id = None
if reddit_id:
try:
collection_id = reddit_id_collection_id[reddit_id]
except KeyError:
reddit_user_alias_id = get_or_create_alias(reddit_user) if reddit_user else None
submission_self_url = r['reddit_url']
if submission_self_url.startswith('http'):
submission_self_url = submission_self_url.replace('http:', 'https:')
else:
submission_self_url = f"https://www.reddit.com{submission_self_url}"
subpath = ""
# <v0.3 won't have a subpath
if date > datetime.date(year=2020, month=10, day=10):
#
# re-create subpath that was not added to DB previously due to a bug
#
nr_files_row = c.execute("SELECT count(*) FROM temp WHERE reddit_id = ?",
(reddit_id,)).fetchone()
nr_files = int(nr_files_row[0])
# since we don't add non-audio files to the DB we might have more files
# than nr_files and thus have a subpath
# -> test if the file exists without else assume a subpath
# user might have moved file to a backup but that's the best we can do
file_without_subpath = os.path.join(config.get_root(),
r['author_subdir'],
r['local_filename'])
file_found_without_subpath = os.path.isfile(file_without_subpath)
if nr_files >= 3 or not file_found_without_subpath:
subpath = sanitize_filename("", r['reddit_title'])[:70].strip()
c.execute("INSERT INTO RedditInfo(created_utc) VALUES (?)",
(r['created_utc'],))
reddit_info_id = c.lastrowid
file_collection_dict = {
"url": submission_self_url,
"id_on_page": reddit_id,
"title": r['reddit_title'],
"subpath": subpath,
"reddit_info_id": reddit_info_id,
# RedditInfo can't have a parent
"parent_id": None,
"alias_id": reddit_user_alias_id
}
c.execute("""
INSERT INTO FileCollection(
url, id_on_page, title, subpath, reddit_info_id, parent_id, alias_id
)
VALUES (
:url, :id_on_page, :title, :subpath, :reddit_info_id, :parent_id, :alias_id
)
""", file_collection_dict)
collection_id = c.lastrowid
reddit_id_collection_id[reddit_id] = c.lastrowid
filename = r['local_filename']
filename = filename if filename else ''
audio_file_dict = {
"id": r['id'],
"collection_id": collection_id,
"downloaded_with_collection": 1 if collection_id is not None else 0,
"date": date,
"description": r['description'],
"filename": filename,
"title": r['title'],
"url": r['url'] if r['url'] else r['url_file'],
"alias_id": alias_id,
"rating": r['rating'],
"favorite": r['favorite']
}
try:
c.execute("""
INSERT INTO AudioFile(
id, collection_id, downloaded_with_collection, date, description,
filename, title, url, alias_id, rating, favorite
)
VALUES(
:id, :collection_id, :downloaded_with_collection, :date, :description,
:filename, :title, :url, :alias_id, :rating, :favorite
)""", audio_file_dict)
except sqlite3.IntegrityError as err:
if "UNIQUE constraint failed" in str(err):
print("Skipped unhandled duplicate AudioFile with URL:", audio_file_dict['url'])
else:
raise
c.execute("DROP TABLE temp")
def create_tables(c):
c.execute("PRAGMA foreign_keys=off")
c.execute("""
CREATE TABLE AudioFile(
id INTEGER PRIMARY KEY ASC,
collection_id INTEGER,
downloaded_with_collection INTEGER NOT NULL DEFAULT 0,
date DATE NOT NULL,
-- removed: time TEXT,
description TEXT,
filename TEXT NOT NULL,
title TEXT,
-- removed: url_file TEXT,
url TEXT UNIQUE NOT NULL,
alias_id INTEGER NOT NULL,
rating REAL,
favorite INTEGER NOT NULL DEFAULT 0,
FOREIGN KEY (collection_id) REFERENCES FileCollection(id)
-- can't delete a FileCollection if there are still rows with
        -- its id as collection_id here
ON DELETE RESTRICT,
FOREIGN KEY (alias_id) REFERENCES Alias(id)
ON DELETE RESTRICT
)""")
c.execute("CREATE INDEX audio_file_collection_id_idx ON AudioFile(collection_id)")
c.execute("CREATE INDEX audio_file_alias_id_idx ON AudioFile(alias_id)")
    # so we can match aliases to an artist and use the artist name for displaying
    # all the files of its aliases
    # files will still be stored under the alias name, though, since without
    # reddit information we can't match an audio host user name (alias) to an artist
    # without user interaction, and we also can't match on similarity
    # matching later, once we have reddit info that links an alias to an artist, is also
    # not an option since we'd have to move the files, which might not be present
    # anymore (e.g. backed up somewhere else)
c.execute("""
CREATE TABLE Artist(
id INTEGER PRIMARY KEY ASC,
name TEXT UNIQUE NOT NULL
)""")
c.execute("""
CREATE TABLE Alias(
id INTEGER PRIMARY KEY ASC,
artist_id INTEGER,
name TEXT UNIQUE NOT NULL,
FOREIGN KEY (artist_id) REFERENCES Artist(id)
ON DELETE RESTRICT
)""")
# Indexes are implicitly created only in the case of PRIMARY KEY and UNIQUE statements
# so these are not needed
# c.execute("CREATE UNIQUE INDEX alias_name_idx ON Alias(name)")
# c.execute("CREATE UNIQUE INDEX artist_name_idx ON Artist(name)")
# on foreign keys they are not created automatically
c.execute("CREATE INDEX alias_artist_id_idx ON Alias(artist_id)")
c.execute("""
CREATE TABLE FileCollection(
id INTEGER PRIMARY KEY ASC,
url TEXT UNIQUE NOT NULL,
id_on_page TEXT,
title TEXT,
subpath TEXT NOT NULL,
reddit_info_id INTEGER,
parent_id INTEGER,
alias_id INTEGER NOT NULL,
FOREIGN KEY (reddit_info_id) REFERENCES RedditInfo(id)
ON DELETE RESTRICT,
FOREIGN KEY (parent_id) REFERENCES FileCollection(id)
ON DELETE RESTRICT,
FOREIGN KEY (alias_id) REFERENCES Alias(id)
ON DELETE RESTRICT
)""")
# we only need created_utc here (if at all) but put the structure in place anyway
# since we might expand it later
c.execute("""
CREATE TABLE RedditInfo(
id INTEGER PRIMARY KEY ASC,
created_utc REAL
-- basically a duplicate of the only AudioFile child's url
-- removed since ^: r_post_url TEXT, -- outgoing post url or link to self
-- replaced by FileCollection.id_on_page: reddit_id TEXT,
-- replaced by FileCollection.title: reddit_title TEXT,
-- replaced by FileCollection.url: reddit_url TEXT, -- permalink
-- replaced by FileCollection.alias_id: reddit_user TEXT,
-- could be extracted from url
-- subreddit TEXT
        -- should we save the selftext in the db?
-- selftext TEXT
)""")
# https://stackoverflow.com/a/9282556 <NAME>:
# usually using views will have slightly less overhead as the query
# parser/planner doesn't have to reparse the raw sql on each execution. It
# can parse it once, store its execution strategy, and then use that each
# time the query is actually run.
#
# The performance boost you see with this will generally be small, in the grand
# scheme of things. It really only helps if its a fast query that you're
# executing frequently. If its a slow query you execute infrequently, the
# overhead associated with parsing the query is insignificant.
c.execute("""
CREATE VIEW v_audio_and_collection_combined
AS
SELECT
AudioFile.id,
AudioFile.collection_id,
AudioFile.downloaded_with_collection,
AudioFile.date,
AudioFile.description,
AudioFile.filename,
AudioFile.title,
AudioFile.url,
AudioFile.alias_id,
AudioFile.rating,
AudioFile.favorite,
Alias.name as alias_name,
Artist.name as artist_name,
FileCollection.id as fcol_id,
FileCollection.url as fcol_url,
FileCollection.id_on_page as fcol_id_on_page,
FileCollection.title as fcol_title,
FileCollection.subpath as fcol_subpath,
FileCollection.reddit_info_id as fcol_reddit_info_id,
FileCollection.parent_id as fcol_parent_id,
FileCollection.alias_id as fcol_alias_id,
-- get alias name for FileCollection
-- artist_id of fcol and audiofile will be the same so we don't
-- have to query for that
(SELECT
Alias.name
FROM Alias WHERE Alias.id = FileCollection.alias_id) as fcol_alias_name,
RedditInfo.created_utc as reddit_created_utc
FROM AudioFile
LEFT JOIN FileCollection ON AudioFile.collection_id = FileCollection.id
LEFT JOIN RedditInfo ON FileCollection.reddit_info_id = RedditInfo.id
JOIN Alias ON Alias.id = AudioFile.alias_id
LEFT JOIN Artist ON Artist.id = Alias.artist_id
""")
# NOTE: sql fts view testing code below; idea is that since we split up
# title and collection/reddit_title into separate tables we'd
# use a view as content table for the fts index using a LEFT JOIN
#
# this way would probably require more disk space since we're inserting the
# collection title for every audio in a collection but since most collections
# (5791 - 337/5791 in my db) only have one audio (avg 1.12) it's fine
#
# other solution would be to have two fts5 indices, do two separate queries
# and then combine the result
c.execute("""
CREATE VIEW v_audio_and_collection_titles
AS
SELECT
AudioFile.id as audio_id,
FileCollection.title as collection_title,
AudioFile.title as audio_title
FROM AudioFile
LEFT JOIN FileCollection ON AudioFile.collection_id = FileCollection.id
""")
c.execute("""
-- full text-search virtual table
-- only stores the idx due to using parameter content='..'
-- -> external content table (here using a view)
-- but then we have to keep the content table and the idx up-to-date ourselves
CREATE VIRTUAL TABLE IF NOT EXISTS Titles_fts_idx USING fts5(
title, collection_title,
content='v_audio_and_collection_titles',
content_rowid='audio_id')""")
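    # Illustrative only (not executed by this migration): a later full-text search
    # against the external-content index above would join back to AudioFile via the
    # fts rowid, e.g.
    #   c.execute("SELECT rowid FROM Titles_fts_idx WHERE Titles_fts_idx MATCH ?",
    #             ("search term",))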
# since if or case/when are only allowed to be used to select between other expressions
# and not with INSERT:
# use TRIGGER's WHEN condition and then create multiple triggers with that
# c.execute("""-- Triggers to keep the FTS index up to date.
# CREATE TRIGGER AudioFile_ai_with_collection AFTER INSERT ON AudioFile
# WHEN new.collection_id IS NOT NULL
# BEGIN
# INSERT INTO Titles_fts_idx(rowid, title, collection_title)
# VALUES (
# new.id,
# new.title,
# -- subquery for collection title
# (SELECT title FROM FileCollection WHERE id = new.collection_id)
# );
# END""")
# c.execute("""
# CREATE TRIGGER AudioFile_ai_without_collection AFTER INSERT ON AudioFile
# WHEN new.collection_id IS NULL
# BEGIN
# INSERT INTO Titles_fts_idx(rowid, title, collection_title)
# VALUES (
# new.id,
# new.title,
# NULL
# );
# END""")
# in this case also possible using one trigger with case/when since we're
# inserting into the same table etc.
    # WHEN NULL does not work; it only appeared to work because the subquery
    # with WHERE FileCollection.id = NULL returned no rows, which means NULL will
    # be inserted (we could rely on that, but then the subquery would be run every time)
    # use WHEN new.collection_id IS NULL instead
c.execute("""
CREATE TRIGGER AudioFile_ai AFTER INSERT ON AudioFile
BEGIN
INSERT INTO Titles_fts_idx(rowid, title, collection_title)
VALUES (
new.id,
new.title,
(CASE
WHEN new.collection_id IS NULL THEN NULL
ELSE (SELECT title FROM FileCollection WHERE id = new.collection_id)
END)
);
END
""")
# the values inserted into the other columns must match the values
# currently stored in the table otherwise the results may be unpredictable
c.execute("""
CREATE TRIGGER AudioFile_ad AFTER DELETE ON AudioFile
BEGIN
INSERT INTO Titles_fts_idx(Titles_fts_idx, rowid, title, collection_title)
VALUES(
'delete',
old.id,
old.title,
(CASE
WHEN old.collection_id IS NULL THEN NULL
ELSE (SELECT title FROM FileCollection WHERE id = old.collection_id)
END)
);
END
""")
c.execute("""
CREATE TRIGGER AudioFile_au AFTER UPDATE ON AudioFile
BEGIN
-- delete old entry
INSERT INTO Titles_fts_idx(Titles_fts_idx, rowid, title, collection_title)
VALUES(
'delete',
old.id,
old.title,
(CASE
WHEN old.collection_id IS NULL THEN NULL
ELSE (SELECT title FROM FileCollection WHERE id = old.collection_id)
END)
);
-- insert new one
INSERT INTO Titles_fts_idx(rowid, title, collection_title)
VALUES (
new.id,
new.title,
(CASE
WHEN new.collection_id IS NULL THEN NULL
ELSE (SELECT title FROM FileCollection WHERE id = new.collection_id)
END)
);
END
""")
c.execute("PRAGMA foreign_keys=on")
| 3 | 3 |
glazier/lib/logs_test.py | ItsMattL/glazier | 1,233 | 12788000 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.logs."""
import os
import zipfile
from absl.testing import absltest
from glazier.lib import constants
from glazier.lib import file_util
from glazier.lib import logs
import mock
from pyfakefs.fake_filesystem_unittest import Patcher
TEST_ID = '1A19SEL90000R90DZN7A-1234567'
class LoggingTest(absltest.TestCase):
def testCollect(self):
with Patcher() as patcher:
files = [
os.path.join(constants.SYS_LOGS_PATH, 'log1.log'),
os.path.join(constants.SYS_LOGS_PATH, 'log2.log'),
]
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
patcher.fs.create_file(files[0], contents='log1 content')
patcher.fs.create_file(files[1], contents='log2 content')
logs.Collect(r'C:\glazier.zip')
with zipfile.ZipFile(r'C:\glazier.zip', 'r') as out:
with out.open(files[1].lstrip('/')) as f2:
self.assertEqual(f2.read(), b'log2 content')
def testCollectIOErr(self):
with Patcher() as patcher:
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
with self.assertRaises(logs.LogError):
logs.Collect(constants.SYS_LOGS_PATH)
@mock.patch.object(zipfile.ZipFile, 'write', autospec=True)
def testCollectValueErr(self, wr):
wr.side_effect = ValueError('ZIP does not support timestamps before 1980')
with Patcher() as patcher:
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
patcher.fs.create_file(os.path.join(constants.SYS_LOGS_PATH, 'log1.log'))
with self.assertRaises(logs.LogError):
logs.Collect(r'C:\glazier.zip')
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
def testGetLogsPath(self, wpe):
# WinPE
wpe.return_value = True
self.assertEqual(logs.GetLogsPath(), logs.constants.WINPE_LOGS_PATH)
# Host
wpe.return_value = False
self.assertEqual(logs.GetLogsPath(), logs.constants.SYS_LOGS_PATH)
@mock.patch.object(file_util, 'CreateDirectories')
@mock.patch.object(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
@mock.patch.object(logs.logging, 'FileHandler')
def testSetup(self, fh, wpe, ii, create_dir):
ii.return_value = TEST_ID
wpe.return_value = False
logs.Setup()
create_dir.assert_called_with(r'%s\glazier.log' %
logs.constants.SYS_LOGS_PATH)
fh.assert_called_with(r'%s\glazier.log' % logs.constants.SYS_LOGS_PATH)
@mock.patch.object(file_util, 'CreateDirectories')
@mock.patch.object(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
@mock.patch.object(logs.logging, 'FileHandler')
def testSetupError(self, fh, wpe, ii, create_dir):
ii.return_value = TEST_ID
wpe.return_value = False
fh.side_effect = IOError
with self.assertRaises(logs.LogError):
logs.Setup()
self.assertTrue(create_dir.called)
if __name__ == '__main__':
absltest.main()
| 1.851563 | 2 |
SubscribeMQTT.py | NipunaMadhushan/Location_from_beacon_signals | 0 | 12788001 | import paho.mqtt.client as mqtt
import json
import numpy as np
import pandas as pd
import cv2 as cv
from New.TrainModel import test_model, predict_location, linear_train_model, logistic_train_model, svm_train_model, \
rf_train_model
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe("IoT_Project")
def on_message(client, userdata, message):
msg = str(message.payload.decode("utf-8"))
print("Message: ", msg)
data = get_data(msg)
pred = predict_location(classifier, data)
print("Current Location: ", pred)
map_location(pred)
def get_data(msg):
data = {"f0:ec:af:cf:6c:e1": -150, "c9:a6:4d:9b:c0:8c": -150, "c2:b6:6e:70:fa:f7": -150,
"d9:5f:f5:4f:10:89": -150, "c4:52:32:5c:31:e7": -150, "e9:3c:4a:34:13:fb": -150,
"ed:61:e4:e8:22:30": -150, "ea:01:26:75:a4:c3": -150, "d0:4e:10:2e:cb:84": -150,
"e4:e0:0a:ae:fd:e2": -150, "fa:35:76:56:6f:e3": -150, "d5:b7:dc:69:ca:ae": -150,
"ca:81:7a:d7:55:49": -150, "e7:2b:ea:2f:95:c5": -150, "d4:32:fc:b5:f0:b5": -150}
all_beacons = list(data.keys())
msg_json = json.loads(msg)
beacons = list(msg_json.keys())
for x in beacons:
data[x] = msg_json[x]
data_list = []
for y in all_beacons:
data_list.append(data[y])
return data_list
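# Example of the expected shape (hypothetical payload, for illustration only):
# get_data('{"f0:ec:af:cf:6c:e1": -60, "c9:a6:4d:9b:c0:8c": -72}') returns the 15
# RSSI values in the fixed beacon order of the dict above, e.g. [-60, -72, -150, ...],
# where -150 stands in for beacons that were not present in the message.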
def map_location(prediction):
map = cv.imread("map.jpeg")
locations = [(275, 215), (75, 240), (135, 300), (208, 270), (355, 270), (420, 390), (320, 335), (535, 215),
(520, 275), (410, 260), (430, 215), (580, 180), (200, 230), (440, 360), (250, 255), (395, 290),
(320, 240), (360, 340), (380, 390), (250, 320), (410, 330), (480, 190), (460, 260)]
cv.circle(map, locations[prediction-1], 10, (0, 0, 255), thickness=5)
cv.imshow("Location", map)
cv.waitKey()
cv.destroyAllWindows()
# Train the model
classifier = rf_train_model()
test_model(classifier)
# Subscribe to topic
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("mqtt.eclipse.org", 1883, 60)
client.loop_forever()
| 2.578125 | 3 |
LogLevel.py | adamcrowe242/logprune | 0 | 12788002 |
import main, os
class LogLevel:
def __init__(self, levelName, directory):
self.levelName = levelName
self.unzippedFilePath = os.path.join(directory, levelName + ".log")
self.zippedFiles = []
self.totalZippedSize = 0
# add each zipped file of the logLevel to the list 'zippedFiles'
for fileName, filePath in main.listLogArchives(levelName, directory):
if ".gz" in fileName:
self.addZippedFile(filePath)
def addZippedFile(self, filePath):
self.zippedFiles.append({
"path": filePath,
"size": main.size(filePath),
"index": main.index(filePath)
})
self.totalZippedSize += main.size(filePath) # add the size to the total size of zipped logs
self.zippedFiles.sort(key=lambda x: x["index"]) # sort list by index
| 3.265625 | 3 |
pdf_bot/files/file.py | Joy-nath/telegram-pdf-bot | 1 | 12788003 | from telegram.constants import MAX_FILESIZE_DOWNLOAD
from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler
from pdf_bot.consts import (
BACK,
BEAUTIFY,
BY_PERCENT,
BY_SIZE,
CANCEL,
COMPRESS,
COMPRESSED,
CROP,
DECRYPT,
ENCRYPT,
EXTRACT_IMAGE,
EXTRACT_TEXT,
IMAGES,
OCR,
PDF_INFO,
PREVIEW,
RENAME,
ROTATE,
SCALE,
SPLIT,
TEXT_FILE,
TEXT_FILTER,
TEXT_MESSAGE,
TO_DIMENSIONS,
TO_IMAGES,
TO_PDF,
WAIT_CROP_OFFSET,
WAIT_CROP_PERCENT,
WAIT_CROP_TYPE,
WAIT_DECRYPT_PW,
WAIT_DOC_TASK,
WAIT_ENCRYPT_PW,
WAIT_EXTRACT_IMAGE_TYPE,
WAIT_FILE_NAME,
WAIT_IMAGE_TASK,
WAIT_ROTATE_DEGREE,
WAIT_SCALE_DIMENSION,
WAIT_SCALE_PERCENT,
WAIT_SCALE_TYPE,
WAIT_SPLIT_RANGE,
WAIT_TEXT_TYPE,
WAIT_TO_IMAGE_TYPE,
)
from pdf_bot.files.compress import compress_pdf
from pdf_bot.files.crop import (
ask_crop_type,
ask_crop_value,
check_crop_percent,
check_crop_size,
)
from pdf_bot.files.crypto import (
ask_decrypt_pw,
ask_encrypt_pw,
decrypt_pdf,
encrypt_pdf,
)
from pdf_bot.files.document import ask_doc_task
from pdf_bot.files.image import (
ask_image_results_type,
ask_image_task,
get_pdf_images,
get_pdf_preview,
pdf_to_images,
process_image_task,
)
from pdf_bot.files.ocr import add_ocr_to_pdf
from pdf_bot.files.rename import ask_pdf_new_name, rename_pdf
from pdf_bot.files.rotate import ask_rotate_degree, check_rotate_degree
from pdf_bot.files.scale import (
ask_scale_type,
ask_scale_value,
check_scale_dimension,
check_scale_percent,
)
from pdf_bot.files.split import ask_split_range, split_pdf
from pdf_bot.files.text import ask_text_type, get_pdf_text
from pdf_bot.language import set_lang
from pdf_bot.utils import cancel
def file_cov_handler():
conv_handler = ConversationHandler(
entry_points=[
MessageHandler(Filters.document, check_doc),
MessageHandler(Filters.photo, check_image),
],
states={
WAIT_DOC_TASK: [MessageHandler(TEXT_FILTER, check_doc_task)],
WAIT_IMAGE_TASK: [MessageHandler(TEXT_FILTER, check_image_task)],
WAIT_CROP_TYPE: [MessageHandler(TEXT_FILTER, check_crop_task)],
WAIT_CROP_PERCENT: [MessageHandler(TEXT_FILTER, check_crop_percent)],
WAIT_CROP_OFFSET: [MessageHandler(TEXT_FILTER, check_crop_size)],
WAIT_DECRYPT_PW: [MessageHandler(TEXT_FILTER, decrypt_pdf)],
WAIT_ENCRYPT_PW: [MessageHandler(TEXT_FILTER, encrypt_pdf)],
WAIT_FILE_NAME: [MessageHandler(TEXT_FILTER, rename_pdf)],
WAIT_ROTATE_DEGREE: [MessageHandler(TEXT_FILTER, check_rotate_degree)],
WAIT_SPLIT_RANGE: [MessageHandler(TEXT_FILTER, split_pdf)],
WAIT_TEXT_TYPE: [MessageHandler(TEXT_FILTER, check_text_task)],
WAIT_SCALE_TYPE: [MessageHandler(TEXT_FILTER, check_scale_task)],
WAIT_SCALE_PERCENT: [MessageHandler(TEXT_FILTER, check_scale_percent)],
WAIT_SCALE_DIMENSION: [MessageHandler(TEXT_FILTER, check_scale_dimension)],
WAIT_EXTRACT_IMAGE_TYPE: [
MessageHandler(TEXT_FILTER, check_get_images_task)
],
WAIT_TO_IMAGE_TYPE: [MessageHandler(TEXT_FILTER, check_to_images_task)],
},
fallbacks=[CommandHandler("cancel", cancel)],
allow_reentry=True,
)
return conv_handler
def check_doc(update, context):
doc = update.effective_message.document
if doc.mime_type.startswith("image"):
return ask_image_task(update, context, doc)
if not doc.mime_type.endswith("pdf"):
return ConversationHandler.END
if doc.file_size >= MAX_FILESIZE_DOWNLOAD:
_ = set_lang(update, context)
update.effective_message.reply_text(
"{desc_1}\n\n{desc_2}".format(
desc_1=_("Your file is too big for me to download and process"),
desc_2=_(
"Note that this is a Telegram Bot limitation and there's "
"nothing I can do unless Telegram changes this limit"
),
),
)
return ConversationHandler.END
context.user_data[PDF_INFO] = doc.file_id, doc.file_name
return ask_doc_task(update, context)
def check_image(update, context):
return ask_image_task(update, context, update.effective_message.photo[-1])
def check_doc_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text == _(CROP):
return ask_crop_type(update, context)
if text == _(DECRYPT):
return ask_decrypt_pw(update, context)
if text == _(ENCRYPT):
return ask_encrypt_pw(update, context)
if text in [_(EXTRACT_IMAGE), _(TO_IMAGES)]:
return ask_image_results_type(update, context)
if text == _(PREVIEW):
return get_pdf_preview(update, context)
if text == _(RENAME):
return ask_pdf_new_name(update, context)
if text == _(ROTATE):
return ask_rotate_degree(update, context)
if text in [_(SCALE)]:
return ask_scale_type(update, context)
if text == _(SPLIT):
return ask_split_range(update, context)
if text == _(EXTRACT_TEXT):
return ask_text_type(update, context)
if text == OCR:
return add_ocr_to_pdf(update, context)
if text == COMPRESS:
return compress_pdf(update, context)
if text == _(CANCEL):
return cancel(update, context)
return WAIT_DOC_TASK
def check_image_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BEAUTIFY), _(TO_PDF)]:
return process_image_task(update, context)
if text == _(CANCEL):
return cancel(update, context)
return WAIT_IMAGE_TASK
def check_crop_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BY_PERCENT), _(BY_SIZE)]:
return ask_crop_value(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_CROP_TYPE
def check_scale_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BY_PERCENT), _(TO_DIMENSIONS)]:
return ask_scale_value(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_SCALE_TYPE
def check_text_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text == _(TEXT_MESSAGE):
return get_pdf_text(update, context, is_file=False)
if text == _(TEXT_FILE):
return get_pdf_text(update, context, is_file=True)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_TEXT_TYPE
def check_get_images_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(IMAGES), _(COMPRESSED)]:
return get_pdf_images(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_EXTRACT_IMAGE_TYPE
def check_to_images_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(IMAGES), _(COMPRESSED)]:
return pdf_to_images(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_TO_IMAGE_TYPE
| 1.476563 | 1 |
pysa/nhht.py | holwech/pysa | 0 | 12788004 | <gh_stars>0
import numpy as np
import scipy.signal as signal
from . import utils
# Calculates the normalized HHt.
# Takes in all the IMFs, but not the residue. That is, the last row of the return value
# of the EMD function should not be included in the input variable "imfs".
def nhht(imfs, sample_frequency):
# Non-optimal fix to some array overwrite issue
imfs = np.copy(imfs)
n_imfs = len(imfs)
max_freq = sample_frequency / 2.0
amplitudes = np.zeros(imfs.shape, np.float32)
scaled_imfs = np.zeros(imfs.shape, np.float32)
frequencies = np.zeros(imfs.shape, np.float32)
for i in range(n_imfs):
scaled_imf, am = utils.scale_amplitudes(imfs[i])
scaled_imfs[i] = scaled_imf
h = signal.hilbert(scaled_imf)
amplitudes[i] = am
frequencies[i] = np.r_[
0.0,
0.5*(np.angle(-h[2:]*np.conj(h[0:-2]))+np.pi)/(2.0*np.pi) * np.float32(sample_frequency),
0.0
]
frequencies[i, 0] = frequencies[i, 1]
frequencies[i, -1] = frequencies[i, -2]
frequencies[i] = utils.check_rapid_changes_in_frequency(frequencies[i], max_freq)
return frequencies, amplitudes
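# Usage sketch (illustrative; `emd_imfs` is a hypothetical EMD output whose last row
# is the residue, which must be dropped as noted in the comment above):
#   imfs = emd_imfs[:-1]
#   frequencies, amplitudes = nhht(imfs, sample_frequency=500.0)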
def get_instantaneous_frequency(imfs, sample_frequency=500.0):
sample_frequency = float(sample_frequency)
max_freq = sample_frequency / 2.0
freq = np.zeros(imfs.shape, np.float)
for i in range(len(imfs)):
# Do Hilbert Transform - NB! Must be normalized (scaled amplitudes)
hi = signal.hilbert(imfs[i])
freq[i, :] = np.r_[
0.0,
0.5*(np.angle(-hi[2:]*np.conj(hi[0:-2]))+np.pi)/(2.0*np.pi) * sample_frequency,
0.0
]
freq[i, 0] = freq[i, 1]
freq[i, -1] = freq[i, -2]
for k in range(len(freq[i])):
if freq[i, k] > max_freq:
if k > 0:
freq[i, k] = freq[i, k-1]
else:
freq[i, k] = max_freq
# Check if change in frequency is unrealistic (too rapid change):
if k > 0:
if np.fabs(freq[i, k] - freq[i, k-1]) > 50.0:
if freq[i, k] > freq[i, k-1]:
freq[i, k] = freq[i, k-1]
else:
freq[i, k-1] = freq[i, k]
return freq
| 2.34375 | 2 |
mpas_analysis/docs/parse_quick_start.py | ytakano3/MPAS-Analysis | 43 | 12788005 | #!/usr/bin/env python
"""
A script for converting the README.md to a quick-start guide for inclusion
in the documentation
"""
from m2r import convert
def build_quick_start():
replace = {'# MPAS-Analysis': '# Quick Start Guide\n',
'[![Build Status]': '',
'[![Documentation Status]': '',
'':
'\n'}
skip = [('## conda-forge', '## Installation')]
outContent = ''
skipMode = False
with open('../README.md', 'r') as inFile:
for line in inFile.readlines():
for skipStart, skipEnd in skip:
if not skipMode and skipStart in line:
skipMode = True
if skipMode and skipEnd in line:
skipMode = False
if not skipMode:
for replaceString in replace:
if replaceString in line:
line = replace[replaceString]
break
outContent = outContent + line
outContent = convert(outContent)
with open('quick_start.rst', 'w') as outFile:
outFile.write('.. _quick_start:\n\n')
outFile.write(outContent)
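# Note (not part of the original script): nothing here calls the function; a docs
# build would import this module and run e.g. `build_quick_start()`, which reads
# ../README.md and writes quick_start.rst into the working directory.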
| 2.6875 | 3 |
scripts/singledataset_convert.py | chadrick-kwag/labeldat.a | 0 | 12788006 | <filename>scripts/singledataset_convert.py
#!/bin/python3
# this is a script file that converts all the saved json to actual trainable annotation json files.
# modified to work with a single dataset
import argparse
import json
import os
import sys
from PIL import Image
import sqlite3
import shutil
SQLITE_DB_LOCATION='/home/chadrick/github/test/mysite/db.sqlite3'
STATIC_PATH='/home/chadrick/github/test/mysite/homepage/static'
DS_IMG_PATH='/home/chadrick/github/test/mysite/homepage/static/datasets/imgs'
CURRENT_DIR=os.getcwd()
ds_finished_count=0
swap_count=0
below_zero_case=0
finished_count=0
no_label_count=0
def prepare_output_dir(dirpath):
# remove the dir
if os.path.exists(dirpath):
shutil.rmtree(dirpath)
os.makedirs(dirpath)
def convert_and_save(spjsondata,imgdir,dirfilelist,outputdir,dsid):
global ds_finished_count, no_label_count
ds_finished_count=0
for i in range(0,len(spjsondata)):
get_data_of_index = spjsondata[str(i)]
        # if there are no labels, then skip
if len(get_data_of_index)==0:
no_label_count+=1
continue
dsidformat = format(dsid,'03')
paddedindex = format(i,'03')
# outputfilename = outputdir+'_'+paddedindex+'.json'
basefilename = dsidformat+'_'+paddedindex+'.json'
outputfilename = os.path.join(outputdir,basefilename)
outfd = open(outputfilename,'w')
createjson={}
createjson['imgfile']=dirfilelist[i]
imgpath = os.path.join(imgdir,dirfilelist[i])
im=Image.open(imgpath)
w,h=im.size
createjson['w']=w
createjson['h']=h
arrlist=[]
for item in get_data_of_index:
startx=item['startX']
starty=item['startY']
widthx=item['widthX']
widthy=item['widthY']
x1=startx
y1=starty
x2=startx+widthx
y2=starty+widthy
            # reorder x1,y1,x2,y2 so that
            # x1 < x2 and y1 < y2
            swap_occured_flag = False
if x1>x2:
temp=x1
x1=x2
x2=temp
swap_occured_flag=True
if y1>y2:
temp=y1
y1=y2
y2=temp
swap_occured_flag=True
if swap_occured_flag:
# print("swap occured!")
global swap_count
swap_count+=1
if x1<0 or x2<0 or y1<0 or y2<0:
# print("below zero case occured!!")
global below_zero_case
below_zero_case+=1
objects_json={}
objects_json['name']=item['label']
rectjson={}
rectjson['x1']=x1
rectjson['y1']=y1
rectjson['x2']=x2
rectjson['y2']=y2
objects_json['rect']=rectjson
arrlist.append(objects_json)
createjson['objects']=arrlist
# outfd.write(str(createjson))
# outfd.flush()
## the proper way to write json to file is using dump not str(jsonobject)
        ## the former will save with double quotes, while the latter will save with single quotes. This does matter.
json.dump(createjson,outfd)
outfd.close()
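        # Illustration of the comment above (not from the original script):
        #   str({'label': 'car'})        -> "{'label': 'car'}"   (single quotes, not valid JSON)
        #   json.dumps({'label': 'car'}) -> '{"label": "car"}'   (double quotes, valid JSON)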
global finished_count
finished_count+=1
ds_finished_count+=1
parser = argparse.ArgumentParser(description="converting json to json that is compatible with training")
# parser.add_argument('--prepareonelot',action='store_const',const=True,default=False,help="use it when the user wants to save all the converted json and images in one dir")
# parser.add_argument('--zipprefix',nargs=1,type=str)
# parser.add_argument('--zipbasename',action='store', default="outputzip", help="zip file base name")
# parser.add_argument('--saveinonedir',action='store_const', const=True, default=False,help="save all video screenshots in one directory")
parser.add_argument('dsid',help="dsid to convert")
parseargs = parser.parse_args()
targetdsid = int(parseargs.dsid)
conn = sqlite3.connect(SQLITE_DB_LOCATION)
c = conn.cursor()
c.execute("SELECT * from homepage_save_progress where dsid=?",(targetdsid,))
results = c.fetchall()
results = sorted(results,key=lambda x: x[2])
print("result = {}".format(results))
if len(results) != 1:
print("query result size is not 1.")
sys.exit(0)
result = results[0]
savedprogressjsonfile = os.path.join(STATIC_PATH,result[3])
dsid = result[2]
datasetimgdir=os.path.join(DS_IMG_PATH,str(dsid))
conv_output_dir = os.path.join(CURRENT_DIR,str(dsid))
prepare_output_dir(conv_output_dir)
fd = open(savedprogressjsonfile,'r')
rawline=fd.readline()
firstconv=json.loads(rawline)
spjson=json.loads(firstconv)
spjsondata = spjson['data']
dirfilelist = sorted(os.listdir(datasetimgdir))
# check length
if len(spjsondata) != len(dirfilelist):
print("length do not match. abort")
sys.exit(1)
# now convert and save in to conv_output_dir
convert_and_save(spjsondata,datasetimgdir,dirfilelist,conv_output_dir,dsid)
print("finished convert and saving for dsid={} -- finish count={}".format(dsid,ds_finished_count))
# find the saved progress table.
# find the file that matches the dsid and username
#print stat
print("finished count={}".format(finished_count))
print("swap_count={}".format(swap_count))
print("below_zero_count={}".format(below_zero_case))
print("no label count={}".format(no_label_count))
fd.close()
print("finish code")
| 2.4375 | 2 |
orangecontrib/wavepy2/util/gui/ow_wavepy_process_widget.py | APS-XSD-OPT-Group/OASYS1-WavePy2 | 0 | 12788007 | # #########################################################################
# Copyright (c) 2020, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2020. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
from orangewidget import gui
from oasys.widgets import gui as oasysgui
from orangecontrib.wavepy2.util.gui.ow_wavepy_widget import WavePyWidget
from orangecontrib.wavepy2.util.wavepy_objects import OasysWavePyData
class WavePyProcessWidget(WavePyWidget):
CONTROL_AREA_HEIGTH = 900
CONTROL_AREA_WIDTH = 1600
MAX_WIDTH_NO_MAIN = CONTROL_AREA_WIDTH + 10
MAX_HEIGHT = CONTROL_AREA_HEIGTH + 10
inputs = [("WavePy Data", OasysWavePyData, "set_input"),]
outputs = [{"name": "WavePy Data",
"type": OasysWavePyData,
"doc": "WavePy Data",
"id": "WavePy_Data"}]
must_clean_layout = True
show_results_when_ready = True
def __init__(self, show_general_option_box=True, show_automatic_box=True, show_results_when_ready_box=True):
super(WavePyProcessWidget, self).__init__(show_general_option_box=show_general_option_box, show_automatic_box=show_automatic_box)
self.setFixedWidth(self.MAX_WIDTH_NO_MAIN)
self.setFixedHeight(self.MAX_HEIGHT)
if show_results_when_ready_box : gui.checkBox(self._general_options_box, self, 'show_results_when_ready', 'Show results when ready')
else: self.show_results_when_ready = False
gui.rubber(self.controlArea)
def set_input(self, data):
if not data is None:
data = data.duplicate()
self._initialization_parameters = data.get_initialization_parameters()
self._calculation_parameters = data.get_calculation_parameters()
self._process_manager = data.get_process_manager()
if self.is_automatic_run: self._execute()
def _execute(self):
self.progressBarInit()
if self.must_clean_layout: self._clear_wavepy_layout()
self.progressBarSet(10)
output_calculation_parameters = self._get_output_parameters()
self.progressBarSet(90)
self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)
self.controlArea.setFixedHeight(self.CONTROL_AREA_HEIGTH)
gui.rubber(self.controlArea)
output = OasysWavePyData()
output.set_process_manager(self._process_manager)
output.set_initialization_parameters(self._initialization_parameters)
output.set_calculation_parameters(output_calculation_parameters)
self.progressBarSet(100)
self.progressBarFinished()
self.send("WavePy Data", output)
if self.show_results_when_ready: self.show()
def _get_output_parameters(self):
raise NotImplementedError()
from orangecontrib.wavepy2.util.gui.ow_wavepy_widget import clear_layout
from wavepy2.util.plot.plot_tools import DefaultContextWidget
class WavePyProcessWidgetWithOptions(WavePyProcessWidget):
def __init__(self, show_general_option_box=True, show_automatic_box=True, show_results_when_ready_box=True):
super(WavePyProcessWidgetWithOptions, self).__init__(show_general_option_box=show_general_option_box, show_automatic_box=show_automatic_box, show_results_when_ready_box=show_results_when_ready_box)
self._options_area = oasysgui.widgetBox(self._wavepy_widget_area, "Options", addSpace=False, orientation="vertical",
width=self._get_option_area_width())
self._lateral_wavepy_widget_area = oasysgui.widgetBox(self._wavepy_widget_area, "", addSpace=False, orientation="vertical",
width=self.CONTROL_AREA_WIDTH - self._get_option_area_width())
def _get_option_area_width(self):
return 200
def _clear_wavepy_layout(self):
clear_layout(self._lateral_wavepy_widget_area.layout())
def _get_default_context(self):
return DefaultContextWidget(self._lateral_wavepy_widget_area)
| 0.953125 | 1 |
setup.py | regarmukesh3g/pythonPackages | 0 | 12788008 | #!/usr/bin/env python
"""
Setup for distribution package.
"""
from setuptools import setup
setup(name='dist_pdf',
version='1.0',
description='Distribution of data',
packages=['dist_pdf'],
      zip_safe=False)
| 1.195313 | 1 |
652.py | wilbertgeng/LeetCode_exercise | 0 | 12788009 | """652. Find Duplicate Subtrees"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def findDuplicateSubtrees(self, root):
"""
:type root: TreeNode
:rtype: List[TreeNode]
"""
hashmap = {}
res = []
self.solve(root, hashmap)
        # Concept - We can use a hashmap to store all the serialized subtrees and their root nodes
for val, node in hashmap.values():
if val > 1:
res.append(node) ## append node
return res
def solve(self, node, hashmap):
if not node:
return "X"
a = self.solve(node.left, hashmap)
b = self.solve(node.right, hashmap)
# Pre-order tree representation for storing the tree
temp = str(node.val) + " " + a + " " + b
if temp not in hashmap:
hashmap[temp] = [1, node]
else:
hashmap[temp][0] += 1
return temp
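    # Illustration of the serialization used above (not part of the original solution):
    # a leaf node 4 is encoded as "4 X X", and a node 2 whose only child is a left
    # leaf 4 becomes "2 4 X X X", so structurally identical subtrees map to the same string.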
| 3.84375 | 4 |
sysidentpy/polynomial_basis/tests/test_simulation.py | neylsoncrepalde/sysidentpy | 107 | 12788010 | from numpy.testing._private.utils import assert_allclose
from sysidentpy.polynomial_basis import PolynomialNarmax
from sysidentpy.utils.generate_data import get_miso_data, get_siso_data
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from numpy.testing import assert_raises
from sysidentpy.polynomial_basis import SimulatePolynomialNarmax
def test_get_index_from_regressor_code():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
regressor_space = np.array(
[
[0, 0],
[1001, 0],
[2001, 0],
[2002, 0],
[1001, 1001],
[2001, 1001],
[2002, 1001],
[2001, 2001],
[2002, 2001],
[2002, 2002],
]
)
index = s._get_index_from_regressor_code(
regressor_code=regressor_space, model_code=model
)
assert (index == np.array([1, 3, 5])).all()
def test_list_output_regressor():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
y_code = s._list_output_regressor_code(model)
assert (y_code == np.array([1001, 1001])).all()
def test_list_input_regressor():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
x_code = s._list_input_regressor_code(model)
assert (x_code == np.array([2001, 2002])).all()
def test_get_lag_from_regressor_code():
s = SimulatePolynomialNarmax()
list_regressor1 = np.array([2001, 2002])
list_regressor2 = np.array([1004, 1002])
max_lag1 = s._get_lag_from_regressor_code(list_regressor1)
max_lag2 = s._get_lag_from_regressor_code(list_regressor2)
assert max_lag1 == 2
assert max_lag2 == 4
def test_simulate():
x_train, x_valid, y_train, y_valid = get_siso_data(
n=1000, colored_noise=False, sigma=0.001, train_percentage=90
)
s = SimulatePolynomialNarmax()
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
# theta must be a numpy array of shape (n, 1) where n is the number of regressors
theta = np.array([[0.2, 0.9, 0.1]]).T
yhat, results = s.simulate(
X_test=x_valid, y_test=y_valid, model_code=model, theta=theta, plot=False
)
assert yhat.shape == (100, 1)
assert len(results) == 3
def test_simulate_theta():
x_train, x_valid, y_train, y_valid = get_siso_data(
n=1000, colored_noise=False, sigma=0.001, train_percentage=90
)
s = SimulatePolynomialNarmax(estimate_parameter=True)
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
yhat, results = s.simulate(
X_train=x_train,
y_train=y_train,
X_test=x_valid,
y_test=y_valid,
model_code=model,
plot=False,
)
theta = np.array([[0.2, 0.9, 0.1]]).T
assert_almost_equal(s.theta, theta, decimal=1)
def test_estimate_parameter():
assert_raises(TypeError, SimulatePolynomialNarmax, estimmate_parameter=1)
| 2.125 | 2 |
hokudai_furima/chat/migrations/0005_auto_20180514_0455.py | TetsuFe/hokuma | 1 | 12788011 | # Generated by Django 2.0.3 on 2018-05-14 04:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0004_auto_20180319_1220'),
]
operations = [
migrations.AddField(
model_name='chat',
name='product_seller',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product_seller', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='chat',
name='product_wanting_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product_wanting_user', to=settings.AUTH_USER_MODEL),
),
]
| 1.554688 | 2 |
build/lib/ThermoElectric/tests/test_tau_screened_coulomb.py | ariahosseini/Thermoelectric | 2 | 12788012 | """
Unit and regression test for the tau_screened_coulomb method.
"""
from ThermoElectric import tau_screened_coulomb
import numpy as np
from pytest import approx
def test_tau_screened_coulomb():
energy = np.array([[0.1]])
e_eff_mass = np.array([[0.23 * 9.109e-31]])
dielectric = 11.7
imp = np.array([[1e23]])
screen_len = np.array([[1e-7]])
expected_tau = 1.8e-10
calculated_tau = tau_screened_coulomb(energy=energy, mass_c=e_eff_mass,
                                          n_imp=imp, dielectric=dielectric, screen_len=screen_len)
assert approx(expected_tau, abs=1e-11) == calculated_tau
| 2.3125 | 2 |
custom_components/hpprinter/managers/config_flow_manager.py | tiberiushunter/hassio-conf | 2 | 12788013 | <gh_stars>1-10
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.helpers import config_validation as cv
from .. import LoginError
from ..api.HPPrinterAPI import ProductConfigDynDataAPI
from ..helpers.const import *
from ..managers.configuration_manager import ConfigManager
from ..models import AlreadyExistsError
from ..models.config_data import ConfigData
_LOGGER = logging.getLogger(__name__)
_CONF_ARR = [CONF_NAME, CONF_HOST]
class ConfigFlowManager:
config_manager: ConfigManager
options: Optional[dict]
data: Optional[dict]
config_entry: ConfigEntry
def __init__(self, config_entry: Optional[ConfigEntry] = None):
self.config_entry = config_entry
self.options = None
self.data = None
self._pre_config = False
if config_entry is not None:
self._pre_config = True
self.update_data(self.config_entry.data)
self._is_initialized = True
self._auth_error = False
self._hass = None
def initialize(self, hass):
self._hass = hass
if not self._pre_config:
self.options = {}
self.data = {}
self.config_manager = ConfigManager()
self._update_entry()
@property
def config_data(self) -> ConfigData:
return self.config_manager.data
async def update_options(self, options: dict, update_entry: bool = False):
new_options = {}
validate_login = False
config_entries = None
if update_entry:
config_entries = self._hass.config_entries
data = self.config_entry.data
name_changed = False
for conf in _CONF_ARR:
if data.get(conf) != options.get(conf):
validate_login = True
if conf == CONF_NAME:
name_changed = True
if name_changed:
entries = config_entries.async_entries(DOMAIN)
for entry in entries:
entry_item: ConfigEntry = entry
if entry_item.unique_id == self.config_entry.unique_id:
continue
if options.get(CONF_NAME) == entry_item.data.get(CONF_NAME):
raise AlreadyExistsError(entry_item)
new_options = {}
for key in options:
new_options[key] = options[key]
if update_entry:
for conf in _CONF_ARR:
if conf in new_options:
self.data[conf] = new_options[conf]
del new_options[conf]
self.options = new_options
self._update_entry()
if validate_login:
errors = await self.valid_login()
if errors is None:
config_entries.async_update_entry(self.config_entry, data=self.data)
else:
raise LoginError(errors)
return new_options
def update_data(self, data: dict, update_entry: bool = False):
new_data = None
if data is not None:
new_data = {}
for key in data:
new_data[key] = data[key]
self.data = new_data
if update_entry:
self._update_entry()
def _update_entry(self):
entry = ConfigEntry(0, "", "", self.data, "", "", {}, options=self.options)
self.config_manager.update(entry)
@staticmethod
def get_default_data():
fields = {
vol.Required(CONF_NAME, default=DEFAULT_NAME): str,
vol.Required(CONF_HOST): str,
}
data_schema = vol.Schema(fields)
return data_schema
def get_default_options(self):
config_data = self.config_data
fields = {
vol.Required(CONF_NAME, default=config_data.name): str,
vol.Required(CONF_HOST, default=config_data.host): str,
vol.Optional(CONF_STORE_DATA, default=config_data.should_store): bool,
vol.Required(
CONF_UPDATE_INTERVAL, default=config_data.update_interval
): cv.positive_int,
vol.Required(CONF_LOG_LEVEL, default=config_data.log_level): vol.In(
LOG_LEVELS
),
}
data_schema = vol.Schema(fields)
return data_schema
async def valid_login(self):
errors = None
config_data = self.config_manager.data
api = ProductConfigDynDataAPI(self._hass, self.config_manager)
try:
await api.async_get(True)
except LoginError as ex:
_LOGGER.info(
f"Unable to access {DEFAULT_NAME} ({config_data.host}), HTTP Status Code {ex.status_code}"
)
status_code = ex.status_code
if status_code not in [400, 404]:
status_code = 400
errors = {"base": f"error_{status_code}"}
return errors
| 2.046875 | 2 |
maml_vs_adapted_maml_src/dataloaders/union_sl_dataloaders.py | brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective | 0 | 12788014 | <reponame>brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective<filename>maml_vs_adapted_maml_src/dataloaders/union_sl_dataloaders.py
"""
Union of data sets for SL training.
"""
from typing import Union
import torchvision
from torch import Tensor
from torch.utils.data import Dataset
from pathlib import Path
import torch
from task2vec import Task2Vec
from models import get_model
import datasets
import task_similarity
def get_datasets(root: Union[str, Path], dataset_names: list[str]) -> list[torchvision.datasets]:
import datasets
root: Path = Path(root).expanduser() if isinstance(root, str) else root.expanduser()
data_sets: list[torchvision.datasets] = [datasets.__dict__[name](root=root)[0] for name in dataset_names]
return data_sets
class UnionDatasets(Dataset):
"""
todo:
- bisect into the right data set
- make sure we are using the right split
"""
def __init__(self, root: Union[str, Path], dataset_names: list[str], split: str):
root: Path = Path(root).expanduser() if isinstance(root, str) else root.expanduser()
# - set fields
self.root: Path = root
self.dataset_names: list[str] = dataset_names
        self.split: str = split
# - get data sets
        self.data_sets: list[torchvision.datasets] = get_datasets(root, dataset_names)
def __len__(self):
        total_number_of_data_examples: int = sum([len(dataset) for dataset in self.data_sets])
        return total_number_of_data_examples
def __getitem__(self, idx: int):
pass
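        # Possible approach for the todo above (sketch only, not implemented here):
        # keep cumulative lengths of self.data_sets, locate the right dataset with
        # bisect.bisect_right(cumulative_lengths, idx), then index that dataset at
        # idx minus the preceding cumulative length.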
# - tests
def go_through_hdml1_test():
# - get data set list
# dataset_names = ('stl10', 'mnist', 'cifar10', 'cifar100', 'letters', 'kmnist')
# dataset_names = ('mnist',)
dataset_names = ('stl10', 'letters', 'kmnist')
root: Path = Path('~/data').expanduser()
print(f'{root=}')
dataset_list: list[torchvision.datasets] = [datasets.__dict__[name](root=root)[0] for name in dataset_names]
print(f'{dataset_list=}')
device = torch.device(f"cuda:{0}" if torch.cuda.is_available() else "cpu")
print(f'{device=}')
# - get union data loader
    union_datasets: UnionDatasets = UnionDatasets(root, dataset_names, split='train')  # split value assumed; the original call omitted the required argument
# - go through the union data loader
if __name__ == '__main__':
go_through_hdml1_test()
print('Done!\n\a')
| 2.375 | 2 |
audiocards/__main__.py | ptrstn/anki-audiocards | 0 | 12788015 | <filename>audiocards/__main__.py
import argparse
from audiocards import __version__
from audiocards.core import create_deck
def parse_arguments():
parser = argparse.ArgumentParser(description="An Anki audio flash card generator")
parser.add_argument(
"--version", action="version", version="%(prog)s {}".format(__version__)
)
parser.add_argument("csv_path", help="Path to the csv file")
parser.add_argument("language", help="Language of flash cards")
parser.add_argument("deck_name", help="Name of the Anki deck")
parser.add_argument("--deck-id", help="Unique deck identifier")
parser.add_argument("--model-id", help="Unique model identifier")
return parser.parse_args()
def main():
args = parse_arguments()
print(args)
kwargs = {
"csv_path": args.csv_path,
"language": args.language,
"deck_name": args.deck_name,
}
if args.deck_id:
kwargs["deck_id"] = int(args.deck_id)
if args.model_id:
kwargs["model_id"] = int(args.model_id)
create_deck(**kwargs)
if __name__ == "__main__":
main()
| 3.03125 | 3 |
fishpass/migrations/0009_auto_20181003_1402.py | Ecotrust/FishPass | 3 | 12788016 | <reponame>Ecotrust/FishPass<filename>fishpass/migrations/0009_auto_20181003_1402.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-03 21:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fishpass', '0008_project_ownership_input'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='focus_region',
),
migrations.RemoveField(
model_name='project',
name='target_area',
),
migrations.AddField(
model_name='project',
name='target_area_input',
field=models.TextField(blank=True, default=None, help_text='list of FocusArea IDs that make up the target area geometry', null=True),
),
]
| 1.289063 | 1 |
text_to_command_conversion/model_creating.py | TolimanStaR/Virushack | 0 | 12788017 | import word2vec
from config import *
word2vec.word2phrase(filename_start, filename_phrases, verbose=True)
word2vec.word2vec(filename_phrases, filename_bin, size=100, verbose=True)
word2vec.word2clusters(filename_start, filename_clusters, 100, verbose=True)
| 2.28125 | 2 |
tests/test_metrics_updater.py | CloudWebManage/cwm-worker-operator | 2 | 12788018 | import json
import pytz
import datetime
from cwm_worker_operator import metrics_updater
from cwm_worker_operator import common
from .mocks.metrics import MockMetricsUpdaterMetrics
def iterate_redis_pools(dc):
for pool in ['ingress', 'internal', 'metrics']:
with getattr(dc, 'get_{}_redis'.format(pool))() as r:
yield r
def delete_all_redis_pools_keys(dc):
for r in iterate_redis_pools(dc):
for key in r.keys("*"):
r.delete(key)
def test_update_agg_metrics():
agg_metrics = {}
now = datetime.datetime(2020, 11, 5, 3, 0).astimezone(pytz.UTC)
metrics_updater.update_agg_metrics(agg_metrics, now, {}, limit=2)
assert agg_metrics == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': now.strftime("%Y%m%d%H%M%S")}]
}
for i in range(5):
now = now + datetime.timedelta(minutes=1)
metrics_updater.update_agg_metrics(agg_metrics, now, {}, limit=2)
assert agg_metrics == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': (now - datetime.timedelta(minutes=1)).strftime("%Y%m%d%H%M%S")},
{'t': now.strftime("%Y%m%d%H%M%S")}]
}
def test_update_release_metrics(domains_config, deployments_manager):
worker_id = 'worker1'
namespace_name = common.get_namespace_name_from_worker_id(worker_id)
aggregated_metrics_key = 'worker:aggregated-metrics:{}'.format(worker_id)
minio_metrics_base_key = 'deploymentid:minio-metrics:{}:'.format(namespace_name)
metrics_updater_metrics = MockMetricsUpdaterMetrics()
deployments_manager.prometheus_metrics[namespace_name] = {}
deployments_manager.kube_metrics[namespace_name] = {
'ram_limit_bytes': 0,
'ram_requests_bytes': 0
}
now = datetime.datetime(2020, 1, 5, 4, 3, 2).astimezone(pytz.UTC)
delete_all_redis_pools_keys(domains_config)
domains_config._set_mock_volume_config(worker_id)
# no aggregated metrics, no current metrics - aggregated metrics are updated with empty metrics for current minute
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0}]
}
# fast forward 61 seconds, another empty current metric is recorded in aggregated metrics
now = now + datetime.timedelta(seconds=61)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': (now-datetime.timedelta(seconds=61)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0},
{'t': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0}]
}
# clear all keys and set some current metrics (cpu and ram) - they are added to aggregated metrics
with domains_config.get_internal_redis() as r:
[r.delete(key) for key in [aggregated_metrics_key]]
with domains_config.get_metrics_redis() as r:
[r.delete(key) for key in r.keys(minio_metrics_base_key + '*')]
r.set(minio_metrics_base_key+'cpu', '500')
r.set(minio_metrics_base_key+'ram', '700.5')
now = now + datetime.timedelta(seconds=61)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 500, 'ram': 700.5}]
}
# set different current metrics and fast-forward 61 seconds - they are appended to the aggregated metrics
# in this case we also set the cpu and ram in different buckets which are also summed as all metrics for each bucket are summed
# we also add some prometheus metrics this time
deployments_manager.prometheus_metrics[namespace_name] = {
'cpu_seconds': '1234',
'ram_bytes': '5678'
}
with domains_config.get_metrics_redis() as r:
r.set(minio_metrics_base_key + 'cpu', '600')
r.set(minio_metrics_base_key + 'ram', '800.5')
now = now + datetime.timedelta(seconds=61)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [
{'t': (now-datetime.timedelta(seconds=61)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 500.0, 'ram': 700.5},
{
't': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 600.0, 'ram': 800.5,
'cpu_seconds': '1234', 'ram_bytes': '5678'
}
]
}
# fast forward 50 seconds (less than 1 minute), aggregated metrics are not updated
now = now + datetime.timedelta(seconds=50)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': (now - datetime.timedelta(seconds=50)).strftime("%Y%m%d%H%M%S"),
'm': [
{'t': (now - datetime.timedelta(seconds=50+61)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 500.0, 'ram': 700.5},
{
't': (now - datetime.timedelta(seconds=50)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 600.0, 'ram': 800.5,
'cpu_seconds': '1234', 'ram_bytes': '5678'
}
]
}
| 2.015625 | 2 |
api/viewsets.py | thinkAmi-sandbox/django-rules-sample | 0 | 12788019 | from rest_framework import mixins
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet
from rules.contrib.rest_framework import AutoPermissionViewSetMixin
from .serializers import NewsSerializer
from myapp.models import DrfNews
class NewsReadOnlyModelViewSet(AutoPermissionViewSetMixin, ReadOnlyModelViewSet):
queryset = DrfNews.objects.all()
serializer_class = NewsSerializer
class NewsRetrieveModelViewSet(AutoPermissionViewSetMixin,
mixins.RetrieveModelMixin,
GenericViewSet):
queryset = DrfNews.objects.all()
serializer_class = NewsSerializer
| 1.867188 | 2 |
neurokit2/ecg/ecg_fixpeaks.py | TiagoTostas/NeuroKit | 1 | 12788020 | # - * - coding: utf-8 - * -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches
def ecg_fixpeaks(rpeaks, sampling_rate=1000, iterative=True, show=False):
"""Correct R-peaks location based on their interval (RRi).
Identify erroneous inter-beat-intervals. Lipponen & Tarvainen (2019).
Parameters
----------
rpeaks : dict
The samples at which the R-peak occur. Dict returned by
`ecg_findpeaks()`.
sampling_rate : int
The sampling frequency of the signal that contains the peaks (in Hz,
i.e., samples/second).
iterative : bool
Whether or not to apply the artifact correction repeatedly (results
in superior artifact correction).
show : bool
Whether or not to visualize artifacts and artifact thresholds.
Returns
-------
artifacts : dict
A dictionary containing the indices of artifacts, accessible with the
keys "ectopic", "missed", "extra", and "longshort".
See Also
--------
ecg_clean, ecg_findpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot
Examples
--------
>>> import neurokit2 as nk
>>> import matplotlib.pyplot as plt
>>> ecg = nk.ecg_simulate(duration=240, noise=0.1, heart_rate=70,
>>> random_state=41)
>>> rpeaks_uncorrected = nk.ecg_findpeaks(ecg)
>>> artifacts, rpeaks_corrected = nk.ecg_fixpeaks(rpeaks_uncorrected,
>>> iterative=True,
>>> show=True)
    >>> rate_corrected = nk.ecg_rate(rpeaks_corrected,
    >>>                              desired_length=len(ecg))
    >>> rate_uncorrected = nk.ecg_rate(rpeaks_uncorrected, desired_length=len(ecg))
>>>
>>> fig, ax = plt.subplots()
>>> ax.plot(rate_uncorrected, label="heart rate without artifact correction")
>>> ax.plot(rate_corrected, label="heart rate with artifact correction")
>>> ax.legend(loc="upper right")
References
----------
- <NAME>., & <NAME>. (2019). A robust algorithm for heart
rate variability time series artefact correction using novel beat
classification. Journal of medical engineering & technology, 43(3),
173-181. 10.1080/03091902.2019.1640306
"""
# Format input.
rpeaks = rpeaks["ECG_R_Peaks"]
# Get corrected peaks and normal-to-normal intervals.
artifacts, subspaces = _find_artifacts(rpeaks, sampling_rate=sampling_rate)
peaks_clean = _correct_artifacts(artifacts, rpeaks)
if iterative:
# Iteratively apply the artifact correction until the number of artifact
# reaches an equilibrium (i.e., the number of artifacts does not change
# anymore from one iteration to the next).
n_artifacts_previous = np.inf
n_artifacts_current = sum([len(i) for i in artifacts.values()])
previous_diff = 0
while n_artifacts_current - n_artifacts_previous != previous_diff:
previous_diff = n_artifacts_previous - n_artifacts_current
artifacts, subspaces = _find_artifacts(peaks_clean,
sampling_rate=sampling_rate)
peaks_clean = _correct_artifacts(artifacts, peaks_clean)
n_artifacts_previous = n_artifacts_current
n_artifacts_current = sum([len(i) for i in artifacts.values()])
if show:
_plot_artifacts_lipponen2019(artifacts, subspaces)
return artifacts, {"ECG_R_Peaks": peaks_clean}
# =============================================================================
# Lipponen & Tarvainen (2019).
# =============================================================================
def _find_artifacts(rpeaks, c1=0.13, c2=0.17, alpha=5.2, window_width=91,
medfilt_order=11, sampling_rate=1000):
peaks = np.ravel(rpeaks)
    # Compute period series (make sure it has the same number of elements as peaks);
# peaks are in samples, convert to seconds.
rr = np.ediff1d(peaks, to_begin=0) / sampling_rate
# For subsequent analysis it is important that the first element has
# a value in a realistic range (e.g., for median filtering).
rr[0] = np.mean(rr[1:])
# Artifact identification #################################################
###########################################################################
# Compute dRRs: time series of differences of consecutive periods (dRRs).
drrs = np.ediff1d(rr, to_begin=0)
drrs[0] = np.mean(drrs[1:])
# Normalize by threshold.
th1 = _compute_threshold(drrs, alpha, window_width)
drrs /= th1
# Cast dRRs to subspace s12.
# Pad drrs with one element.
padding = 2
drrs_pad = np.pad(drrs, padding, "reflect")
s12 = np.zeros(drrs.size)
for d in np.arange(padding, padding + drrs.size):
if drrs_pad[d] > 0:
s12[d - padding] = np.max([drrs_pad[d - 1], drrs_pad[d + 1]])
elif drrs_pad[d] < 0:
s12[d - padding] = np.min([drrs_pad[d - 1], drrs_pad[d + 1]])
# Cast dRRs to subspace s22.
s22 = np.zeros(drrs.size)
for d in np.arange(padding, padding + drrs.size):
if drrs_pad[d] >= 0:
s22[d - padding] = np.min([drrs_pad[d + 1], drrs_pad[d + 2]])
elif drrs_pad[d] < 0:
s22[d - padding] = np.max([drrs_pad[d + 1], drrs_pad[d + 2]])
# Compute mRRs: time series of deviation of RRs from median.
df = pd.DataFrame({'signal': rr})
medrr = df.rolling(medfilt_order, center=True,
min_periods=1).median().signal.to_numpy()
mrrs = rr - medrr
mrrs[mrrs < 0] = mrrs[mrrs < 0] * 2
# Normalize by threshold.
th2 = _compute_threshold(mrrs, alpha, window_width)
mrrs /= th2
# Artifact classification #################################################
###########################################################################
# Artifact classes.
extra_idcs = []
missed_idcs = []
ectopic_idcs = []
longshort_idcs = []
i = 0
while i < rr.size - 2: # The flow control is implemented based on Figure 1
if np.abs(drrs[i]) <= 1: # Figure 1
i += 1
continue
eq1 = np.logical_and(drrs[i] > 1, s12[i] < (-c1 * drrs[i] - c2)) # Figure 2a
eq2 = np.logical_and(drrs[i] < -1, s12[i] > (-c1 * drrs[i] + c2)) # Figure 2a
if np.any([eq1, eq2]):
# If any of the two equations is true.
ectopic_idcs.append(i)
i += 1
continue
# If none of the two equations is true.
if ~np.any([np.abs(drrs[i]) > 1, np.abs(mrrs[i]) > 3]): # Figure 1
i += 1
continue
longshort_candidates = [i]
# Check if the following beat also needs to be evaluated.
if np.abs(drrs[i + 1]) < np.abs(drrs[i + 2]):
longshort_candidates.append(i + 1)
for j in longshort_candidates:
# Long beat.
eq3 = np.logical_and(drrs[j] > 1, s22[j] < -1) # Figure 2b
# Long or short.
eq4 = np.abs(mrrs[j]) > 3 # Figure 1
# Short beat.
eq5 = np.logical_and(drrs[j] < -1, s22[j] > 1) # Figure 2b
if ~np.any([eq3, eq4, eq5]):
# If none of the three equations is true: normal beat.
i += 1
continue
# If any of the three equations is true: check for missing or extra
# peaks.
# Missing.
eq6 = np.abs(rr[j] / 2 - medrr[j]) < th2[j] # Figure 1
# Extra.
eq7 = np.abs(rr[j] + rr[j + 1] - medrr[j]) < th2[j] # Figure 1
# Check if extra.
if np.all([eq5, eq7]):
extra_idcs.append(j)
i += 1
continue
# Check if missing.
if np.all([eq3, eq6]):
missed_idcs.append(j)
i += 1
continue
# If neither classified as extra or missing, classify as "long or
# short".
longshort_idcs.append(j)
i += 1
# Prepare output
artifacts = {"ectopic": ectopic_idcs, "missed": missed_idcs,
"extra": extra_idcs, "longshort": longshort_idcs}
subspaces = {"rr": rr, "drrs": drrs, "mrrs": mrrs, "s12": s12, "s22": s22,
"c1": c1, "c2": c2}
return artifacts, subspaces
def _compute_threshold(signal, alpha, window_width):
df = pd.DataFrame({'signal': np.abs(signal)})
q1 = df.rolling(window_width, center=True,
min_periods=1).quantile(.25).signal.to_numpy()
q3 = df.rolling(window_width, center=True,
min_periods=1).quantile(.75).signal.to_numpy()
th = alpha * ((q3 - q1) / 2)
return th
def _correct_artifacts(artifacts, peaks):
# Artifact correction
#####################
# The integrity of indices must be maintained if peaks are inserted or
# deleted: for each deleted beat, decrease indices following that beat in
# all other index lists by 1. Likewise, for each added beat, increment the
# indices following that beat in all other lists by 1.
extra_idcs = artifacts["extra"]
missed_idcs = artifacts["missed"]
ectopic_idcs = artifacts["ectopic"]
longshort_idcs = artifacts["longshort"]
# Delete extra peaks.
if extra_idcs:
peaks = _correct_extra(extra_idcs, peaks)
# Update remaining indices.
missed_idcs = _update_indices(extra_idcs, missed_idcs, -1)
ectopic_idcs = _update_indices(extra_idcs, ectopic_idcs, -1)
longshort_idcs = _update_indices(extra_idcs, longshort_idcs, -1)
# Add missing peaks.
if missed_idcs:
peaks = _correct_missed(missed_idcs, peaks)
# Update remaining indices.
ectopic_idcs = _update_indices(missed_idcs, ectopic_idcs, 1)
longshort_idcs = _update_indices(missed_idcs, longshort_idcs, 1)
if ectopic_idcs:
peaks = _correct_misaligned(ectopic_idcs, peaks)
if longshort_idcs:
peaks = _correct_misaligned(longshort_idcs, peaks)
return peaks
def _correct_extra(extra_idcs, peaks):
corrected_peaks = peaks.copy()
corrected_peaks = np.delete(corrected_peaks, extra_idcs)
return corrected_peaks
def _correct_missed(missed_idcs, peaks):
corrected_peaks = peaks.copy()
missed_idcs = np.array(missed_idcs)
# Calculate the position(s) of new beat(s). Make sure to not generate
# negative indices. prev_peaks and next_peaks must have the same
# number of elements.
valid_idcs = np.logical_and(missed_idcs > 1,
missed_idcs < len(corrected_peaks))
missed_idcs = missed_idcs[valid_idcs]
prev_peaks = corrected_peaks[[i - 1 for i in missed_idcs]]
next_peaks = corrected_peaks[missed_idcs]
added_peaks = prev_peaks + (next_peaks - prev_peaks) / 2
# Add the new peaks before the missed indices (see numpy docs).
corrected_peaks = np.insert(corrected_peaks, missed_idcs, added_peaks)
return corrected_peaks
def _correct_misaligned(misaligned_idcs, peaks):
corrected_peaks = peaks.copy()
misaligned_idcs = np.array(misaligned_idcs)
# Make sure to not generate negative indices, or indices that exceed
# the total number of peaks. prev_peaks and next_peaks must have the
# same number of elements.
valid_idcs = np.logical_and(misaligned_idcs > 1,
misaligned_idcs < len(corrected_peaks))
misaligned_idcs = misaligned_idcs[valid_idcs]
prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]
half_ibi = (next_peaks - prev_peaks) / 2
peaks_interp = prev_peaks + half_ibi
# Shift the R-peaks from the old to the new position.
corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
corrected_peaks = np.concatenate((corrected_peaks,
peaks_interp)).astype(int)
corrected_peaks.sort(kind="mergesort")
return corrected_peaks
def _update_indices(source_idcs, update_idcs, update):
"""
For every element s in source_idcs, change every element u in update_idcs
according to update, if u is larger than s.
"""
if not update_idcs:
return update_idcs
for s in source_idcs:
update_idcs = [u + update if u > s else u for u in update_idcs]
return update_idcs
def _plot_artifacts_lipponen2019(artifacts, info):
"""
"""
# Extract parameters
longshort_idcs = artifacts["longshort"]
ectopic_idcs = artifacts["ectopic"]
extra_idcs = artifacts["extra"]
missed_idcs = artifacts["missed"]
rr = info["rr"]
drrs = info["drrs"]
mrrs = info["mrrs"]
s12 = info["s12"]
s22 = info["s22"]
c1 = info["c1"]
c2 = info["c2"]
# Visualize artifact type indices.
# Set grids
gs = matplotlib.gridspec.GridSpec(ncols=4, nrows=3,
width_ratios=[1, 2, 2, 2])
fig = plt.figure(constrained_layout=False)
ax0 = fig.add_subplot(gs[0, :-2])
ax1 = fig.add_subplot(gs[1, :-2])
ax2 = fig.add_subplot(gs[2, :-2])
ax3 = fig.add_subplot(gs[:, -1])
ax4 = fig.add_subplot(gs[:, -2])
ax0.set_title("Artifact types", fontweight="bold")
ax0.plot(rr, label="heart period")
ax0.scatter(longshort_idcs, rr[longshort_idcs], marker='x', c='m',
s=100, zorder=3, label="long/short")
ax0.scatter(ectopic_idcs, rr[ectopic_idcs], marker='x', c='g', s=100,
zorder=3, label="ectopic")
ax0.scatter(extra_idcs, rr[extra_idcs], marker='x', c='y', s=100,
zorder=3, label="false positive")
ax0.scatter(missed_idcs, rr[missed_idcs], marker='x', c='r', s=100,
zorder=3, label="false negative")
ax0.legend(loc="upper right")
# Visualize first threshold.
ax1.set_title("Consecutive-difference criterion", fontweight="bold")
ax1.plot(np.abs(drrs), label="difference consecutive heart periods")
ax1.axhline(1, c='r', label="artifact threshold")
ax1.legend(loc="upper right")
# Visualize second threshold.
ax2.set_title("Difference-from-median criterion", fontweight="bold")
ax2.plot(np.abs(mrrs), label="difference from median over 11 periods")
ax2.axhline(3, c="r", label="artifact threshold")
ax2.legend(loc="upper right")
# Visualize subspaces.
ax4.set_title("Subspace 1", fontweight="bold")
ax4.set_xlabel("S11")
ax4.set_ylabel("S12")
ax4.scatter(drrs, s12, marker="x", label="heart periods")
verts0 = [(min(drrs), max(s12)),
(min(drrs), -c1 * min(drrs) + c2),
(-1, -c1 * -1 + c2),
(-1, max(s12))]
poly0 = matplotlib.patches.Polygon(verts0, alpha=0.3, facecolor="r",
edgecolor=None, label="ectopic periods")
ax4.add_patch(poly0)
verts1 = [(1, -c1 * 1 - c2),
(1, min(s12)),
(max(drrs), min(s12)),
(max(drrs), -c1 * max(drrs) - c2)]
poly1 = matplotlib.patches.Polygon(verts1, alpha=0.3, facecolor="r",
edgecolor=None)
ax4.add_patch(poly1)
ax4.legend(loc="upper right")
ax3.set_title("Subspace 2", fontweight="bold")
ax3.set_xlabel("S21")
ax3.set_ylabel("S22")
ax3.scatter(drrs, s22, marker="x", label="heart periods")
verts2 = [(min(drrs), max(s22)),
(min(drrs), 1),
(-1, 1),
(-1, max(s22))]
poly2 = matplotlib.patches.Polygon(verts2, alpha=0.3, facecolor="r",
edgecolor=None, label="short periods")
ax3.add_patch(poly2)
verts3 = [(1, -1),
(1, min(s22)),
(max(drrs), min(s22)),
(max(drrs), -1)]
poly3 = matplotlib.patches.Polygon(verts3, alpha=0.3, facecolor="y",
edgecolor=None, label="long periods")
ax3.add_patch(poly3)
ax3.legend(loc="upper right")
| 2.96875 | 3 |
lib/BlockerChecker.py | geirem/python-bom-tracker | 0 | 12788021 | import json
from lib.Component import Component
class BlockerChecker:
def __init__(self, file: str):
self.__blocked = {}
self.__populate_blocked_list(file)
def __populate_blocked_list(self, file: str):
with open(file, 'r') as inimage:
blocked_list = json.load(inimage)
for blocked in blocked_list:
component = Component(blocked['purl'])
self.__blocked[component.get_purl()] = component
def check(self, components: list) -> list:
return [x for x in components if x.get_purl() in self.__blocked]
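# Usage sketch (the file layout and purl values are assumptions, not part of
# this module): blocked.json is expected to be a JSON list of objects with a
# "purl" field, matching the parsing in __populate_blocked_list above.
#
#   [{"purl": "pkg:pypi/requests@2.19.0"}, {"purl": "pkg:npm/lodash@4.17.4"}]
#
#   checker = BlockerChecker('blocked.json')
#   for component in checker.check(candidate_components):
#       print('blocked:', component.get_purl())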
| 3.015625 | 3 |
fileprovider/utils.py | sideffect0/django-fileprovider | 4 | 12788022 | <filename>fileprovider/utils.py<gh_stars>1-10
from django.http import HttpResponse
def sendfile(path, view_mode=False, download_mode=False, download_fname=None):
"""
Create a xfile compatible Django response
:param str path: absolute path to file
:param view_mode: inform web browser/client the file can be displayed
:param download_mode: inform web browser/client the file should be downloaded
    :param download_fname: provide a different filename for better UX (when `download_mode` is set)
"""
response = HttpResponse()
response["X-File"] = path
content_dispos_optn = {"v_mod": "inline", "d_mod": "attachment"}
content_dispos = content_dispos_optn.get(view_mode and "v_mod", None)
    content_dispos = content_dispos or content_dispos_optn.get(
        download_mode and "d_mod", None
    )
content_dispos = (
(content_dispos + '; filename="%s"' % (download_fname))
if (download_mode and download_fname)
else content_dispos
)
if content_dispos is not None:
response["Content-Disposition"] = content_dispos
return response
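# Usage sketch (the view and file path are hypothetical): the response only
# carries the X-File header, so a front-end component (the fileprovider
# middleware or an X-Sendfile/X-Accel capable web server) is expected to
# stream the actual bytes.
#
#   def report(request):
#       return sendfile('/srv/files/report.pdf',
#                       download_mode=True,
#                       download_fname='report.pdf')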
| 2.640625 | 3 |
crons/navitron_crons/exceptions.py | j9ac9k/navitron | 2 | 12788023 | """exceptions.py: navitron-cron custom exceptions"""
class NavitronCronException(Exception):
"""base exception for navitron-cron project"""
pass
class ConnectionException(NavitronCronException):
"""base exception for connections issues"""
pass
class FatalCLIExit(NavitronCronException):
"""general exception for fatal issue"""
pass
class MissingMongoConnectionInfo(ConnectionException):
"""Unable to connect to mongo, missing connection info"""
pass
class NoSDEDataFound(ConnectionException):
"""Blank collection found where SDE was expected"""
pass
| 2.40625 | 2 |
test_cli/offline_docs/test_cli.py | top-on/offline-docs-py | 0 | 12788024 | """Functional tests of CLI."""
from offline_docs.cli import clean, python
def test_python():
"""Run 'python' command of CLI."""
python()
def test_clean():
"""Run 'clean' command of CLI."""
clean()
| 1.460938 | 1 |
categories/models.py | ajmasia/wordplease | 0 | 12788025 | from django.contrib.auth.models import User
from django.db import models
class Category(models.Model):
"""
Categories data model per user/blog
"""
owner = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=150)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = 'Categories'
def __str__(self):
"""
        Define how the object is shown in the admin panel
:return: category name
"""
return '{0}'.format(self.name) | 2.6875 | 3 |
reflexif/framework/depresolver.py | chschmitt/reflexif | 0 | 12788026 | # -*- coding: utf-8 -*-
'''
.. created on 06.09.2016
.. by <NAME>
'''
from __future__ import print_function, absolute_import, division, unicode_literals
from reflexif.compat import *
class Resolver(object):
def __init__(self, nodes, expand):
self.nodes = nodes
self.expand = expand
self.depth_cache = {}
self.misses = 0
self.hits = 0
def depth(self, node):
return self._depth(node)
def _depth(self, node, startnode=None):
if node in self.depth_cache:
self.hits += 1
return self.depth_cache[node]
else:
self.misses += 1
if startnode is None:
startnode = node
elif node is startnode:
raise ValueError('%r has circular dependency' % node)
children = self.expand(node)
if children:
res = 1 + max(self._depth(c, startnode) for c in children)
else:
res = 0
self.depth_cache[node] = res
return res
    def dependency_sorted(self):
return sorted(self.nodes, key=self.depth)
def depsort(nodes, expand):
    return Resolver(nodes, expand).dependency_sorted()
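# Usage sketch with an illustrative dependency graph: expand(node) returns the
# direct dependencies of a node, and nodes come back leaves-first (smallest
# dependency depth first).
#
#   deps = {'app': ['lib', 'util'], 'lib': ['util'], 'util': []}
#   depsort(deps, lambda n: deps[n])  # -> ['util', 'lib', 'app']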
| 2.703125 | 3 |
dst/survey/management/commands/systemcheck.py | Ecotrust/floodplain-restoration | 2 | 12788027 | <gh_stars>1-10
from django.core.management.base import BaseCommand, CommandError
from survey.validate import systemcheck, SystemCheckError
class Command(BaseCommand):
help = 'Checks the current system for data integrity'
def handle(self, *args, **options):
try:
systemcheck()
except SystemCheckError as error:
raise CommandError(error)
self.stdout.write('Successfully checked system!')
| 2.46875 | 2 |
listener.py | blacktrub/simple_git_hook_listener | 1 | 12788028 | <reponame>blacktrub/simple_git_hook_listener
import os
import subprocess
import configparser
from bottle import route, run
PATH = os.path.dirname(os.path.abspath(__file__))
TOKEN = None
@route('/deploy/<token>', method='GET')
@route('/deploy/<token>', method='POST')
def view(token):
if token != TOKEN:
return 'no ok'
subprocess.Popen(os.path.join(PATH, 'deploy.sh'), shell=True)
return 'ok'
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read(os.path.join(PATH, 'config.ini'))
port = config['settings']['port']
TOKEN = config['settings']['token']
run(host='localhost', port=port)
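# Expected config.ini layout next to this script (values are illustrative):
#
#   [settings]
#   port = 8080
#   token = <long-random-string>
#
# A deploy is then triggered via GET/POST http://localhost:<port>/deploy/<token>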
| 2.25 | 2 |
testapp/main.py | RobotHanzo/flaskfilemanager | 21 | 12788029 | <reponame>RobotHanzo/flaskfilemanager
"""
Main blueprint for test app
"""
import logging
from flask import Blueprint, render_template
__author__ = '<NAME> (Little Fish Solutions LTD)'
log = logging.getLogger(__name__)
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('index.html')
| 1.867188 | 2 |
mw/lib/title/tests/test_functions.py | frankier/python-mediawiki-utilities | 23 | 12788030 | from nose.tools import eq_
from ..functions import normalize
def test_normalize():
eq_("Foobar", normalize("Foobar")) # Same
eq_("Foobar", normalize("foobar")) # Capitalize
eq_("FooBar", normalize("fooBar")) # Late capital
eq_("Foo_bar", normalize("Foo bar")) # Space
| 2.328125 | 2 |
client.py | ChargedMonk/Server-Client-communication-using-UDP-Protocol | 0 | 12788031 | import socket
def sendmsg(msgFromClient):
bytesToSend = str.encode(msgFromClient)
serverAddressPort = ("127.0.0.1", 20001)
bufferSize = 5120
UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
UDPClientSocket.sendto(bytesToSend, serverAddressPort)
msgFromServer = UDPClientSocket.recvfrom(bufferSize)[0]
return msgFromServer.decode("utf-8")
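if __name__ == "__main__":
    # Minimal usage sketch: assumes the companion UDP server is already
    # listening on 127.0.0.1:20001 (the address hard-coded above).
    print(sendmsg("ping"))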
| 2.578125 | 3 |
output/xml_out.py | pan-webis-de/duan20 | 1 | 12788032 | from xml.etree.ElementTree import Element,ElementTree,tostring
def create_author(author_id, language, author_type):
    author = Element('author', id=author_id, lang=language, type=author_type)
return author
def write_authors():
author1=create_author("1","en","0")
author2=create_author("2","en","0")
with open('out.xml','a') as f:
f.write(tostring(author2).decode("utf-8") + "\n")
f.write(tostring(author1).decode("utf-8") +"\n" )
write_authors() | 3 | 3 |
bmcs_beam/mxn/matresdev/db/exdb/loadtxt_novalue.py | bmcs-group/bmcs_beam | 1 | 12788033 | <reponame>bmcs-group/bmcs_beam
'''
Created on Apr 9, 2010
@author: alexander
'''
from numpy import array, vstack, zeros
from os.path import \
join
from matresdev.db.simdb.simdb import simdb
# The implementation works but is too time consuming.
# @todo: check for a faster and more simple solution!
def loadtxt_novalue(file_name):
    '''Return a data array similar to loadtxt.
"NOVALUE" entries are replaced by the value of the previous line.
'''
file = open(file_name, 'r')
lines = file.readlines()
n_columns = len(lines[0].split(';'))
data_array = zeros(n_columns)
n = 0
for line in lines:
line_split = lines[n].split(';')
m = 0
for value in line_split:
if value == 'NOVALUE':
print('---------------------------------------------------------------')
print('NOVALUE entry in line', n, 'position', m, 'found')
print('For faster processing replace values directly in the data file!')
print('---------------------------------------------------------------')
line_split[m] = lines[n - 1].split(';')[m]
m += 1
line_array = array(line_split, dtype=float)
data_array = vstack([data_array, line_array])
n += 1
return data_array
if __name__ == '__main__':
ex_path = join(
simdb.exdata_dir, 'plate_tests', 'PT-10a', 'PT11-10a_original.ASC')
data_array = loadtxt_novalue(ex_path)
print('\n')
print('data_array', data_array)
| 2.140625 | 2 |
example_11.py | iljuhas7/lab-15 | 0 | 12788034 | <filename>example_11.py
import os
os.mkdir("NewDir")
| 1.570313 | 2 |
src/estimate_cutoff.py | hjhornbeck/fsg_token_speedup | 0 | 12788035 | <reponame>hjhornbeck/fsg_token_speedup
#!/usr/bin/env python3
from math import ceil, log, log1p
import re
import sys
token = re.compile(r'^(..)(..)-[\da-f]{16}-[\da-f]{16}-(.*)-(.*)-[\da-f]+$', re.IGNORECASE)
db = dict()
for line in sys.stdin:
match = token.search(line)
if match is None:
continue
version = int( match.group(1), 16 )
filterS = int( match.group(2), 16 )
biomes = int( match.group(4), 16 )
structs = int( match.group(3), 16 ) - biomes
if version not in db:
db[version] = dict()
if filterS not in db[version]:
db[version][filterS] = [0.5, 0, 0.5]
db[version][filterS][0] += 1 # alpha for both conjugate priors
db[version][filterS][1] += ((biomes - 1) % 65536) + 1 # beta, finding biome
db[version][filterS][2] += max((biomes-1) // 65536, 0) # beta, biome possible
# print out the updated estimate for this version
print( f"uint32_t biome_cutoffs[{len(db[version])+1}] = " + "{", end='' )
for key in sorted( db[version].keys() ):
fb_mean = db[version][key][0] / db[version][key][1]
bp_mean = db[version][key][0] / (db[version][key][0] + db[version][key][2])
try:
cutoff = ceil( (log1p( -bp_mean ) - log( bp_mean ))/log1p( -fb_mean ) )
except:
cutoff = 65536
print( f"{cutoff}, ", end='' )
print( "0};\t// " + f"VERSION == {version}", flush=True )
# print( f"// DEBUG: ({version},{key}) = {db[version][key]}" )
| 2.21875 | 2 |
ryzen/launcher.py | akhilguliani/daemon | 0 | 12788036 | <gh_stars>0
""" Helper functions to parse and launch programs presented in an input file """
import os
from multiprocessing import Process
from time import time
import shlex
import subprocess
import psutil
def parse_file(file_path):
"""Parse input file and return list of programs with thier dir annd shares"""
retval = []
with open(file_path) as pfile:
count = 1
local = []
for line in pfile:
if line[0] == '#':
# Adding comments in file
continue
if "@" in line:
if local != []:
retval.append(local)
local = []
count = 1
continue
if count == 1:
# append directory
local.append(line.strip())
elif count == 2:
# extract CMD line parameters
local.append(list(shlex.split(line.strip())))
elif count == 3:
# extract shares
shares = int(line.strip())
if shares < 0:
shares = 0
if shares > 100:
shares = 100
local.append(shares)
elif count == 4:
                # extract priority
prio = None
theline = line.strip()
if theline == "High":
prio = -19
elif theline == "Medium":
prio = 0
elif theline == "Low":
prio = 20
local.append(prio)
# print(local)
elif count == 5:
# extract perf normalization value
local.append(float(line.strip()))
# print(local)
count += 1
# print("__\n", retval)
return retval
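# Layout of the input file consumed by parse_file above (values illustrative).
# Each workload is described by five lines -- working directory, command line,
# shares (0-100), priority (High/Medium/Low) and a perf-normalization float --
# and workloads are separated by a line containing '@'; lines starting with
# '#' are comments.
#
#   # example workload
#   /home/user/benchmarks/lbm
#   ./lbm 3000 reference.dat 0 0 100_100_130_ldc.of
#   50
#   High
#   1.0
#   @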
def launch_on_core(process_info, cpu=0):
""" Take the output from parse_file and launch the process on a core=cpu """
pcwd = process_info[0]
pargs = process_info[1]
ret = psutil.Popen(args=pargs, cwd=pcwd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
ret.cpu_affinity([cpu])
ret.nice(process_info[3]) # if we need to add priorities
return ret
def launch_on_multiple_cores(process_info, cpus=[0]):
""" Take the output from parse_file and launch the process with affinity to cores=[cpus] """
pcwd = process_info[0]
pargs = process_info[1]
ret = psutil.Popen(args=pargs, cwd=pcwd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
ret.cpu_affinity(cpus)
ret.nice(process_info[3]) # if we need to add priorities
return ret
def run_on_core(process_info, cpu=0):
""" Take the output from parse_file and launch the process on a core=cpu """
p = launch_on_core(process_info, cpu)
p_dict_loc = p.as_dict()
proc_dict = {}
proc_dict[p_dict_loc['pid']] = p_dict_loc
def print_time(proc):
""" Print Process Info on compeletion """
end_time = time()
p_dic = proc_dict[proc.pid]
print(p_dic['name'], p_dic['pid'], p_dic['cpu_num'], str(end_time - p_dic['create_time']))
psutil.wait_procs([p], timeout=None, callback=print_time)
return
def run_on_multiple_cores(process_info, cpus=[i for i in range(8)]):
""" Take the output from parse_file and launch the process on a cores=[cpus] """
p = launch_on_multiple_cores(process_info, cpus)
p_dict_loc = p.as_dict()
proc_dict = {}
proc_dict[p_dict_loc['pid']] = p_dict_loc
def print_time(proc):
""" Print Process Info on compeletion """
end_time = time()
p_dic = proc_dict[proc.pid]
print(p_dic['name'], p_dic['pid'], p_dic['cpu_num'], str(end_time - p_dic['create_time']))
psutil.wait_procs([p], timeout=None, callback=print_time)
return
def wait_for_procs(procs, callback_fn):
gone, alive = psutil.wait_procs(procs, timeout=None, callback=callback_fn)
for _p in alive:
_p.kill()
def run_on_all_cores(process_info, cores=[0]):
""" Take the output from parse_file and launch the process on a cores=cpu """
p_list = []
proc_dict = {}
for cpu in cores:
p = launch_on_core(process_info, cpu)
p_dict_loc = p.as_dict()
proc_dict[p_dict_loc['pid']] = p_dict_loc
p_list.append(p)
def print_time(proc):
""" Print Process Info on compeletion """
end_time = time()
p_dic = proc_dict[proc.pid]
print(p_dic['name'], p_dic['pid'], p_dic['cpu_num'], str(end_time - p_dic['create_time']))
psutil.wait_procs(p_list, timeout=None, callback=print_time)
return
def run_on_core_forever(process_info, cpu=0):
""" Take the output from parse_file and launch the process on a core=cpu """
p = launch_on_core(process_info, cpu)
def restart(proc):
""" Infinate recursive callback"""
print("restarting ", str(proc.pid))
run_on_core_forever(process_info, cpu)
psutil.wait_procs([p], timeout=None, callback=restart)
return
def run_multiple_on_cores(process_info_list, cores=None):
""" Take the output from parse_file and launch the processes on cores=[cpu,...] """
# Ensure size of cores and process_info_list is same
if len(process_info_list) > psutil.cpu_count(logical=False):
print("More Processess than cores, can't run em all")
exit(1)
# one more check for len(process_info_list) == len(cores)
if cores is None:
cores = range(len(process_info_list))
p_list = []
proc_dict = {}
for cpu, process_info in zip(cores, process_info_list):
p = launch_on_core(process_info, cpu)
p_dict_loc = p.as_dict()
proc_dict[p_dict_loc['pid']] = p_dict_loc
p_list.append(p)
def print_time(proc):
""" Print Process Info on compeletion """
end_time = time()
p_dic = proc_dict[proc.pid]
print(p_dic['name'], p_dic['pid'], p_dic['cpu_num'], str(end_time - p_dic['create_time']))
psutil.wait_procs(p_list, timeout=None, callback=print_time)
return
def run_on_cores_restart(process_info_list, copies=1, cores=None, rstrt_even=False):
""" Take the output from parse_file and launch the processes on cores=[cpu,...] """
# Ensure size of cores and process_info_list is same
num_procs = len(process_info_list)
if num_procs > 2:
print("More than 2 processes")
exit(1)
# one more check for len(process_info_list) == len(cores)
if cores is None:
cores = range(copies)
restarted = []
p_list = []
proc_dict = {}
for cpu in cores:
process_info = process_info_list[cpu % num_procs]
p = launch_on_core(process_info, cpu)
p_dict_loc = p.as_dict()
p_dict_loc['work_info'] = process_info_list[cpu % num_procs]
proc_dict[p_dict_loc['pid']] = p_dict_loc
p_list.append(p)
def print_time(proc):
""" Print Process Info on compeletion """
end_time = time()
p_dic = proc_dict[proc.pid]
print(p_dic['name'], p_dic['pid'], p_dic['cpu_num'], str(end_time - p_dic['create_time']))
_p_rst = None
if rstrt_even and p_dic['cpu_num']%2 == 0:
_p_rst = Process(target=run_on_core_forever, args=(process_info_list[p_dic['cpu_num'] % num_procs], p_dic['cpu_num']))
_p_rst.start()
restarted.append(_p_rst)
gone, alive = psutil.wait_procs(p_list, timeout=None, callback=print_time)
for _p in alive:
_p.kill()
if len(restarted) >= 1:
        # kill all restarted processes
for _proc in restarted:
try:
_proc.terminate()
except:
pass
return
def run_on_multiple_cores_forever(process_info_list, cores=None):
""" Take the output from parse_file and launch the processes on cores=[cpu,...] """
# check if proc list is None
if process_info_list is None:
return
# one more check for len(process_info_list) == len(cores)
if cores is None:
cores = [i*2 for i in range(len(process_info_list))]
restarted = []
for i, cpu in enumerate(cores):
process_info = process_info_list[i]
_p_rst = None
_p_rst = Process(target=run_on_core_forever, args=(process_info, cpu))
_p_rst.start()
restarted.append(_p_rst)
return
| 3.015625 | 3 |
superai/cli.py | mysuperai/superai-sdk | 1 | 12788037 | import os
import boto3
import click
import json
import signal
import sys
import yaml
from botocore.exceptions import ClientError
from datetime import datetime
from typing import List
from warrant import Cognito
from superai import __version__
from superai.client import Client
from superai.config import get_config_dir, list_env_configs, set_env_config, settings
from superai.exceptions import SuperAIAuthorizationError
from superai.log import logger
from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user
from superai.utils.pip_config import pip_configure
from superai.meta_ai.dockerizer import build_image, push_image
from superai.meta_ai.dockerizer.sagemaker_endpoint import (
upload_model_to_s3,
invoke_sagemaker_endpoint,
create_endpoint,
invoke_local,
)
BASE_FOLDER = get_config_dir()
COGNITO_USERPOOL_ID = settings.get("cognito", {}).get("userpool_id")
COGNITO_CLIENT_ID = settings.get("cognito", {}).get("client_id")
COGNITO_REGION = settings.get("cognito", {}).get("region")
log = logger.get_logger(__name__)
def _signal_handler(s, f):
sys.exit(1)
@click.group()
def cli():
pass
@cli.command()
@click.option("--verbose/--no-verbose", "-vvv", help="Verbose output", default=False)
def info(verbose):
"""Print CLI Configuration"""
click.echo("=================")
click.echo("Super.AI CLI Info:")
click.echo("=================")
load_api_key()
click.echo(f"VERSION: {__version__}")
click.echo(f"ENVIRONMENT: {settings.current_env}")
click.echo(f"USER: {settings.get('user',{}).get('username')}")
if verbose:
click.echo(yaml.dump(settings.as_dict(env=settings.current_env), default_flow_style=False))
@cli.group()
@click.pass_context
def env(ctx):
"""
super.AI Config operations
"""
pass
@env.command(name="list")
@click.pass_context
def env_list(ctx):
"""
:param ctx:
:return:
"""
list_env_configs(printInConsole=True)
@env.command(name="set")
@click.option("--api-key", help="Your super.AI API KEY", required=False)
@click.option("--environment", "-e", help="Set environment", required=False)
@click.pass_context
def env_set(ctx, api_key, environment):
"""
Set configuration
"""
if environment:
set_env_config(name=environment)
if api_key:
save_api_key(api_key)
@cli.group()
@click.pass_context
def client(ctx):
"""
super.AI API operations
"""
api_key = ""
try:
api_key = load_api_key()
except Exception as e:
pass
if len(api_key) == 0:
print("User needs to login or set api key")
exit()
ctx.obj = {}
ctx.obj["client"] = Client(api_key=api_key)
@client.command(name="create_jobs")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--callback_url", "-c", help="Callback URL for post when jobs finish")
@click.option("--inputs", "-i", help="Json list with inputs")
@click.option("--inputs_file", "-if", help="URL pointing to JSON file")
@click.pass_context
def create_jobs(ctx, app_id: str, callback_url: str, inputs: str, inputs_file: str):
"""
Submit jobs
"""
client = ctx.obj["client"]
print("Submitting jobs")
json_inputs = None
if inputs is not None:
try:
json_inputs = json.loads(inputs)
except:
print("Couldn't read json inputs")
exit()
print(client.create_jobs(app_id, callback_url, json_inputs, inputs_file))
@client.command(name="fetch_job")
@click.option("--job_id", "-j", help="Job id", required=True)
@click.pass_context
def fetch_job(ctx, job_id: str):
"""
Get Job given job id
"""
client = ctx.obj["client"]
print(f"Fetching job {job_id}")
print(client.fetch_job(job_id))
@client.command(name="fetch_batches_job")
@click.option("--app_id", "-a", help="App id", required=True)
@click.pass_context
def fetch_batches_job(ctx, app_id: str):
"""
Get not processed Batches given app id
"""
client = ctx.obj["client"]
print(f"Fetching batches {app_id}")
print(client.fetch_batches_job(app_id))
@client.command(name="fetch_batch_job")
@click.option("--app_id", "-a", help="App id", required=True)
@click.option("--batch_id", "-b", help="Batch id", required=True)
@click.pass_context
def fetch_batch_job(ctx, app_id: str, batch_id: str):
"""
Get Batch given app id and batch id
"""
client = ctx.obj["client"]
print(f"Fetching batch {app_id} {batch_id}")
print(client.fetch_batch_job(app_id, batch_id))
@client.command(name="get_job_response")
@click.option("--job_id", "-j", help="Job id", required=True)
@click.pass_context
def get_job_response(ctx, job_id: str):
"""
Get Job response given job id
"""
client = ctx.obj["client"]
print(f"Getting job response {job_id}")
print(client.get_job_response(job_id))
@client.command(name="cancel_job")
@click.option("--job_id", "-j", help="Job id", required=True)
@click.pass_context
def cancel_job(ctx, job_id: str):
"""
Cancel a job given job id. Only for jobs in SCHEDULED, IN_PROGRESS or SUSPENDED state.
"""
client = ctx.obj["client"]
print(f"Cancelling job {job_id}")
print(client.cancel_job(job_id))
@client.command(name="list_jobs")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--page", "-p", help="Page number", type=int)
@click.option("--size", "-s", help="Size of page", type=int)
@click.option("--sort_by", "-sort", help="Job field to sort by", type=str, default="id", show_default=True)
@click.option(
"--order_by",
"-order",
help="Sort direction (asc or desc)",
type=click.Choice(["asc", "desc"]),
default="asc",
show_default=True,
)
@click.option(
"--created_start_date",
"-c0",
help="Created start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--created_end_date",
"-c1",
help="Created end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_start_date",
"-e0",
help="Completed start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_end_date",
"-e1",
help="Completed end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--status_in",
"-s_in",
help="Status of jobs",
multiple=True,
type=click.Choice(["SCHEDULED", "IN_PROGRESS", "FAILED", "SUSPENDED", "CANCELED", "EXPIRED", "COMPLETED"]),
)
@click.pass_context
def list_jobs(
ctx,
app_id: str,
page: int,
size: int,
sort_by: str,
order_by: str,
created_start_date: datetime,
created_end_date: datetime,
completed_start_date: datetime,
completed_end_date: datetime,
status_in: List[str] = None,
):
"""
Get a paginated list of jobs (without response) given an application id
"""
client = ctx.obj["client"]
print(f"Fetching jobs per application {app_id}")
if len(status_in) == 0:
status_in = None
print(
client.list_jobs(
app_id,
page,
size,
sort_by,
order_by,
created_start_date,
created_end_date,
completed_start_date,
completed_end_date,
status_in,
)
)
@client.command(name="download_jobs")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option(
"--created_start_date",
"-c0",
help="Created start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--created_end_date",
"-c1",
help="Created end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_start_date",
"-e0",
help="Completed start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_end_date",
"-e1",
help="Completed end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--status_in",
"-s_in",
help="Status of jobs",
multiple=True,
type=click.Choice(["SCHEDULED", "IN_PROGRESS", "FAILED", "SUSPENDED", "CANCELED", "EXPIRED", "COMPLETED"]),
)
@click.pass_context
def download_jobs(
ctx,
app_id: str,
created_start_date: datetime,
created_end_date: datetime,
completed_start_date: datetime,
completed_end_date: datetime,
status_in: List[str] = None,
):
"""
    Trigger processing of job responses; the result is sent to the customer's email once finished.
"""
client = ctx.obj["client"]
print(f"Triggering job responses processing per application {app_id}")
if len(status_in) == 0:
status_in = None
print(
client.download_jobs(
app_id, created_start_date, created_end_date, completed_start_date, completed_end_date, status_in
)
)
@client.command(name="create_ground_truth")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--input_json", "-i", help="Input json of ground truth", required=True)
@click.option("--label", "-l", help="Label (or output) json of ground truth", required=True)
@click.option("--tag", "-t", help="Tag ground truth data")
@click.option("--metadata", "-m", help="Metadata json")
@click.pass_context
def create_ground_truth(
ctx, app_id: str, input_json: str = None, label: str = None, tag: str = None, metadata: str = None
):
"""
Submit fresh ground truth data
"""
client = ctx.obj["client"]
print("Submitting fresh ground truth data")
input_dict = None
metadata_dict = None
label_dict = None
if input_json is not None:
try:
input_dict = json.loads(input_json)
except:
print("Couldn't load input json of ground truth")
exit()
if metadata is not None:
try:
metadata_dict = json.loads(metadata)
except:
print("Couldn't load metadata json of ground truth")
exit()
if label is not None:
try:
label_dict = json.loads(label)
except:
print("Couldn't load label json of ground truth")
exit()
print(client.create_ground_truth(app_id, input_dict, label_dict, tag, metadata_dict))
@client.command(name="update_ground_truth")
@click.option("--ground_truth_data_id", "-g", help="Ground truth data id", required=True)
@click.option("--input_json", "-i", help="Input json of ground truth")
@click.option("--label", "-l", help="Label (or output) json of ground truth")
@click.option("--tag", "-t", help="Tag ground truth data")
@click.option("--metadata", "-m", help="Metadata json")
@click.pass_context
def update_ground_truth(
ctx, ground_truth_data_id: str, input_json: str = None, label: str = None, tag: str = None, metadata: str = None
):
"""
Update (patch) ground truth data
"""
client = ctx.obj["client"]
print(f"Updating ground truth data {ground_truth_data_id}")
input_dict = None
metadata_dict = None
label_dict = None
if input_json is not None:
try:
input_dict = json.loads(input_json)
except:
print("Couldn't load input json of ground truth")
exit()
if metadata is not None:
try:
metadata_dict = json.loads(metadata)
except:
print("Couldn't load metadata json of ground truth")
exit()
if label is not None:
try:
label_dict = json.loads(label)
except:
print("Couldn't load label json of ground truth")
exit()
print(client.update_ground_truth(ground_truth_data_id, input_dict, label_dict, tag, metadata_dict))
@client.command(name="list_ground_truth_data")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--page", "-p", help="Page number", type=int)
@click.option("--size", "-s", help="Size of page", type=int)
@click.pass_context
def list_ground_truth_data(ctx, app_id: str, page: int, size: int):
"""
List all ground truth data for an application
"""
client = ctx.obj["client"]
print(f"Fetching ground truth data per application {app_id}")
print(client.list_ground_truth_data(app_id, page, size))
@client.command(name="get_ground_truth_data")
@click.option("--ground_truth_data_id", "-g", help="Ground truth data id", required=True)
@click.pass_context
def get_ground_truth_data(ctx, ground_truth_data_id: str):
"""
Fetch single ground truth data object
"""
client = ctx.obj["client"]
print(f"Fetching ground truth data {ground_truth_data_id}")
print(client.get_ground_truth_data(ground_truth_data_id))
@client.command(name="delete_ground_truth_data")
@click.option("--ground_truth_data_id", "-g", help="Ground truth data id", required=True)
@click.pass_context
def delete_ground_truth_data(ctx, ground_truth_data_id: str):
"""
Mark ground truth data as deleted
"""
client = ctx.obj["client"]
print(f"Deleting ground truth data {ground_truth_data_id}")
print(client.delete_ground_truth_data(ground_truth_data_id))
@client.command(name="create_ground_truth_from_job")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("-job_id", "-j", help="Job id", required=True)
@click.pass_context
def create_ground_truth_from_job(ctx, app_id: str, job_id: str):
client = ctx.obj["client"]
print(f"Converting job {job_id} to ground truth data")
print(client.create_ground_truth_from_job(app_id, job_id))
@cli.command()
@click.option("--api-key", help="Your super.AI API KEY", required=True)
def config(api_key):
"""
Set api key.
"""
save_api_key(api_key)
@cli.command()
@click.option("--username", "-u", help="super.AI Username", required=True)
@click.option("--password", "-p", prompt=True, hide_input=True)
@click.option("--show-pip/--no-show-pip", "-pip", default=False, help="Shows how to set pip configuration manually")
def login(username, password, show_pip):
"""
Use username and password to get super.AI api key.
"""
user = Cognito(
access_key="AKIAIOSFODNN7EXAMPLE",
secret_key="<KEY>",
user_pool_id=COGNITO_USERPOOL_ID,
client_id=COGNITO_CLIENT_ID,
user_pool_region=COGNITO_REGION,
username=username,
)
try:
user.authenticate(password)
except ClientError as e:
if (
e.response["Error"]["Code"] == "UserNotFoundException"
or e.response["Error"]["Code"] == "NotAuthorizedException"
):
print("Incorrect username or password")
return
else:
print(f"Unexpected error: {e}")
return
client = Client(auth_token=user.access_token, id_token=user.id_token)
api_keys = client.get_apikeys()
if len(api_keys) > 0:
save_api_key(api_keys[0], username=username)
save_cognito_user(user)
print(f"Api key {api_keys[0]} was set")
else:
print(f"User {username} doesn't have any api keys")
try:
aws_credentials = client.get_awskeys()
if aws_credentials:
save_aws_credentials(aws_credentials)
pip_configure(show_pip=show_pip)
except SuperAIAuthorizationError as authorization_error:
logger.debug(f"ERROR Authorization: {str(authorization_error)}")
remove_aws_credentials()
except Exception as exception:
logger.debug(f"ERROR: {str(exception)}")
remove_aws_credentials()
@cli.command()
def logout():
"""
Remove stored api key
"""
save_api_key("")
print("Stored api key was removed")
@cli.group()
def ai():
"""Build and push your model docker images"""
pass
@ai.group()
def docker():
"""Docker specific commands"""
pass
@docker.command(name="build", help="Build a docker image for a sagemaker model.")
@click.option("--image-name", "-i", required=True, help="Name of the image to be built")
@click.option(
"--entry-point",
"-e",
required=True,
help="Path to file which will serve as entrypoint to the sagemaker model. Generally this is a method which calls "
"the predict method",
)
@click.option("--dockerfile", "-d", help="Path to Dockerfile. Default: Dockerfile", default="Dockerfile")
@click.option(
"--command", "-c", help="Command to run after the entrypoint in the image. Default: serve", default="serve"
)
@click.option("--worker-count", "-w", help="Number of workers to run. Default: 1", default=1)
@click.option(
"--entry-point-method",
"-em",
help="Method to be called inside the entry point. Make sure this method accepts the input data and context. "
"Default: handle",
default="handle",
)
@click.option(
"--use-shell", "-u", help="Use shell to run the build process, which is more verbose. Used by default", default=True
)
def build_docker_image(image_name, entry_point, dockerfile, command, worker_count, entry_point_method, use_shell):
build_image(
image_name=image_name,
entry_point=entry_point,
dockerfile=dockerfile,
command=command,
worker_count=worker_count,
entry_point_method=entry_point_method,
use_shell=use_shell,
)
@docker.command(name="push", help="Push the docker image built by `superai model docker-build` to ECR. ")
@click.option(
"--image-name", "-i", required=True, help="Name of the image to be pushed. You can get this from `docker image ls`"
)
@click.option("--region", "-r", help="AWS region. Default: us-east-1", default="us-east-1")
def push_docker_image(image_name, region):
push_image(image_name=image_name, region=region)
@docker.command(
"run-local",
help="Run a docker container built by `superai model docker-build` locally. "
"We assume here that the ports 8080 & 8081 are available",
)
@click.option("--image-name", "-i", required=True, help="Name of the image to be run")
@click.option(
"--model-path",
"-m",
required=True,
help="Path to the folder containing weights file to be used for getting inference",
)
@click.option(
"--gpu",
"-g",
default=False,
help="Run docker with GPUs enabled. Make sure this is a GPU container with cuda enabled, "
"and nvidia-container-runtime installed",
)
def docker_run_local(image_name, model_path, gpu):
options = [f"-v {os.path.abspath(model_path)}:/opt/ml/model/", "-p 80:8080", "-p 8081:8081 "]
if gpu:
options.append("--rm --gpus all")
options = " ".join(options)
command = f"docker run {options} {image_name}"
logger.info(f"Running command: {command}")
os.system(command)
@docker.command(
"invoke-local",
help="Invoke the locally deployed container. The API description of the local container can be found at "
"http://localhost/api-description",
)
@click.option(
"--mime",
"-mm",
default="application/json",
help="MIME type of the payload. `application/json` will be sent to the invocation directly. For other MIME types, "
"you can pass the path to file with --body. If its a valid path, it will be loaded and sent to the request. "
"Default: `application/json`",
)
@click.option(
"--body", "-b", required=True, help="Body of payload to be sent to the invocation. Can be a path to a file as well."
)
def docker_invoke_local(mime, body):
invoke_local(mime, body)
def main():
signal.signal(signal.SIGINT, _signal_handler)
sys.exit(cli())
if __name__ == "__main__":
main()
| 1.6875 | 2 |
unified_social_api/abstract/feed.py | kanishkarj/unified-social-api | 0 | 12788038 | import json
from abc import ABCMeta, abstractmethod
class Feed(metaclass=ABCMeta):
    def __init__(self, keyword):
        self._stories = self._getStories(keyword)
        # Bind the iterator after the stories are fetched; binding it to an
        # initial empty list would iterate over a discarded object.
        self.__iter__ = self._stories.__iter__
try:
self.sources = [self._stories[0].source]
except IndexError:
# TODO: No results
raise
self._len = len(self._stories)
@abstractmethod
def _getStories(self, keyword):
pass
def __repr__(self):
sources = ', '.join(source for source in self.sources)
length = self._len
rep = "Sources: {0}\nLength: {1}".format(sources, length)
return rep
def __len__(self):
return self._len
def __getitem__(self, index):
return self._stories[index]
def __setitem__(self, index, value):
self._stories[index] = value
def append(self, story):
self._stories.append(story)
self._len += 1
if story.source not in self.sources:
self.sources.append(story.source)
def extend(self, feed):
self._stories.extend(feed)
        self._len = len(self._stories)
for source in feed.sources:
if source not in self.sources:
self.sources.append(source)
def sortByTime(self, reverse):
self._stories.sort(key=lambda x: x.published, reverse=reverse)
def sortByPopularity(self):
pass
def toJson(self):
res = []
for x in self._stories:
res.append(x.__dict__)
return json.dumps(res)
| 3.03125 | 3 |
interactive_check.py | jknielse/termtable | 0 | 12788039 | <gh_stars>0
import termtable
cols = ['Name', 'Position', 'Thingy']
rows = [
['Joe', 'CEO', 'A thing'],
['Fred', 'CFO', 'Another thing'],
['Bob', 'CTO', 'One more thing'],
['Bloop', '<NAME>', 'Additional thing'],
]
tt = termtable.TerminalTable(cols, rows)
print 'Showing table:\n'
tt.show()
print 'Single selection from table:\n'
index = tt.prompt_selection('Please select a row:')
selection = rows[index]
print 'Selected:'
print selection
print ''
print 'Multi selection from table:\n'
selections = [rows[r] for r in tt.prompt_selection('Please select some rows:', multi_select=True)]
print 'Selected:'
for s in selections:
print s
print ''
print 'Dangerous selection from table:\n'
selections = [rows[r] for r in tt.prompt_selection('Please select a row... but be careful!', multi_select=True, danger=True)]
print 'Selected:'
for s in selections:
print s
print ''
print 'Sub-editing from table:\n'
subcols = ['Position']
subrows = [
['CEO'],
['CFO'],
['CTO'],
['<NAME>'],
['Something much longer! wow such long much amaze']
]
subtable = termtable.TerminalTable(subcols, subrows, header=False)
def controller(key, index):
global subtable
if key == 'q':
return 'ABANDON', []
if key == ' ':
sub_selection = subtable.prompt_selection(startx=tt.column_position(1) - 1, starty=tt.row_position(index) - 1)
if sub_selection is not None:
newrow = [rows[index][0], subrows[sub_selection][0], rows[index][2]]
return 'REPLACE', [newrow]
else:
return 'REDRAW', []
if key == 'BACKSPACE':
return 'REMOVE', [index]
if key == '+':
return 'INSERT', [0, ['<NAME>', 'Magician', 'Of the high realm']]
if key == 'ENTER':
return 'COMMIT', []
tt.interact(controller)
| 3.109375 | 3 |
tools/compare_humann2_output/compare_humann2_output.py | bernt-matthias/ASaiM-galaxytools | 1 | 12788040 | <reponame>bernt-matthias/ASaiM-galaxytools<filename>tools/compare_humann2_output/compare_humann2_output.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
def extract_abundances(fp, nb_charact_to_extract):
abundances = {}
more_abund_charact = []
abund_sum = 0
with open(fp, 'r') as abundance_f:
for line in abundance_f.readlines()[1:]:
split_line = line[:-1].split('\t')
charact_id = split_line[0]
abund = float(split_line[1])
abundances[charact_id] = 100*abund
abund_sum += abundances[charact_id]
if len(more_abund_charact) < nb_charact_to_extract:
more_abund_charact.append(charact_id)
else:
best_pos = None
for i in range(len(more_abund_charact)-1, -1, -1):
if abundances[more_abund_charact[i]] < abund:
best_pos = i
else:
break
if best_pos is not None:
tmp_more_abund_charact = more_abund_charact
more_abund_charact = tmp_more_abund_charact[:best_pos]
more_abund_charact += [charact_id]
more_abund_charact += tmp_more_abund_charact[best_pos:-1]
return abundances, more_abund_charact
def format_characteristic_name(all_name):
if all_name.find(':') != -1:
charact_id = all_name.split(':')[0]
char_name = all_name.split(':')[1][1:]
else:
charact_id = all_name
char_name = ''
char_name = char_name.replace('/', ' ')
char_name = char_name.replace('-', ' ')
char_name = char_name.replace("'", '')
if char_name.find('(') != -1 and char_name.find(')') != -1:
open_bracket = char_name.find('(')
close_bracket = char_name.find(')')+1
char_name = char_name[:open_bracket] + char_name[close_bracket:]
return charact_id, char_name
def write_more_abundant_charat(abundances, more_abund_charact, output_fp):
with open(output_fp, 'w') as output_f:
output_f.write('id\tname\t%s\n' % '\t'.join(abundances.keys()))
for mac in more_abund_charact:
charact_id, charact_name = format_characteristic_name(mac)
output_f.write('%s\t%s' % (charact_id, charact_name))
for sample in abundances:
abund = abundances[sample].get(mac, 0)
output_f.write('\t%s' % (abund))
output_f.write('\n')
def extract_similar_characteristics(abund, sim_output_fp, output_files):
abund_keys = list(abund)
sim_characteristics = set(abund[abund_keys[0]].keys())
for sample in abund_keys[1:]:
sim_characteristics.intersection_update(abund[sample].keys())
print('Similar between all samples: %s' % len(sim_characteristics))
with open(sim_output_fp, 'w') as sim_output_f:
sim_output_f.write('id\tname\t%s\n' % '\t'.join(abund_keys))
for charact in list(sim_characteristics):
charact_id, charact_name = format_characteristic_name(charact)
sim_output_f.write('%s\t%s' % (charact_id, charact_name))
for sample in abund_keys:
sim_output_f.write('\t%s' % abund[sample][charact])
sim_output_f.write('\n')
print('Specific to samples:')
diff_char = {}
for i in range(len(abund_keys)):
sample = abund_keys[i]
        print(' %s' % sample)
print(' All: %s' % len(abund[sample].keys()))
diff_char[sample] = set(abund[sample].keys())
diff_char[sample].difference_update(sim_characteristics)
perc = 100*len(diff_char[sample])/(1.*len(abund[sample].keys()))
print(' Number of specific characteristics: %s' % len(diff_char[sample]))
print(' Percentage of specific characteristics: %s' % perc)
relative_abundance = 0
with open(output_files[i], 'w') as output_f:
output_f.write('id\tname\tabundances\n')
for charact in list(diff_char[sample]):
charact_id, charact_name = format_characteristic_name(charact)
output_f.write('%s\t%s' % (charact_id, charact_name))
                output_f.write('\t%s\n' % abund[sample][charact])
relative_abundance += abund[sample][charact]
print(' Relative abundance of specific characteristics: %s' % relative_abundance)
return sim_characteristics
def compare_humann2_output(args):
abund = {}
more_abund_charact = []
for i in range(len(args.sample_name)):
abund[args.sample_name[i]], mac = extract_abundances(
args.charact_input_fp[i],
args.most_abundant_characteristics_to_extract)
more_abund_charact += mac
write_more_abundant_charat(
abund,
list(set(more_abund_charact)),
args.more_abundant_output_fp)
extract_similar_characteristics(
abund,
args.similar_output_fp,
args.specific_output_fp)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--sample_name', required=True, action='append')
parser.add_argument('--charact_input_fp', required=True, action='append')
parser.add_argument(
'--most_abundant_characteristics_to_extract',
required=True,
type=int)
parser.add_argument('--more_abundant_output_fp', required=True)
parser.add_argument('--similar_output_fp', required=True)
parser.add_argument(
'--specific_output_fp',
required=True,
action='append')
args = parser.parse_args()
if len(args.sample_name) != len(args.charact_input_fp):
string = "Same number of values (in same order) are expected for "
string += "--sample_name and --charact_input_fp"
raise ValueError(string)
if len(args.sample_name) != len(args.specific_output_fp):
string = "Same number of values (in same order) are expected for "
string += "--sample_name and --specific_output_fp"
raise ValueError(string)
compare_humann2_output(args)
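# Illustrative invocation sketch: the file paths and sample names below are
# hypothetical; only the option names come from the argparse definitions above.
#
#   python compare_humann2_output.py \
#       --sample_name sampleA --charact_input_fp sampleA_abundance.tsv \
#       --sample_name sampleB --charact_input_fp sampleB_abundance.tsv \
#       --most_abundant_characteristics_to_extract 10 \
#       --more_abundant_output_fp more_abundant.tsv \
#       --similar_output_fp similar.tsv \
#       --specific_output_fp sampleA_specific.tsv \
#       --specific_output_fp sampleB_specific.tsv
#
# Each repeated --sample_name must be paired, in order, with one --charact_input_fp
# and one --specific_output_fp, as enforced by the length checks above.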
| 2.640625 | 3 |
apps/accounts/adapter.py | cloudartisan/dojomaster | 1 | 12788041 | from allauth.account.adapter import DefaultAccountAdapter
from django.conf import settings
from django.utils import timezone
from django.shortcuts import resolve_url
class AccountAdapter(DefaultAccountAdapter):
def get_login_redirect_url(self, request):
"""
If the user has never logged in before we need them to
create their club. However, last_login will be set before
this is called, so we check if now() - last_login is suitably
short to indicate a first-time login.
"""
threshold = 90
assert request.user.is_authenticated()
if (timezone.now() - request.user.last_login).seconds < threshold:
url = '/clubs/add/'
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
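# Hedged wiring sketch: django-allauth picks up a custom adapter through the
# ACCOUNT_ADAPTER setting. The dotted path below is an assumption based on this
# file living at apps/accounts/adapter.py; the real project may use a different
# import root.
#
#   # settings.py
#   ACCOUNT_ADAPTER = 'apps.accounts.adapter.AccountAdapter'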
| 2.484375 | 2 |
teraserver/python/tests/modules/FlaskModule/API/user/test_UserRefreshToken.py | introlab/opentera | 10 | 12788042 | from tests.modules.FlaskModule.API.BaseAPITest import BaseAPITest
import datetime
class UserRefreshTokenTest(BaseAPITest):
login_endpoint = '/api/user/login'
test_endpoint = '/api/user/refresh_token'
def setUp(self):
pass
def tearDown(self):
pass
def test_no_token_http_auth_refresh(self):
response = self._request_with_http_auth(username='admin', password='<PASSWORD>')
self.assertEqual(401, response.status_code)
def test_valid_token_refresh(self):
response = self._login_with_http_auth(username='admin', password='<PASSWORD>')
self.assertEqual(200, response.status_code)
login_info = response.json()
self.assertTrue(login_info.__contains__('user_token'))
token = login_info['user_token']
response = self._request_with_token_auth(token=token)
self.assertEqual(200, response.status_code)
token_info = response.json()
self.assertTrue(token_info.__contains__('refresh_token'))
refresh_token = token_info['refresh_token']
self.assertGreater(len(refresh_token), 0)
def test_invalid_token_refresh_with_disabled_token(self):
response = self._login_with_http_auth(username='admin', password='<PASSWORD>')
self.assertEqual(200, response.status_code)
login_info = response.json()
self.assertTrue(login_info.__contains__('user_token'))
login_token = login_info['user_token']
response = self._request_with_token_auth(token=login_token)
self.assertEqual(200, response.status_code)
token_info = response.json()
self.assertTrue(token_info.__contains__('refresh_token'))
refresh_token = token_info['refresh_token']
self.assertGreater(len(refresh_token), 0)
# This should not work, token should be disabled
response = self._request_with_token_auth(token=login_token)
self.assertEqual(401, response.status_code)
def test_invalid_token_refresh_with_no_token(self):
response = self._login_with_http_auth(username='admin', password='<PASSWORD>')
self.assertEqual(200, response.status_code)
login_info = response.json()
self.assertTrue(login_info.__contains__('user_token'))
login_token = login_info['user_token']
response = self._request_with_token_auth(token='')
self.assertEqual(401, response.status_code)
| 2.5625 | 3 |
tests/test_snoo_pubnub.py | joncar/pysnoo | 6 | 12788043 | <reponame>joncar/pysnoo<filename>tests/test_snoo_pubnub.py
"""TestClass for the Snoo Pubnub"""
import json
from pubnub.enums import PNOperationType, PNStatusCategory
from pubnub.models.consumer.common import PNStatus
from pubnub.models.consumer.pubsub import PNMessageResult
from asynctest import TestCase, patch, MagicMock
from pysnoo import SnooPubNub, SessionLevel, ActivityState
from pysnoo.const import SNOO_PUBNUB_PUBLISH_KEY, SNOO_PUBNUB_SUBSCRIBE_KEY
from tests.helpers import load_fixture
class TestSnooPubnub(TestCase):
"""Snoo Client PubNub class"""
def setUp(self):
self.pubnub = SnooPubNub('ACCESS_TOKEN',
'SERIAL_NUMBER',
'UUID',
custom_event_loop=self.loop)
async def tearDown(self):
# pylint: disable=invalid-overridden-method
if self.pubnub:
await self.pubnub.stop()
@patch('pubnub.pubnub_asyncio.PubNubAsyncio.request_future')
async def test_publish_start(self, mocked_request):
"""Test publish_start"""
# pylint: disable=protected-access
# Setup
await self.pubnub.publish_start()
mocked_request.assert_called_once()
self.assertIsNone(mocked_request.mock_calls[0][2]['cancellation_event'])
options = mocked_request.mock_calls[0][2]['options_func']()
self.assertEqual(options.path, f'/publish/{SNOO_PUBNUB_PUBLISH_KEY}/{SNOO_PUBNUB_SUBSCRIBE_KEY}/0/'
f'ControlCommand.SERIAL_NUMBER/0/%7B%22command%22%3A%20%22start_snoo%22%7D')
self.assertEqual(options.operation_type, PNOperationType.PNPublishOperation)
self.assertIsNone(options.data)
self.assertEqual(options.method_string, 'GET')
# This needs to be called to retrieve the params from the configuration
options.merge_params_in({})
self.assertEqual(options.query_string,
f'auth=ACCESS_TOKEN&pnsdk=PubNub-Python-Asyncio%2F{self.pubnub._pubnub.SDK_VERSION}&uuid=UUID')
@patch('pubnub.pubnub_asyncio.PubNubAsyncio.request_future')
async def test_publish_goto_state(self, mocked_request):
"""Test publish_goto_state"""
# pylint: disable=protected-access
# Setup
await self.pubnub.publish_goto_state(SessionLevel.LEVEL1)
mocked_request.assert_called_once()
self.assertIsNone(mocked_request.mock_calls[0][2]['cancellation_event'])
options = mocked_request.mock_calls[0][2]['options_func']()
self.assertEqual(options.path, f'/publish/{SNOO_PUBNUB_PUBLISH_KEY}/{SNOO_PUBNUB_SUBSCRIBE_KEY}/0/'
f'ControlCommand.SERIAL_NUMBER/0/'
f'%7B%22command%22%3A%20%22go_to_state%22%2C%20%22state%22%3A%20%22LEVEL1%22%7D')
self.assertEqual(options.operation_type, PNOperationType.PNPublishOperation)
self.assertIsNone(options.data)
self.assertEqual(options.method_string, 'GET')
# This needs to be called to retrieve the params from the configuration
options.merge_params_in({})
self.assertEqual(options.query_string,
f'auth=ACCESS_TOKEN&pnsdk=PubNub-Python-Asyncio%2F{self.pubnub._pubnub.SDK_VERSION}&uuid=UUID')
@patch('pubnub.pubnub_asyncio.PubNubAsyncio.request_future')
async def test_publish_goto_state_with_hold(self, mocked_request):
"""Test publish_goto_state with hold parameter"""
# pylint: disable=protected-access
# Setup
await self.pubnub.publish_goto_state(SessionLevel.LEVEL2, hold=False)
mocked_request.assert_called_once()
self.assertIsNone(mocked_request.mock_calls[0][2]['cancellation_event'])
options = mocked_request.mock_calls[0][2]['options_func']()
self.assertEqual(options.path, f'/publish/{SNOO_PUBNUB_PUBLISH_KEY}/{SNOO_PUBNUB_SUBSCRIBE_KEY}/0/'
f'ControlCommand.SERIAL_NUMBER/0/'
f'%7B%22command%22%3A%20%22go_to_state%22%2C%20%22state%22%3A%20%22LEVEL2'
f'%22%2C%20%22hold%22%3A%20%22off%22%7D')
self.assertEqual(options.operation_type, PNOperationType.PNPublishOperation)
self.assertIsNone(options.data)
self.assertEqual(options.method_string, 'GET')
# This needs to be called to retrieve the params from the configuration
options.merge_params_in({})
self.assertEqual(options.query_string,
f'auth=ACCESS_TOKEN&pnsdk=PubNub-Python-Asyncio%2F{self.pubnub._pubnub.SDK_VERSION}&uuid=UUID')
@patch('pubnub.managers.SubscriptionManager.adapt_subscribe_builder')
async def test_subscribe_and_await_connect(self, mocked_subscribe_builder):
"""Test subscribe"""
# pylint: disable=protected-access
# Call Connect Status.
pn_status = PNStatus()
pn_status.category = PNStatusCategory.PNConnectedCategory
# Call after 1s: listener.status(self.pubnub._pubnub, pn_status)
self.loop.call_later(1, self.pubnub._listener.status,
self.pubnub._pubnub, pn_status)
await self.pubnub.subscribe_and_await_connect()
mocked_subscribe_builder.assert_called_once()
subscribe_operation = mocked_subscribe_builder.mock_calls[0][1][0]
self.assertEqual(subscribe_operation.channels, ['ActivityState.SERIAL_NUMBER'])
self.assertEqual(subscribe_operation.channel_groups, [])
self.assertEqual(subscribe_operation.presence_enabled, False)
self.assertEqual(subscribe_operation.timetoken, 0)
@patch('pubnub.managers.SubscriptionManager.adapt_subscribe_builder')
def test_prevent_multiple_subscription(self, mocked_subscribe_builder):
"""Test prevent multiple subscriptions"""
# pylint: disable=protected-access
# Set Listener as connected
self.pubnub._listener.connected_event.set()
self.pubnub.subscribe()
mocked_subscribe_builder.assert_not_called()
@patch('pubnub.managers.SubscriptionManager.adapt_unsubscribe_builder')
async def test_unsubscribe_and_await_disconnect(self, mocked_unsubscribe_builder):
"""Test unsubscribe"""
# pylint: disable=protected-access
# Call Connect Status.
pn_status = PNStatus()
pn_status.category = PNStatusCategory.PNAcknowledgmentCategory
pn_status.operation = PNOperationType.PNUnsubscribeOperation
# Call after 1s: listener.status(self.pubnub._pubnub, pn_status)
self.loop.call_later(1, self.pubnub._listener.status, self.pubnub._pubnub, pn_status)
# Listener is connected:
self.pubnub._listener.connected_event.set()
await self.pubnub.unsubscribe_and_await_disconnect()
mocked_unsubscribe_builder.assert_called_once()
unsubscribe_operation = mocked_unsubscribe_builder.mock_calls[0][1][0]
self.assertEqual(unsubscribe_operation.channels, ['ActivityState.SERIAL_NUMBER'])
self.assertEqual(unsubscribe_operation.channel_groups, [])
@patch('pubnub.managers.SubscriptionManager.adapt_unsubscribe_builder')
def test_prevent_multiple_unsubscription(self, mocked_unsubscribe_builder):
"""Test prevent multiple unsubscriptions"""
# Listener is disconnected (initial state)
self.pubnub.unsubscribe()
mocked_unsubscribe_builder.assert_not_called()
@patch('pubnub.pubnub_asyncio.PubNubAsyncio.request_future')
async def test_history(self, mocked_request):
"""Test history"""
# pylint: disable=protected-access
count = 55
await self.pubnub.history(count)
mocked_request.assert_called_once()
self.assertIsNone(mocked_request.mock_calls[0][2]['cancellation_event'])
options = mocked_request.mock_calls[0][2]['options_func']()
self.assertEqual(options.path, f'/v2/history/sub-key/{SNOO_PUBNUB_SUBSCRIBE_KEY}/channel/'
f'ActivityState.SERIAL_NUMBER')
self.assertEqual(options.operation_type, PNOperationType.PNHistoryOperation)
self.assertIsNone(options.data)
self.assertEqual(options.method_string, 'GET')
# This needs to be called to retrieve the params from the configuration
options.merge_params_in({})
self.assertEqual(
options.query_string, f'count={count}&pnsdk=PubNub-Python-Asyncio%2F{self.pubnub._pubnub.SDK_VERSION}&'
f'uuid=UUID&auth=ACCESS_TOKEN')
async def test_message_callback(self):
"""Test listener Callback on Message"""
# pylint: disable=protected-access
activity_state_msg_payload = json.loads(
load_fixture('', 'pubnub_message_ActivityState.json'))
activity_state = ActivityState.from_dict(activity_state_msg_payload)
# Add callback
callback = MagicMock()
remove_cb = self.pubnub.add_listener(callback)
self.assertEqual(self.pubnub._external_listeners, [callback])
# Trigger callback
self.pubnub._listener.message(self.pubnub._pubnub, PNMessageResult(
activity_state_msg_payload, None, None, 0))
callback.assert_called_once_with(activity_state)
# Remove callback
remove_cb()
self.assertEqual(self.pubnub._external_listeners, [])
| 2.109375 | 2 |
geolocations_app/models.py | sivasankar-dev/latlong-django | 0 | 12788044 | from django.db import models
# Create your models here.
class AddressFile(models.Model):
excel_file = models.FileField(upload_to='excel/')
| 1.929688 | 2 |
easier68k/core/enum/__init__.py | bpas247/Easier68k | 16 | 12788045 | <gh_stars>10-100
__all__ = ['condition',
'condition_status_code',
'ea_mode',
'ea_mode_bin',
'op_size',
'register',
'srecordtype',
'system_status_code',
'trap_task',
'trap_vector']
| 1.0625 | 1 |
youtube-commenter.py | voidabhi/python-scripts | 2 | 12788046 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python bot for comment a list of urls in YouTube
import time
import numpy as np
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
def youtube_login(email,password):
# Browser
driver = webdriver.Firefox()
driver.get('https://accounts.google.com/ServiceLogin?hl=en&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26feature%3Dsign_in_button%26app%3Ddesktop%26action_handle_signin%3Dtrue%26next%3D%252F&uilel=3&passive=true&service=youtube#identifier')
# log in
driver.find_element_by_id('Email').send_keys(email)
driver.find_element_by_id('next').click()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "Passwd")))
driver.find_element_by_id('Passwd').send_keys(password)
driver.find_element_by_id('signIn').click()
return driver
def comment_page(driver,urls,comment):
# Check if there still urls
if len( urls ) == 0:
print 'Youtube Comment Bot: Finished!'
return []
# Pop a URL from the array
url = urls.pop()
# Visite the page
driver.get(url)
driver.implicitly_wait(1)
# Is video avaliable (deleted,private) ?
if not check_exists_by_xpath(driver,'//*[@id="movie_player"]'):
return comment_page(driver, urls, random_comment())
# Scroll, wait for load comment box
driver.execute_script("window.scrollTo(0, 500);")
# Comments are disabled?
if check_exists_by_xpath(driver,'//*[@id="comments-disabled-message"]/div/span'):
return comment_page(driver, urls, random_comment())
# Lets wait for comment box
WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.ID, "comment-section-renderer")))
# Activate box for comments
driver.find_element_by_xpath("//div[@id='comment-section-renderer']/div/div[2]/div").click()
# Send comment and post
driver.implicitly_wait(5)
driver.find_element_by_xpath('//*[@id="comment-simplebox"]/div[1]').send_keys(comment)
driver.find_element_by_xpath('//*[@id="comment-simplebox"]/div[1]').send_keys(Keys.ENTER + Keys.ENTER)
# Is post ready to be clicked?
post = WebDriverWait(driver, 15).until(
EC.element_to_be_clickable((By.XPATH,'//*[@id="comment-simplebox"]/div[3]/button[2]'))
)
post.click()
# Lets wait a bit
r = np.random.randint(2,5)
time.sleep(r)
# Recursive
return comment_page(driver, urls, random_comment())
def random_comment():
messages = [
'Whats up?',
'Nice video!',
'Yoyoyo'
]
r = np.random.randint(0, len(messages))
return messages[r]
def check_exists_by_xpath(driver,xpath):
try:
driver.find_element_by_xpath(xpath)
except NoSuchElementException:
return False
return True
if __name__ == '__main__':
# Credentials
email = 'XXXXXXX'
password = '<PASSWORD>'
# List of Urls
urls = [
'https://www.youtube.com/watch?v=N-tUZXrZcyo',
'https://www.youtube.com/watch?v=07iiV3CMo5I'
]
# You can add in a file and import from there
'''
inp = open ("urls.txt","r")
for line in inp.readlines():
urls.append(line.split())
'''
# Login in youtube
driver = youtube_login(email, password)
# Random comment
comment_page(driver,urls,random_comment())
| 3.09375 | 3 |
iaso/models/comment.py | BLSQ/iaso-copy | 29 | 12788047 | <reponame>BLSQ/iaso-copy
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models
from django_comments.abstracts import CommentAbstractModel
from django.utils.translation import ugettext_lazy as _
class CommentIaso(CommentAbstractModel):
"""Allow user to leave comment on orgunit
For now
* we only allow comment on orgunit
* any user that has access to orgunit can comment on them
* any user that has access to orgunit can view the comments on them
* comment can be nested but only one level depth
These restriction are enforced at the API level
"""
parent = models.ForeignKey("self", on_delete=models.CASCADE, related_name="children", null=True, db_index=True)
content_type = models.ForeignKey(
ContentType,
verbose_name=_("content type"),
related_name="content_type_set_for_%(class)s2",
on_delete=models.CASCADE,
limit_choices_to={"model": "orgunit"},
)
| 2.15625 | 2 |
modules/check/tests/test_check_c.py | intel/diagnostics-utility | 5 | 12788048 | <gh_stars>1-10
#!/usr/bin/env python3
# /*******************************************************************************
# Copyright Intel Corporation.
# This software and the related documents are Intel copyrighted materials, and your use of them
# is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
# or transmit this software or the related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.
#
# *******************************************************************************/
# NOTE: workaround to import modules
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../'))
import importlib # noqa: E402
import unittest # noqa: E402
from unittest.mock import Mock, MagicMock, patch # noqa: E402
from modules.check.check import CheckMetadataPy, CheckSummary # noqa: E402
from modules.check import check_c # noqa: E402
test_filename = "test.so"
c_metadata = Mock()
c_metadata.name = "c_example".encode()
c_metadata.type = "Data".encode()
c_metadata.tags = "cpu".encode()
c_metadata.descr = "This is example of c module".encode()
c_metadata.dataReq = "{}".encode()
c_metadata.rights = "user".encode()
c_metadata.timeout = 1
c_metadata.version = "0.5".encode()
c_api_version = "0.1".encode()
c_run_output = Mock()
c_run_output.error_code = 0
result_str = """{"Value": {"C example check": {"Value": "C example value", "RetVal": "PASS"}}}"""
c_run_output.result = result_str.encode()
c_run = Mock()
c_run.return_value = c_run_output
c_check = Mock()
c_check.check_metadata = c_metadata
c_check.run = c_run
c_check_list = MagicMock()
c_check_list.__iter__.return_value = [c_check]
c_check_list.api_version = c_api_version
class TestClassCheckC(unittest.TestCase):
def setUp(self):
# NOTE: workaround to patching timeout exit
self.timeout_exit_patch = patch("modules.check.check.timeout_exit", lambda func: func)
self.timeout_exit_patch.start()
importlib.reload(check_c)
self.check = check_c.CheckC(c_check, c_check_list)
def tearDown(self):
# NOTE: workaround to patching timeout exit
self.timeout_exit_patch.stop()
importlib.reload(check_c)
def test_class_init_correct(self):
expected = CheckMetadataPy(
name='c_example',
type='Data',
tags='cpu',
descr='This is example of c module',
dataReq='{}',
rights='user',
timeout=1,
version='0.5',
run='run'
)
value = self.check.get_metadata()
self.assertEqual(expected.__dict__, value.__dict__)
def test_get_api_version_positive_correct(self):
expected = c_api_version
value = self.check.get_api_version()
self.assertEqual(expected, value)
def test_get_summury_positive_correct(self):
expected = CheckSummary(
result="""{"Value": {"C example check": {"Value": "C example value", "RetVal": "PASS"}}}"""
)
value = self.check.run({})
self.assertEqual(expected.__dict__, value.__dict__)
class TestGetCheckC(unittest.TestCase):
@patch("modules.check.check_c.CheckList", return_value=c_check_list)
def test_get_checks_c_correct_with_correct_argument(self, mock_check_list):
expected = CheckMetadataPy(
name='c_example',
type='Data',
tags='cpu',
descr='This is example of c module',
dataReq='{}',
rights='user',
timeout=1,
version='0.5',
run='run'
)
mock_file = MagicMock()
mock_file.__str__.return_value = test_filename
mock_file.exists.return_value = True
value = check_c.getChecksC(mock_file)[0].get_metadata()
self.assertEqual(expected.__dict__, value.__dict__)
@patch("logging.error")
def test_get_checks_c_raise_error_if_file_not_exist(self, mock_log):
mock_file = MagicMock()
mock_file.__str__.return_value = test_filename
mock_file.exists.return_value = False
self.assertRaises(OSError, check_c.getChecksC, mock_file)
mock_log.assert_called()
if __name__ == '__main__':
unittest.main()
| 1.804688 | 2 |
misc/task_time_calc.py | butla/experiments | 1 | 12788049 | <filename>misc/task_time_calc.py<gh_stars>1-10
"""
Old script that I was using to calculate how much time I spent on a task based on my notes.
"""
import functools
import re
from datetime import datetime
MATCH_ABSOLUTE_TIME = r'(?P<time>\d\d?:\d\d)'
MATCH_TIME_RANGE = r'(?P<start_time>\d\d?:\d\d)\s?-\s?(?P<end_time>\d\d?:\d\d)'
TIME_MATCH = MATCH_TIME_RANGE + '|' + MATCH_ABSOLUTE_TIME
TIME_ZERO = datetime.strptime('0', '%H')
ONE_DAY_DELTA = datetime.strptime('2', '%d') - TIME_ZERO
def calculate_entire_time(time_ranges: str) -> str:
def date_from_hour_string(hour_string):
return datetime.strptime(hour_string, '%H:%M')
time_deltas = []
for match in re.finditer(TIME_MATCH, time_ranges):
if match.group('time'):
time_delta = date_from_hour_string(match.group('time')) - TIME_ZERO
time_deltas.append(time_delta)
else:
start_time = date_from_hour_string(match.group('start_time'))
end_time = date_from_hour_string(match.group('end_time'))
if end_time > start_time:
time_deltas.append(end_time - start_time)
else:
time_deltas.append((end_time - start_time) + ONE_DAY_DELTA)
overall_timedelta = functools.reduce(lambda x, y: x+y, time_deltas)
return str(overall_timedelta)
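# Worked example (illustrative): TIME_MATCH tries the range alternative first, so
# "9:00-10:30" is parsed as a range (1:30) and the bare "0:45" as an absolute
# duration (0:45).
#
#   >>> calculate_entire_time('9:00-10:30 0:45')
#   '2:15:00'
#
# A range that crosses midnight gets ONE_DAY_DELTA added, e.g. '22:30-1:15'
# contributes 2:45.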
if __name__ == '__main__':
import sys
print(calculate_entire_time(' '.join(sys.argv[1:])))
| 3.078125 | 3 |
backend/events/model/matching.py | IanSteenstra/TherapyNow | 3 | 12788050 | import numpy as np
import matplotlib.pyplot as plt
import sys
import math
import random
import operator
def euclidean(x, x_p):
return ((x[0] - x_p[0]) ** 2 + (x[1] - x_p[1]) ** 2) ** 0.5
def greatest_euclidean(data, centers):
maxi = {}
for x in centers:
for x_p in data:
euc = euclidean(x, x_p)
if x_p not in maxi:
maxi[x_p] = 0
maxi[x_p] += euc
return max(maxi.items(), key=operator.itemgetter(1))[0]
# Uses a greedy approach, selects a data point at random and assigns this as a center for a classification
# it then finds the furthest data point from this and assigns this as a center and places it in the set
# the next center will be the furthest datapoint from all centers until all regions have a center
def gen_centers(M, data):
centers = []
N = len(data)
rand = random.randint(0, N - 1)
centers.append(data.pop(rand))
center = (0, 0)
classifiers = []
for i in range(M - 1):
center = greatest_euclidean(data, centers)
data.remove(center)
centers.append(center)
for x in data:
num = voronoi(x, centers)
classifiers.append(num)
return centers, classifiers
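# Hedged usage sketch (illustrative values): note that gen_centers mutates `data`
# (the M chosen centers are popped/removed), the first center is picked at random,
# and `classifiers` holds one region index per *remaining* point.
#
#   sample = [(0.1, 0.1), (0.9, 0.9), (0.1, 0.9), (0.9, 0.1), (0.5, 0.5)]
#   centers, classifiers = gen_centers(2, sample)
#   # after the call: len(centers) == 2, len(sample) == 3, len(classifiers) == 3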
# Determine the Voronoi region for the data point. This basically just decides how to classify all the data points
# assigning it to the closest center by euclidean distance
def voronoi(x, centers):
order = []
for i in range(len(centers)):
datapoint = centers[i]
# Euclidean to x
order.append((euclidean(x, datapoint), i))
order.sort()
g = order[0][1]
return g
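# Deterministic mini-example (illustrative): (0.1, 0.2) is closer to the first
# center than to the second, so the point is assigned region index 0.
#
#   voronoi((0.1, 0.2), [(0.0, 0.0), (1.0, 1.0)])  # -> 0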
# Generates 10,000 random datapoints with x and y values between 0 and 1
def generate_data():
data = []
for x1_ in range(100):
for x2_ in range(100):
x1 = np.random.uniform(0, 1)
x2 = np.random.uniform(0, 1)
data.append((x1, x2))
return data
def plot(M):
data = generate_data()
    centers, classifiers = gen_centers(M, data)
    unique = set(classifiers)
    print(unique)
    plt.scatter(*zip(*data), c=classifiers, cmap='rainbow')
plt.scatter(*zip(*centers), c='black')
plt.title('Greedy with {} Regions'.format(M))
plt.xlabel('x1', color='#1C2833')
plt.ylabel('x2', color='#1C2833')
plt.grid()
plt.show()
if __name__ == "__main__":
# 10 Clusters for users
regions = 10
plot(regions)
# Assumption: Users will be datapoints, users will create a voronoi region and counselors
# will be assigned to their closest associated region.
# Just using greedy. May add in branch and bound. | 3.78125 | 4 |