max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
iq_project/iq_app/urls.py | donkripton/IQ-test | 0 | 12785851 | <gh_stars>0
from django.conf.urls import patterns, url
from iq_app import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^signup/$', views.signup, name='signup'),
#user auth urls
url(r'^login/$', views.user_login, name='login'),
url(r'^home/$', views.home, name='home'),
url(r'^iq_test/$', views.iq_test, name='iq_test'),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^result_check/$', views.result_check, name='result_check'),
) | 1.601563 | 2 |
tests/test_dbinspector.py | cgons/dbinspector | 0 | 12785852 | <gh_stars>0
from dbinspector import DBInspector
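# NOTE: these tests assume a pytest fixture named `connection` (defined in the
# project's conftest.py, which is not included here) that yields a database
# connection whose execute() calls DBInspector can observe.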
class TestDBInspector:
def test_get_count(self, connection):
"""Ensure DBInspector.get_count() returns accurate count of queries executed"""
with DBInspector(connection) as inspector:
connection.execute("SELECT 1")
connection.execute("SELECT 1")
assert inspector.get_count() == 2
def test_print_queries(self, capsys, connection):
"""Ensure DBInspector.print_queries() prints all queires executed"""
with DBInspector(connection) as inspector:
connection.execute("SELECT 1")
connection.execute("SELECT 1")
assert inspector.get_count() == 2
inspector.print_queries()
printed_output = capsys.readouterr().out
assert printed_output == "SELECT 1\nSELECT 1\n"
def test_print_queries_with_print_pretty_true(self, capsys, connection):
"""Ensure DBInspector.print_queries() pretty prints all queires executed"""
with DBInspector(connection) as inspector:
connection.execute("SELECT 1")
connection.execute("SELECT 1")
assert inspector.get_count() == 2
inspector.print_queries(pretty=True)
printed_output = capsys.readouterr().out
assert (
printed_output
== "\nQUERY #1\n----------\nSELECT 1\n\nQUERY #2\n----------\nSELECT 1\n"
)
| 2.578125 | 3 |
bot/com/oth/mix.py | VoxelPrismatic/prizai | 2 | 12785853 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#/// DEPENDENCIES
import os, signal
import typing, asyncio
import discord #python3.7 -m pip install -U discord.py
import logging, subprocess
from util import embedify, pages
from discord.ext import commands
from discord.ext.commands import Bot, MissingPermissions, has_permissions
from chk.enbl import enbl
from PIL import Image
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
##///---------------------///##
##/// BOT COMMANDS ///##
##///---------------------///##
@commands.command(
aliases = [],
help = 'ai',
brief = 'A noise generator with 2 inputs',
usage = ';]mix {action} {?args}',
description = '''\
ACTION [TEXT] - [slot1, slot2, imgs, view, slots, kill]
> slot1 - Load an attached image into the "image" file
> slot2 - Load an attached image into the "pattern" file
> imgs - View currently loaded images
> view - View the status of the current mix
> slots - Start the mixing
> kill - Kills the current process if you started it
ARGS [TEXT] - [-iter=, -np]
> -iter= - Set the number of iterations [max 120, min 5]
> -np - Do not ping when finished
'''
)
@commands.check(enbl)
async def mix(ctx, *, action):
if action.split()[0] in ['slot1','s1','1','img1']:
if open('mix/status').read().strip() != 'DONE':
return await ctx.send(
'```diff\n-] I AM GENERATING SOMETHING, CHECK BACK LATER\n'
'=] Or use \';]mix view\' to see the latest iter```'
)
try:
if ctx.message.attachments[0].size > 1024*1024*4:
return await ctx.send('```diff\n-] TOO BIG\n=] Max file size is 4MB```')
await ctx.message.attachments[0].save("mix/s1.jpg")
await ctx.message.attachments[0].save("mix/now.png")
await ctx.message.attachments[0].save("mix/s1.png")
await ctx.message.add_reaction('<:wrk:608810652756344851>')
except IndexError:
return await ctx.send('```diff\n-] PLEASE SEND AN IMAGE```')
elif action.split()[0] in ['slot2','s2','2','img2']:
if open('mix/status').read().strip() != 'DONE':
return await ctx.send(
'```diff\n-] I AM GENERATING SOMETHING, CHECK BACK LATER\n'
'=] Or use \';]mix view\' to see the latest iter```'
)
try:
if ctx.message.attachments[0].size > 1024*1024*4:
return await ctx.send('```diff\n-] TOO BIG\n=] Max file size is 4MB```')
await ctx.message.attachments[0].save("mix/s2.jpg")
await ctx.message.attachments[0].save("mix/s2.png")
await ctx.message.add_reaction('<:wrk:608810652756344851>')
except IndexError:
return await ctx.send('```diff\n-] PLEASE SEND AN IMAGE```')
elif action.split()[0] in ['run','start','slots']:
if open('mix/status').read().strip() != 'DONE':
return await ctx.send(
'```diff\n-] I AM GENERATING SOMETHING, CHECK BACK LATER\n'
'=] Or use \';]mix view\' to see the latest iter```'
)
mc = ctx.message.content
settings = {
'iter': 60,
'ping': True
}
mc += ' '
if '-i=' in mc:
settings['iter'] = int(
mc[mc.find('-i=') + 3: mc.find(' ', mc.find('-i='))]
)
elif '-iter=' in mc:
settings['iter'] = int(
mc[mc.find('-iter=')+6: mc.find(' ', mc.find('-iter='))]
)
elif 'max' in mc or '-m' in mc: settings['iter'] = 60
if '-no' in mc or '-np' in mc: settings['ping'] = False
it = settings['iter']
it = max([5,min([it,120])])
settings['iter'] = it
open('mix/status','w+').write('WAIT')
open('mix/loc','w+').write('INIT')
open('mix/iter','w+').write(str(settings['iter']))
proc = subprocess.Popen(["python3.7", "mixGEN.py"])
ratio_h = 1
ratio_w = 1
image_w, image_h = Image.open('mix/s1.jpg').size
if image_h > 600: ratio_h = image_h/600
image_h /= ratio_h
image_w /= ratio_h
if image_w > 800: ratio_w = image_w/800
image_h /= ratio_w
image_w /= ratio_w
image_h = int(image_h)
image_w = int(image_w)
time = int(open("mix/iter").read())+5
res = (image_h*image_w) / (800*600)
time = int(time*res*3/4)
await ctx.send(f'''```md
#] I AM GENERATING THE IMAGE, I WILL SEND IT WHEN ITS DONE ;]
> Or use \';]mix kill\' to stop it now
=] THIS WILL TAKE UP TO {time} MIN, PLEASE STAND BY```''')
while open('mix/status').read() != 'DONE':
loc = open("mix/loc").read()
def chek(m): return m.author == ctx.author and m.channel == ctx.channel
while loc == open('mix/loc').read() and open('mix/status').read() != 'DONE':
try: m = await ctx.bot.wait_for('message',check=chek,timeout=10.0)
except asyncio.TimeoutError as ex: pass
else:
if m.content.lower() in [
';]mix kill', ';]mix stop', ';]mix clear',
';]mix end', ';]mix break', ';]mix complete',
';]mix empty',';]smix send'
]:
open('mix/status','w').write('DONE')
os.kill(proc.pid,signal.SIGKILL)
return await ctx.send(
(f'<@{<EMAIL>}> ' if settings['ping'] else '') + f'```md\n#] GENERATED!```',
file=discord.File('mix/now.png')
)
elif action.split()[0] in ['see','view','where']:
loc = open("mix/loc").read()
ttl = open("mix/iter").read()
stt = open('mix/status').read()
try:
content = f'`[{int(loc)/int(ttl)*100:.2f}% - {loc}/{ttl}] {stt}`'
except:
content=f'`[{loc}] {stt}`'
await ctx.send(content,file=discord.File(fp=open('mix/now.png','rb')))
elif action.split()[0] == 'reset' and ctx.author.id == 481591703959240706:
open('mix/status','w').write('DONE')
await ctx.message.add_reaction('<:wrk:608810652756344851>')
elif action.split()[0] in ['kill','stop','clear','end','break','complete','empty','send']:
pass
elif action.split()[0] in ['slots','imgs','images']:
await ctx.send(
files = [
discord.File(fp=open('mix/s1.png','rb')),
discord.File(fp=open('mix/s2.png','rb'))
]
)
else:
return await ctx.send(f'```diff\n-] KEY {action} WAS NOT FOUND [s1, s2, kill, view]```')
##///---------------------///##
##/// OTHER STUFF ///##
##///---------------------///##
def setup(bot):
print('+COM')
bot.add_command(mix)
print('GOOD')
def teardown(bot):
print('-COM')
bot.remove_command('mix')
print('GOOD')
| 2.234375 | 2 |
03-algorithms/03-k-nearest-neighbors/codebase/kd_tree/tests/test_kd_tree_kclosest.py | jameszhan/notes-ml | 0 | 12785854 | <filename>03-algorithms/03-k-nearest-neighbors/codebase/kd_tree/tests/test_kd_tree_kclosest.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
import sys
import logging
import unittest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
parent_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(parent_path)
from kd_tree import KDTree
logger = logging.getLogger("unittestLogger")
colors = ['r', 'b', 'g', 'y', 'm', 'c', 'k']
figure = plt.figure(figsize=(12, 8))
ax = figure.add_subplot(111, aspect=True)
ax.grid(True)
figure.subplots_adjust(left=0.05, bottom=0.05, right=0.99, top=0.99, wspace=None, hspace=None)
def draw_point(n):
if n.is_leaf():
marker = 'o'
else:
marker = 's'
color = colors[n.axis]
ax.scatter(*n.point, c=color, marker=marker, s=30, alpha=0.8)
ax.text(n.point[0] - 0.36, n.point[1] - 0.25, "({0}, {1})".format(*n.point), color='g', alpha=0.8)
_x = np.linspace(0, 10, 10)
_y = np.linspace(0, 10, 10)
pn = n.parent
if n.axis == 0:
if pn:
if n.point[1] < pn.point[1]:
_y = np.linspace(0, pn.point[1], 10)
else:
_y = np.linspace(pn.point[1], 10, 10)
ax.plot([n.point[0]] * 10, _y, c=color, label='Splitter', alpha=0.5)
else:
if pn:
if n.point[0] < pn.point[0]:
_x = np.linspace(0, pn.point[0], 10)
else:
_x = np.linspace(pn.point[0], 10, 10)
ax.plot(_x, [n.point[1]] * 10, c=color, label='Splitter', alpha=0.5)
def show_closest(tree, point, k, c):
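    # tree.kclosest is expected to return (list of (distance, node) pairs,
    # number of nodes touched, list of candidate nodes visited), as unpacked
    # and logged below.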
nodes, count, visited_nodes = tree.kclosest(point, k)
ax.scatter(*point, c=c, marker='*', s=10, alpha=0.7)
logger.info("expected {0}, touched {1}, candidates: {2}".format(len(nodes), count, len(visited_nodes)))
i = 10
for d, _ in nodes:
alpha = 0.1 * i
if alpha <= 0:
alpha = 0.1
logger.info("draw circle with radius {0} with point {1}".format(d, point))
ax.add_patch(Circle(point, d, color=c, fill=False, alpha=alpha))
i -= 2
class TestKDTree2d(unittest.TestCase):
def test_random(self):
count, sigma1, sigma2 = 10000, 0.6, 0.5
np.random.seed(0)
x = np.random.normal(3, sigma1, count)
y = np.random.normal(3, sigma2, count)
point = [3.01, 3.01]
for i in range(count):
if 2.98 < x[i] < 3.03 and 2.98 < y[i] < 3.03:
ax.scatter(x[i], y[i], c='b', marker='s', s=10, alpha=0.7)
# ax.scatter(x, y, c='b', marker='s', s=10, alpha=0.7)
points = np.c_[x, y]
tree = KDTree(points)
show_closest(tree, point, 50, 'm')
plt.show()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
unittest.main()
| 2.546875 | 3 |
apps/convert_apt_data.py | euctrl-pru/rt-python | 0 | 12785855 | #!/usr/bin/env python
#
# Copyright (c) 2017-2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
"""
Software to read Eurocontrol APDS files.
"""
import sys
import os
import bz2
import csv
import errno
import pandas as pd
from enum import IntEnum, unique
from pru.trajectory_fields import \
FLIGHT_FIELDS, FLIGHT_EVENT_FIELDS, POSITION_FIELDS, FlightEventType, \
is_valid_iso8601_date, iso8601_datetime_parser, has_bz2_extension, \
split_dual_date
from pru.trajectory_files import create_convert_apds_filenames
from pru.logger import logger
log = logger(__name__)
@unique
class ApdsField(IntEnum):
'The fields of an APDS line.'
APDS_ID = 0
AP_C_FLTID = 1
AP_C_REG = 2
ADEP_ICAO = 3
ADES_ICAO = 4
SRC_PHASE = 5
MVT_TIME_UTC = 6
BLOCK_TIME_UTC = 7
SCHED_TIME_UTC = 8
ARCTYP = 9
AP_C_RWY = 10
AP_C_STND = 11
C40_CROSS_TIME = 12
C40_CROSS_LAT = 13
C40_CROSS_LON = 14
C40_CROSS_FL = 15
C40_BEARING = 16
C100_CROSS_TIME = 17
C100_CROSS_LAT = 18
C100_CROSS_LON = 19
C100_CROSS_FL = 20
C100_BEARING = 21
class ApdsEvent:
    'A class for storing and outputting an APDS event'
def __init__(self, id, event, date_time):
self.id = id
self.event = event
self.date_time = date_time
def __lt__(self, other):
return self.event < other.event
def __repr__(self):
return '{},{},{}Z'. \
format(self.id, self.event, self.date_time.isoformat())
class ApdsPosition:
    'A class for storing and outputting an APDS position'
def __init__(self, id, date_time, latitude, longitude, airport, stand):
self.id = id
self.date_time = date_time
self.latitude = latitude
self.longitude = longitude
self.airport = airport
self.stand = stand
def __lt__(self, other):
return self.date_time < other.date_time
def __repr__(self):
return '{},,{}Z,{:.5f},{:.5f},,,,,1,APDS {} {},,'. \
format(self.id, self.date_time.isoformat(),
self.latitude, self.longitude, self.airport, self.stand)
class ApdsFlight:
'A class for reading, storing and outputting data for an APDS flight'
def __init__(self, apds_fields, airport_stands):
self.id = apds_fields[ApdsField.APDS_ID]
self.callsign = apds_fields[ApdsField.AP_C_FLTID]
self.registration = apds_fields[ApdsField.AP_C_REG]
self.aircraft_type = apds_fields[ApdsField.ARCTYP]
self.departure = apds_fields[ApdsField.ADEP_ICAO]
self.destination = apds_fields[ApdsField.ADES_ICAO]
self.events = []
self.positions = []
is_arrival = (apds_fields[ApdsField.SRC_PHASE] == 'ARR')
        airport = self.destination if (is_arrival) else self.departure
# Get the take-off or landing event
if apds_fields[ApdsField.MVT_TIME_UTC]:
movement_event = FlightEventType.WHEELS_ON if (is_arrival) \
else FlightEventType.WHEELS_OFF
movement_time = iso8601_datetime_parser(apds_fields[ApdsField.MVT_TIME_UTC])
self.events.append(ApdsEvent(self.id, movement_event, movement_time))
# if the airport and runway is known, create a position
# if airport and apds_fields[ApdsField.AP_C_RWY]:
# Get the actual off-block or in-block event
if apds_fields[ApdsField.BLOCK_TIME_UTC]:
block_event = FlightEventType.GATE_IN if (is_arrival) \
else FlightEventType.GATE_OUT
block_time = iso8601_datetime_parser(apds_fields[ApdsField.BLOCK_TIME_UTC])
self.events.append(ApdsEvent(self.id, block_event, block_time))
# if the airport and stand is known, create a position
if len(airport_stands):
stand = apds_fields[ApdsField.AP_C_STND]
if airport and stand:
if (airport, stand) in airport_stands.index:
pos = airport_stands.loc[airport, stand]
latitude = pos['LAT']
longitude = pos['LON']
self.positions.append(ApdsPosition(self.id, block_time,
latitude, longitude,
airport, stand))
# Get the scheduled off-block or in-block event
if apds_fields[ApdsField.SCHED_TIME_UTC]:
scheduled_event = FlightEventType.SCHEDULED_IN_BLOCK if (is_arrival) \
else FlightEventType.SCHEDULED_OFF_BLOCK
scheduled_time = iso8601_datetime_parser(apds_fields[ApdsField.SCHED_TIME_UTC])
self.events.append(ApdsEvent(self.id, scheduled_event, scheduled_time))
def __repr__(self):
return '{},{},{},{},,{},{}'. \
format(self.id, self.callsign, self.registration, self.aircraft_type,
self.departure, self.destination)
def convert_apds_data(filename, stands_filename):
# Extract the start and finish date strings from the filename
start_date, finish_date = split_dual_date(os.path.basename(filename))
if not is_valid_iso8601_date(start_date):
log.error('apds data file: %s, invalid start date: %s',
filename, start_date)
return errno.EINVAL
# validate the finish date string from the filename
if not is_valid_iso8601_date(finish_date):
log.error('apds data file: %s, invalid finish date: %s',
filename, finish_date)
return errno.EINVAL
log.info('apds data file: %s', filename)
airport_stands_df = pd.DataFrame()
if stands_filename:
try:
airport_stands_df = pd.read_csv(stands_filename,
index_col=['ICAO_ID', 'STAND_ID'],
memory_map=True)
airport_stands_df.sort_index()
except EnvironmentError:
log.error('could not read file: %s', stands_filename)
return errno.ENOENT
log.info('airport stands file: %s', stands_filename)
else:
log.info('airport stands not provided')
# A dict to hold the APDS flights
flights = {}
# Read the APDS flights file into flights
try:
is_bz2 = has_bz2_extension(filename)
with bz2.open(filename, 'rt', newline="") if (is_bz2) else \
open(filename, 'r') as file:
reader = csv.reader(file, delimiter=',')
next(reader, None) # skip the headers
for row in reader:
flights.setdefault(row[ApdsField.APDS_ID],
ApdsFlight(row, airport_stands_df))
except EnvironmentError:
log.error('could not read file: %s', filename)
return errno.ENOENT
log.info('apds flights read ok')
valid_flights = 0
# Output the APDS flight data
# finish_date
output_files = create_convert_apds_filenames(start_date, finish_date)
flight_file = output_files[0]
try:
with open(flight_file, 'w') as file:
file.write(FLIGHT_FIELDS)
for key, value in sorted(flights.items()):
print(value, file=file)
valid_flights += 1
log.info('written file: %s', flight_file)
except EnvironmentError:
log.error('could not write file: %s', flight_file)
# if airport stand data was provided
if len(airport_stands_df):
# Output the APDS position data
positions_file = output_files[1]
try:
with open(positions_file, 'w') as file:
file.write(POSITION_FIELDS)
for key, value in sorted(flights.items()):
for event in sorted(value.positions):
print(event, file=file)
log.info('written file: %s', positions_file)
except EnvironmentError:
log.error('could not write file: %s', positions_file)
# Output the APDS event data
event_file = output_files[2]
try:
with open(event_file, 'w') as file:
file.write(FLIGHT_EVENT_FIELDS)
for key, value in sorted(flights.items()):
for event in sorted(value.events):
print(event, file=file)
log.info('written file: %s', event_file)
except EnvironmentError:
log.error('could not write file: %s', event_file)
return errno.EACCES
log.info('apds conversion complete for %s flights on %s',
valid_flights, start_date)
return 0
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: convert_apt_data.py <apds_filename> [stands_filename]')
sys.exit(errno.EINVAL)
# Get the stands_filename, if supplied
stands_filename = ''
if len(sys.argv) >= 3:
stands_filename = sys.argv[2]
error_code = convert_apds_data(sys.argv[1], stands_filename)
if error_code:
sys.exit(error_code)
| 2.75 | 3 |
legal_advice_builder/signals.py | prototypefund/django-legal-advice-builder | 4 | 12785856 | <reponame>prototypefund/django-legal-advice-builder
import django.dispatch
answer_created = django.dispatch.Signal()
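# Illustrative usage (not part of the original module): receivers subscribe
# with Django's standard signal API, e.g.
#
#     from legal_advice_builder.signals import answer_created
#
#     def on_answer_created(sender, **kwargs):
#         ...  # react to the newly created answer
#
#     answer_created.connect(on_answer_created)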
| 1.296875 | 1 |
src/zope/app/applicationcontrol/browser/tests/test_servercontrolview.py | zopefoundation/zope.app.applicationcontrol | 0 | 12785857 | <filename>src/zope/app/applicationcontrol/browser/tests/test_servercontrolview.py<gh_stars>0
##############################################################################
#
# Copyright (c) 2001, 2002, 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Server Control View Tests
"""
import unittest
import zope.component
from zope.app.applicationcontrol.applicationcontrol import (
applicationController)
from zope.app.applicationcontrol.browser.servercontrol import ServerControlView
from zope.app.applicationcontrol.interfaces import IServerControl
from zope.component.testing import PlacelessSetup as PlacefulSetup
from zope.app.applicationcontrol.tests import MockServerControl
class Test(PlacefulSetup, unittest.TestCase):
def _TestView__newView(self, container, request):
view = ServerControlView()
view.context = container
view.request = request
return view
def test_ServerControlView(self):
control = MockServerControl()
globalSiteManager = zope.component.getGlobalSiteManager()
globalSiteManager.registerUtility(control, IServerControl)
test_serverctrl = self._TestView__newView(
applicationController,
{'shutdown': 1,
'time': 100},
)
test_serverctrl.action()
self.assertEqual(control.did_shutdown, 100)
test_serverctrl = self._TestView__newView(
applicationController,
{'restart': 1,
'time': 100},
)
test_serverctrl.action()
self.assertEqual(control.did_restart, 100)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| 1.9375 | 2 |
test/old_cintojson.py | sqohapoe/CQOSJ_LIU | 66 | 12785858 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import re
import sys
import json
#import copy
import codecs
#reload(sys)
#sys.setdefaultencoding('UTF-8')
DEBUG_MODE = False
CIN_HEAD = "%gen_inp"
ENAME_HEAD = "%ename"
CNAME_HEAD = "%cname"
ENCODING_HEAD = "%encoding"
SELKEY_HEAD = "%selkey"
KEYNAME_HEAD = "%keyname"
CHARDEF_HEAD = "%chardef"
PARSING_HEAD_STATE = 0
PARSE_KEYNAME_STATE = 1
PARSE_CHARDEF_STATE = 2
HEADS = [
CIN_HEAD,
ENAME_HEAD,
CNAME_HEAD,
ENCODING_HEAD,
SELKEY_HEAD,
KEYNAME_HEAD,
CHARDEF_HEAD,
]
class CinToJson(object):
# TODO check the possiblility if the encoding is not utf-8
encoding = 'utf-8'
def __init__(self):
self.sortByCharset = False
self.ename = ""
self.cname = ""
self.selkey = ""
self.keynames = {}
self.chardefs = {}
self.dupchardefs = {}
self.bopomofo = {}
self.big5F = {}
self.big5LF = {}
self.big5S = {}
self.big5Other = {}
self.cjk = {}
self.cjkExtA = {}
self.cjkExtB = {}
self.cjkExtC = {}
self.cjkExtD = {}
self.cjkExtE = {}
self.cjkOther = {}
self.phrases = {}
self.privateuse = {}
self.cincount = {}
self.cincount['bopomofo'] = 0
self.cincount['big5F'] = 0
self.cincount['big5LF'] = 0
self.cincount['big5S'] = 0
self.cincount['big5Other'] = 0
self.cincount['cjk'] = 0
self.cincount['cjkExtA'] = 0
self.cincount['cjkExtB'] = 0
self.cincount['cjkExtC'] = 0
self.cincount['cjkExtD'] = 0
self.cincount['cjkExtE'] = 0
self.cincount['cjkOther'] = 0
self.cincount['phrases'] = 0
self.cincount['cjkCIS'] = 0
self.cincount['privateuse'] = 0
self.cincount['totalchardefs'] = 0
self.charsetRange = {}
self.charsetRange['bopomofo'] = [int('0x3100', 16), int('0x3130', 16)]
self.charsetRange['bopomofoTone'] = [int('0x02D9', 16), int('0x02CA', 16), int('0x02C7', 16), int('0x02CB', 16)]
self.charsetRange['cjk'] = [int('0x4E00', 16), int('0x9FD6', 16)]
self.charsetRange['big5F'] = [int('0xA440', 16), int('0xC67F', 16)]
self.charsetRange['big5LF'] = [int('0xC940', 16), int('0xF9D6', 16)]
self.charsetRange['big5S'] = [int('0xA140', 16), int('0xA3C0', 16)]
self.charsetRange['cjkExtA'] = [int('0x3400', 16), int('0x4DB6', 16)]
self.charsetRange['cjkExtB'] = [int('0x20000', 16), int('0x2A6DF', 16)]
self.charsetRange['cjkExtC'] = [int('0x2A700', 16), int('0x2B73F', 16)]
self.charsetRange['cjkExtD'] = [int('0x2B740', 16), int('0x2B81F', 16)]
self.charsetRange['cjkExtE'] = [int('0x2B820', 16), int('0x2CEAF', 16)]
self.charsetRange['pua'] = [int('0xE000', 16), int('0xF900', 16)]
self.charsetRange['puaA'] = [int('0xF0000', 16), int('0xFFFFE', 16)]
self.charsetRange['puaB'] = [int('0x100000', 16), int('0x10FFFE', 16)]
self.charsetRange['cjkCIS'] = [int('0x2F800', 16), int('0x2FA20', 16)]
self.haveHashtagInKeynames = ["ez.cin", "ezsmall.cin", "ezmid.cin", "ezbig.cin"]
self.saveList = ["ename", "cname", "selkey", "keynames", "cincount", "chardefs", "dupchardefs", "privateuse"]
self.curdir = os.path.abspath(os.path.dirname(__file__))
def __del__(self):
del self.keynames
del self.chardefs
del self.dupchardefs
del self.bopomofo
del self.big5F
del self.big5LF
del self.big5S
del self.big5Other
del self.cjk
del self.cjkExtA
del self.cjkExtB
del self.cjkExtC
del self.cjkExtD
del self.cjkExtE
del self.cjkOther
del self.privateuse
del self.phrases
del self.cincount
self.keynames = {}
self.chardefs = {}
self.dupchardefs = {}
self.bopomofo = {}
self.big5F = {}
self.big5LF = {}
self.big5S = {}
self.big5Other = {}
self.cjk = {}
self.cjkExtA = {}
self.cjkExtB = {}
self.cjkExtC = {}
self.cjkExtD = {}
self.cjkExtE = {}
self.cjkOther = {}
self.privateuse = {}
self.phrases = {}
self.cincount = {}
def run(self, file, filePath, sortByCharset):
print(file)
print(filePath)
        self.jsonFile = re.sub(r'\.cin$', '', file) + '.json'
self.sortByCharset = sortByCharset
state = PARSING_HEAD_STATE
if file in self.haveHashtagInKeynames:
if DEBUG_MODE:
print("字根含有 # 符號!")
if not os.path.exists(filePath):
open(filePath, 'w').close()
with io.open(filePath, encoding='utf-8') as fs:
for line in fs:
line = re.sub('^ | $|\\n$', '', line)
if file in self.haveHashtagInKeynames:
if not line or (line[0] == '#' and state == PARSING_HEAD_STATE):
continue
else:
if not line or line[0] == '#':
continue
if state is not PARSE_CHARDEF_STATE:
if CIN_HEAD in line:
continue
if ENAME_HEAD in line:
self.ename = head_rest(ENAME_HEAD, line)
if CNAME_HEAD in line:
self.cname = head_rest(CNAME_HEAD, line)
if ENCODING_HEAD in line:
continue
if SELKEY_HEAD in line:
self.selkey = head_rest(SELKEY_HEAD, line)
if CHARDEF_HEAD in line:
if 'begin' in line:
state = PARSE_CHARDEF_STATE
else:
state = PARSING_HEAD_STATE
continue
if KEYNAME_HEAD in line:
if 'begin' in line:
state = PARSE_KEYNAME_STATE
else:
state = PARSING_HEAD_STATE
continue
if state is PARSE_KEYNAME_STATE:
key, root = safeSplit(line)
key = key.strip().lower()
if ' ' in root:
root = '\u3000'
else:
root = root.strip()
self.keynames[key] = root
continue
else:
if CHARDEF_HEAD in line:
continue
if self.cname == "中標倉頡":
if '#' in line:
line = re.sub('#.+', '', line)
key, root = safeSplit(line)
key = key.strip().lower()
if root == "Error":
if DEBUG_MODE:
print("發生錯誤!")
break
if ' ' in root:
root = '\u3000'
else:
root = root.strip()
charset = self.getCharSet(key, root)
if not self.sortByCharset:
if key in self.chardefs:
if root in self.chardefs[key]:
if DEBUG_MODE:
print("含有重複資料: " + key)
try:
self.dupchardefs[key].append(root)
except KeyError:
self.dupchardefs[key] = [root]
else:
try:
self.chardefs[key].append(root)
except KeyError:
self.chardefs[key] = [root]
self.cincount['totalchardefs'] += 1
else:
try:
self.chardefs[key].append(root)
except KeyError:
self.chardefs[key] = [root]
self.cincount['totalchardefs'] += 1
if self.sortByCharset:
if DEBUG_MODE:
print("排序字元集!")
self.mergeDicts(self.big5F, self.big5LF, self.big5S, self.big5Other, self.bopomofo, self.cjk, self.cjkExtA, self.cjkExtB, self.cjkExtC, self.cjkExtD, self.cjkExtE, self.cjkOther, self.phrases, self.privateuse)
#print("WTF")
#print(self.jsonFile);
self.saveJsonFile(self.jsonFile)
def mergeDicts(self, *chardefsdicts):
for chardefsdict in chardefsdicts:
for key in chardefsdict:
for root in chardefsdict[key]:
if key in self.chardefs:
if root in self.chardefs[key]:
if DEBUG_MODE:
print("含有重複資料: " + key)
try:
self.dupchardefs[key].append(root)
except KeyError:
self.dupchardefs[key] = [root]
else:
try:
self.chardefs[key].append(root)
except KeyError:
self.chardefs[key] = [root]
self.cincount['totalchardefs'] += 1
else:
try:
self.chardefs[key].append(root)
except KeyError:
self.chardefs[key] = [root]
self.cincount['totalchardefs'] += 1
def toJson(self):
return {key: value for key, value in self.__dict__.items() if key in self.saveList}
def saveJsonFile(self, file):
#filename = self.getJsonFile(file)
filename = file
try:
with codecs.open(filename, 'w', 'utf-8') as f:
js = json.dump(self.toJson(), f, ensure_ascii=False, sort_keys=True, indent=4)
        except Exception as e:
            print("Failed to write json file %s: %s" % (filename, e))
def getJsonDir(self):
json_dir = os.path.join(self.curdir, os.pardir, "json")
os.makedirs(json_dir, mode=0o700, exist_ok=True)
return json_dir
def getJsonFile(self, name):
return os.path.join(self.getJsonDir(), name)
def getCharSet(self, key, root):
matchstr = ''
if len(root) > 1:
try:
self.phrases[key].append(root)
except KeyError:
self.phrases[key] = [root]
self.cincount['phrases'] += 1
return "phrases"
else:
matchstr = root
matchint = ord(matchstr)
if matchint <= self.charsetRange['cjk'][1]:
if (matchint in range(self.charsetRange['bopomofo'][0], self.charsetRange['bopomofo'][1]) or # Bopomofo 區域
matchint in self.charsetRange['bopomofoTone']):
try:
self.bopomofo[key].append(root) # 注音符號
except KeyError:
self.bopomofo[key] = [root]
self.cincount['bopomofo'] += 1
return "bopomofo"
elif matchint in range(self.charsetRange['cjk'][0], self.charsetRange['cjk'][1]): # CJK Unified Ideographs 區域
try:
big5code = matchstr.encode('big5')
big5codeint = int(big5code.hex(), 16)
if big5codeint in range(self.charsetRange['big5F'][0], self.charsetRange['big5F'][1]): # Big5 常用字
try:
self.big5F[key].append(root)
except KeyError:
self.big5F[key] = [root]
self.cincount['big5F'] += 1
return "big5F"
elif big5codeint in range(self.charsetRange['big5LF'][0], self.charsetRange['big5LF'][1]): # Big5 次常用字
try:
self.big5LF[key].append(root)
except KeyError:
self.big5LF[key] = [root]
self.cincount['big5LF'] += 1
return "big5LF"
elif big5codeint in range(self.charsetRange['big5S'][0], self.charsetRange['big5S'][1]): # Big5 符號
try:
self.big5S[key].append(root)
except KeyError:
self.big5S[key] = [root]
self.cincount['big5S'] += 1
return "big5LF"
else: # Big5 其它漢字
try:
self.big5Other[key].append(root)
except KeyError:
self.big5Other[key] = [root]
self.cincount['big5Other'] += 1
return "big5Other"
except: # CJK Unified Ideographs 漢字
try:
self.cjk[key].append(root)
except KeyError:
self.cjk[key] = [root]
self.cincount['cjk'] += 1
return "cjk"
elif matchint in range(self.charsetRange['cjkExtA'][0], self.charsetRange['cjkExtA'][1]): # CJK Unified Ideographs Extension A 區域
try:
self.cjkExtA[key].append(root) # CJK 擴展 A 區
except KeyError:
self.cjkExtA[key] = [root]
self.cincount['cjkExtA'] += 1
return "cjkExtA"
else:
if matchint in range(self.charsetRange['cjkExtB'][0], self.charsetRange['cjkExtB'][1]): # CJK Unified Ideographs Extension B 區域
try:
self.cjkExtB[key].append(root) # CJK 擴展 B 區
except KeyError:
self.cjkExtB[key] = [root]
self.cincount['cjkExtB'] += 1
return "cjkExtB"
elif matchint in range(self.charsetRange['cjkExtC'][0], self.charsetRange['cjkExtC'][1]): # CJK Unified Ideographs Extension C 區域
try:
self.cjkExtC[key].append(root) # CJK 擴展 C 區
except KeyError:
self.cjkExtC[key] = [root]
self.cincount['cjkExtC'] += 1
return "cjkExtC"
elif matchint in range(self.charsetRange['cjkExtD'][0], self.charsetRange['cjkExtD'][1]): # CJK Unified Ideographs Extension D 區域
try:
self.cjkExtD[key].append(root) # CJK 擴展 D 區
except KeyError:
self.cjkExtD[key] = [root]
self.cincount['cjkExtD'] += 1
return "cjkExtD"
elif matchint in range(self.charsetRange['cjkExtE'][0], self.charsetRange['cjkExtE'][1]): # CJK Unified Ideographs Extension E 區域
try:
self.cjkExtE[key].append(root) # CJK 擴展 E 區
except KeyError:
self.cjkExtE[key] = [root]
self.cincount['cjkExtE'] += 1
return "cjkExtE"
elif (matchint in range(self.charsetRange['pua'][0], self.charsetRange['pua'][1]) or # Unicode Private Use 區域
matchint in range(self.charsetRange['puaA'][0], self.charsetRange['puaA'][1]) or
matchint in range(self.charsetRange['puaB'][0], self.charsetRange['puaB'][1])):
try:
self.privateuse[key].append(root) # Unicode 私用區
except KeyError:
self.privateuse[key] = [root]
self.cincount['privateuse'] += 1
return "pua"
elif matchint in range(self.charsetRange['cjkCIS'][0], self.charsetRange['cjkCIS'][1]): # cjk compatibility ideographs supplement 區域
try:
self.privateuse[key].append(root) # CJK 相容字集補充區
except KeyError:
self.privateuse[key] = [root]
self.cincount['cjkCIS'] += 1
return "pua"
# 不在 CJK Unified Ideographs 區域
try:
self.cjkOther[key].append(root) # CJK 其它漢字或其它字集字元
except KeyError:
self.cjkOther[key] = [root]
self.cincount['cjkOther'] += 1
return "cjkOther"
def head_rest(head, line):
return line[len(head):].strip()
def safeSplit(line):
if ' ' in line:
return line.split(' ', 1)
elif '\t' in line:
return line.split('\t', 1)
else:
return line, "Error"
# def main():
#
# app = CinToJson()
# if len(sys.argv) >= 2:
# cinFile = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, "cin", sys.argv[1])
# if os.path.exists(cinFile):
# if len(sys.argv) >= 3 and sys.argv[2] == "sort":
# app.run(sys.argv[1], cinFile, True)
# else:
# app.run(sys.argv[1], cinFile, False)
# else:
# if len(sys.argv) == 1:
# sortList = ['cnscj.cin', 'CnsPhonetic.cin']
# for file in os.listdir(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, "cin")):
# if file.endswith(".cin"):
# if DEBUG_MODE:
# print('轉換 ' + file + ' 中...')
# app.__init__()
# cinFile = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, "cin", file)
# if file in sortList:
# app.run(file, cinFile, True)
# else:
# app.run(file, cinFile, False)
# app.__del__()
# else:
# if DEBUG_MODE:
# print('檔案不存在!')
| 2.515625 | 3 |
lambdas_with_docker/lambda_to_lambda_caller.py | koki-nakamura22/lambda-local-dev-example | 0 | 12785859 | <reponame>koki-nakamura22/lambda-local-dev-example
# This function is the caller.
#
# How to execute this file.
# Must upload this file and lambda_to_lambda_callee.py to AWS Lambda then executing them
# because it cannot execute another Lambda function locally.
import json
import boto3
def lambda_handler(event, context):
response_body = {}
execute_async_params = {
'testKey': 'test parameter for executing async'
}
response_body['executeAsync'] = __executeAnotherLambdaAsync(execute_async_params)
execute_sync_params = {
'testKey': 'test parameter for executing sync'
}
response_body['executeSync'] = __executeAnotherLambdaSync(execute_sync_params)
return {
'statusCode': 200,
'body': response_body
}
# Executing another Lambda function async.
def __executeAnotherLambdaAsync(params):
return __executeAnotherLambda(params, 'Event')
# Executing another Lambda function sync.
def __executeAnotherLambdaSync(params):
return __executeAnotherLambda(params, 'RequestResponse')
# Executing another Lambda function.
def __executeAnotherLambda(params, invocation_type):
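    # boto3 invoke() semantics: InvocationType='Event' runs the callee
    # asynchronously (the response Payload is empty), while 'RequestResponse'
    # blocks until the callee finishes and returns its result.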
lambda_client = boto3.client('lambda')
response = lambda_client.invoke(
FunctionName='lambda_to_lambda_callee',
InvocationType=invocation_type,
Payload=json.dumps(params)
)
pay_load = response['Payload'].read()
pay_load_str = pay_load.decode('utf-8')
    if pay_load_str:
return json.loads(pay_load_str)
else:
return {}
| 2.4375 | 2 |
vanadis/__init__.py | CyanideCN/vanadis | 1 | 12785860 | <gh_stars>1-10
from vanadis.colormap import Colormap
from vanadis.palette import parse_palette
__version__ = '0.0.3' | 1.226563 | 1 |
hackru/locustfile.py | sakib/hackru | 3 | 12785861 | <reponame>sakib/hackru
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
#def on_start(self):
# """ on_start is called when a Locust starts, before tasks scheduled """
#self.login()
#print "starting locust (%r)" % (self.locust)
#def login(self):
# self.client.post("/login", {"username": "test"})
@task(2)
def index(self):
self.client.get("/")
#@task(1)
#def profile(self):
# self.client.get("/profile")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 9000
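# To run this load test (assuming locust is installed):
#     locust -f locustfile.py --host=https://target.example.com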
| 2.8125 | 3 |
kernel/builder.py | dereklpeck/paradoxia | 1 | 12785862 | <gh_stars>1-10
"""
Generate Agent
"""
import os
import subprocess
def create_agent(lhost, lport, mode):
if(len(lhost) > 0 and len(lport) > 0 and len(mode) > 0):
if(mode == "static"):
static = True
else:
print("[WARNING]: It is recommended you create a static Bot.")
static = False
os.chdir("bot")
with open("clientc.h", "r+") as source_code:
source = source_code.read()
replace = source.replace("lhost", lhost)
final_replace = replace.replace("lport", lport)
with open("client.h", "w") as final:
final.write(final_replace)
if(os.name == "nt"):
if(static == True):
print("[+] Building Static BOT which will connect on {lhost}:{lport}.".format(lhost=lhost, lport=lport))
subprocess.call(["make", "windows-static"], stdout=open(os.devnull,"w"), stderr=subprocess.STDOUT)
else:
print("[+] Building BOT which will connect on {lhost}:{lport}.".format(lhost=lhost, lport=lport))
subprocess.call(["make", "windows"], stdout=open(os.devnull,"w"), stderr=subprocess.STDOUT)
else:
if(static == True):
print("[+] Building Static BOT which will connect on {lhost}:{lport}.".format(lhost=lhost, lport=lport))
subprocess.call(["make", "linux-static"], stdout=open(os.devnull,"w"), stderr=subprocess.STDOUT)
else:
print("[+] Building BOT which will connect on {lhost}:{lport}.".format(lhost=lhost, lport=lport))
subprocess.call(["make", "linux"], stdout=open(os.devnull,"w"), stderr=subprocess.STDOUT)
os.chdir("..")
try:
file = "bot/Paradoxia.exe"
#os.remove("bot/Paradoxia.h")
with open(file, "rb") as backdoor:
hello = os.stat(file)
print("\n-> Paradoxia.exe | Size : {size} bytes | Path : {path}"
.format(size=str(hello.st_size), path=os.path.dirname(os.path.abspath(file))))
except FileNotFoundError:
print("-> Failed to create Backdoor.")
except Exception as es:
print("-> Error : " +str(es))
else:
print("""
[X] USAGE : build lhost=<lhost> lport=<lport> <static>/<normal>
LHOST - Ipv4 Address of Server to Connect to.
LPORT - Port of Server to Connect to.
static - Standalone Executable to run on almost any System.
normal - Executable that requires libraries to run.
EXAMPLES :
[+] build lhost=192.168.0.101 lport=443 static
|- Size : Around 2.1 MB.
|- This will generate an Executable that you can easily spread
without worrying that it will work or not.
[+] build lhost=192.168.0.101 lport=443 normal
|- Size : Around 600 kb.
|- This will generate an Executable that you can use for tests
on your own PC. Or infect a System which an environment where
it can run.
""") | 2.84375 | 3 |
accounts/views.py | RomanOsadchuk/nospoil | 0 | 12785863 | from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, TemplateView
from django.views.generic.edit import FormView
from playoffs.models import Playoff
class RegistrationView(FormView):
template_name = 'registration/register.html'
form_class = UserCreationForm
success_url = '/'
def form_valid(self, form):
name = form.cleaned_data['username']
password = form.cleaned_data['<PASSWORD>']
user = User.objects.create_user(name, password=password)
new_user = authenticate(username=name, password=password)
login(self.request, new_user)
return super(RegistrationView, self).form_valid(form)
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'registration/profile.html'
def get_context_data(self, *args, **kwargs):
context = super(ProfileView, self).get_context_data(*args, **kwargs)
context['playoffs'] = Playoff.objects.filter(owner=self.request.user)
return context
class UserPageView(DetailView):
model = User
context_object_name = 'user'
template_name = 'accounts/user_page.html'
def get_object(self, queryset=None):
username = self.kwargs.get('username')
obj = get_object_or_404(User, username=username)
return obj
def get_context_data(self, *args, **kwargs):
context = super(UserPageView, self).get_context_data(*args, **kwargs)
context['playoffs'] = Playoff.objects.filter(owner=self.object)
return context
| 2.03125 | 2 |
solutions/6002.py | pacokwon/leetcode | 2 | 12785864 | class Bitset:
def __init__(self, size):
self.bitmap = 0
self.size = size
self.cnt = 0
def fix(self, idx):
if self.bitmap & (1 << idx) == 0:
self.bitmap = self.bitmap | (1 << idx)
self.cnt += 1
def unfix(self, idx):
if self.bitmap & (1 << idx):
self.bitmap = self.bitmap ^ (1 << idx)
self.cnt -= 1
def flip(self):
self.bitmap = self.bitmap ^ ((1 << self.size) - 1)
self.cnt = self.size - self.cnt
def all(self):
return self.cnt == self.size
def one(self):
return self.bitmap > 0
def count(self):
return self.cnt
def toString(self):
a = bin(self.bitmap)[2:]
return a[::-1] + '0' * (self.size - len(a))
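# Illustrative usage (not part of the original solution):
#     bs = Bitset(5)        # bs.toString() -> "00000"
#     bs.fix(1); bs.fix(3)  # bs.toString() -> "01010"
#     bs.flip()             # bs.toString() -> "10101", bs.count() -> 3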
| 3.125 | 3 |
uiCompile2.py | bambooshoot/renameFileList | 0 | 12785865 | <gh_stars>0
from pyside2uic import compileUi
import py_compile,glob,os,re
uiPath=os.curdir
print uiPath
preIndex=len(uiPath)+1
uiFiles=glob.glob("%s/*.ui"%uiPath)
print uiFiles
for uiFile in uiFiles:
print uiFile
fileBaseName=re.search("[^\\\\]+.ui$",uiFile).group(0)
fileBaseName=re.search("^[^.]+",fileBaseName).group(0)
pyFile="%s/%sUI.py"%(uiPath,fileBaseName)
print uiFile,pyFile
pyFileP = open(pyFile, 'w')
compileUi(uiFile, pyFileP, False, 4, False)
pyFileP.close() | 2.40625 | 2 |
zorro/scripts/calcMeanFRCs.py | C-CINA/zorro | 8 | 12785866 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 16:12:33 2016
@author: rmcleod
"""
import numpy as np
import matplotlib.pyplot as plt
import os, os.path, glob
mcFRCFiles = glob.glob( "FRC/*mcFRC.npy" )
zorroFRCFiles = glob.glob( "FRC/*zorroFRC.npy" )
zorroFRCs = [None] * len( zorroFRCFiles)
for J in np.arange( len(zorroFRCFiles) ):
zorroFRCs[J] = np.load( zorroFRCFiles[J] )
mcFRCs = [None] * len( mcFRCFiles)
for J in np.arange( len(mcFRCFiles) ):
mcFRCs[J] = np.load( mcFRCFiles[J] )
zorroMeanFRC = np.mean( np.array(zorroFRCs), axis=0 )
mcMeanFRC = np.mean( np.array(mcFRCs), axis=0 )
plt.figure()
plt.plot( mcMeanFRC, '.-', color='firebrick', label='MotionCorr' )
plt.plot( zorroMeanFRC, '.-', color='black', label='Zorro' )
plt.title( "Mean FRC Re-aligned from MotionCorr" )
plt.legend()
plt.xlim( [0,len(mcMeanFRC)] )
plt.savefig( "Dataset_mean_MC_vs_Zorro.png" ) | 2.375 | 2 |
csgo_gsi_arduino_lcd/data/arduino_mediator.py | Darkness4/csgo-gsi-arduino | 5 | 12785867 | # -*- coding: utf-8 -*-
"""
ArduinoMediator.
@author: Darkness4
"""
import logging
from threading import Thread
from time import sleep, time
from typing import Optional
from csgo_gsi_arduino_lcd.entities.state import State
from csgo_gsi_arduino_lcd.entities.status import Status
from serial import Serial
class ArduinoMediator(Thread):
"""Give order to the arduino."""
state: Optional[State] = None
__refresh = False # Order to refresh informations
__start = True # Order to start/stop
__status: Status = Status.NONE
ser_arduino: Serial
def __init__(self, ser_arduino: Serial):
"""Init save."""
super(ArduinoMediator, self).__init__()
self.ser_arduino = ser_arduino
@property
def status(self) -> Status:
return self.__status
@status.setter
def status(self, status: Status):
"""Change Messenger behavior."""
self.__status = status
        self.__refresh = True  # Information needs to be refreshed
def run(self):
"""Thread start."""
while self.__start:
self.refresh() if self.__refresh else sleep(0.1)
logging.info("Messenger is dead.")
def refresh(self):
self.__refresh = False
# Has refreshed
if self.__status in (
Status.BOMB,
Status.DEFUSED,
Status.EXPLODED,
): # Bomb
self.draw_bomb_timer()
elif self.__status == Status.NONE:
self.draw_idling()
else: # Default status
self.write_player_stats()
def draw_bomb_timer(self):
"""40 sec bomb timer on arduino."""
offset = time()
actualtime: int = int(40 - time() + offset)
while actualtime > 0 and self.__status == Status.BOMB:
oldtime = actualtime
sleep(0.1)
actualtime = int(40 - time() + offset)
if oldtime != actualtime: # Actualization only integer change
self.ser_arduino.write(b"BOMB PLANTED")
# Wait for second line
sleep(0.1)
for i in range(0, 40, 5):
self.ser_arduino.write(
ArduinoMediator.progress(actualtime - i)
)
self.ser_arduino.write(str(actualtime).encode())
sleep(0.1)
if self.__status == Status.DEFUSED:
self.ser_arduino.write(b"BOMB DEFUSED")
# Wait for second line
sleep(0.1)
self.ser_arduino.write(b" ")
sleep(0.1)
elif self.__status == Status.EXPLODED:
self.ser_arduino.write(b"BOMB EXPLODED")
# Wait for second line
sleep(0.1)
self.ser_arduino.write(b" ")
sleep(0.1)
def write_player_stats(self):
"""Player stats writer."""
# Not too fast
sleep(0.1)
# Writing health and armor in Serial
self.draw_health_and_armor()
# Wait for second line
sleep(0.1)
# Kill or Money
if self.__status == Status.NOT_FREEZETIME:
self.draw_kills()
elif self.__status == Status.FREEZETIME:
self.draw_money()
sleep(0.1)
def draw_kills(self):
"""Show kills in one line."""
# HS and Kill counter
self.ser_arduino.write(b"K: ")
if self.state is not None:
for i in range(self.state.round_kills):
if i < self.state.round_killhs:
self.ser_arduino.write(b"\x01") # Byte 1 char : HS
else:
self.ser_arduino.write(b"\x00") # Byte 0 char : kill no HS
def draw_money(self):
"""Show money in one line."""
if self.state is not None:
self.ser_arduino.write(f"M: {self.state.money}".encode())
def draw_health_and_armor(self):
"""Show health and armor in one line."""
if self.state is not None:
self.ser_arduino.write(b"H: ")
self.ser_arduino.write(
ArduinoMediator.progress(self.state.health // 5)
)
self.ser_arduino.write(
ArduinoMediator.progress((self.state.health - 25) // 5)
)
self.ser_arduino.write(
ArduinoMediator.progress((self.state.health - 50) // 5)
)
self.ser_arduino.write(
ArduinoMediator.progress((self.state.health - 75) // 5)
)
self.ser_arduino.write(b" A: ")
self.ser_arduino.write(
ArduinoMediator.progress(self.state.armor // 5)
)
self.ser_arduino.write(
ArduinoMediator.progress((self.state.armor - 25) // 5)
)
self.ser_arduino.write(
ArduinoMediator.progress((self.state.armor - 50) // 5)
)
self.ser_arduino.write(
ArduinoMediator.progress((self.state.armor - 75) // 5)
)
def draw_idling(self):
"""Print text while idling."""
self.ser_arduino.write(b"Waiting for")
sleep(0.1)
self.ser_arduino.write(b"matches")
def shutdown(self):
"""Stop the mediator."""
self.__start = False
@staticmethod
def progress(i: int) -> bytes:
"""
Progress bar, for arduino 5px large.
Parameters
----------
i : int
Select which character to send to Arduino.
Returns
-------
bytes : Character send to Arduino.
"""
if i <= 0:
return b"\x07"
elif 1 <= i <= 5:
return bytes([i + 1])
else:
return b"\x06"
| 2.59375 | 3 |
scripts/data_collect.py | ehu-ai/domrand | 20 | 12785868 | <filename>scripts/data_collect.py
#!/usr/bin/env python2
from __future__ import print_function
import os
import argparse
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
"""
Hacky script for collecting real world samples
(uses ros)
0. Plug in camera and launch it (for Asus: `roslaunch openni_launch openni.launch`)
1. Use rviz to look at camera image.
2. When ready to take, enter x, y to save the current image to a file
"""
rospy.init_node('test')
bridge = CvBridge()
def save_image(filename):
raw_img = rospy.wait_for_message(img_topic, Image)
cv_image = bridge.imgmsg_to_cv2(raw_img, 'bgr8')
# Save the file
# make dir if it doesn't exist
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
success = cv2.imwrite(filename, cv_image)
assert success, "File write failed somehow"
parser = argparse.ArgumentParser(description='MAML')
parser.add_argument('--filepath', type=str, default='./data/real_world', help='')
parser.add_argument('--prefix', type=str, default=None, help='')
parser.add_argument('--camera', type=str, default='asus', help='')
FLAGS = parser.parse_args()
if FLAGS.camera == 'asus':
img_topic = '/camera/rgb/image_raw'
else:
img_topic = '/kinect2/hd/image_color'
print('Enter <x y> when ready to grab snapshot')
print()
while not rospy.is_shutdown():
    inp = raw_input('x y: ')
    x, y = inp.split()[:2]
if FLAGS.prefix is None:
filename = '{}-{}.jpg'.format(x,y)
else:
filename = '{}-{}-{}.jpg'.format(FLAGS.prefix, x,y)
full_path = os.path.join(FLAGS.filepath, filename)
save_image(full_path)
print("Saved {}".format(full_path))
| 2.6875 | 3 |
nlzss/verify.py | meunierd/nlzss | 48 | 12785869 | <reponame>meunierd/nlzss<filename>nlzss/verify.py
#!/usr/bin/env python3
import sys
from sys import stdin, stdout, stderr, exit
from os import SEEK_SET, SEEK_CUR, SEEK_END
from errno import EPIPE
from struct import pack, unpack
class DecompressionError(ValueError):
pass
class VerificationError(ValueError):
pass
def bits(byte):
return ((byte >> 7) & 1,
(byte >> 6) & 1,
(byte >> 5) & 1,
(byte >> 4) & 1,
(byte >> 3) & 1,
(byte >> 2) & 1,
(byte >> 1) & 1,
(byte) & 1)
def decompress_raw_lzss10(indata, decompressed_size, _overlay=False):
"""Decompress LZSS-compressed bytes. Returns a bytearray."""
data = bytearray()
it = iter(indata)
if _overlay:
disp_extra = 3
else:
disp_extra = 1
def writebyte(b):
data.append(b)
def readbyte():
return next(it)
def readshort():
# big-endian
a = next(it)
b = next(it)
return (a << 8) | b
def copybyte():
data.append(next(it))
while len(data) < decompressed_size:
b = readbyte()
if b == 0:
# dumb optimization
for _ in range(8):
copybyte()
continue
flags = bits(b)
for flag in flags:
if flag == 0:
copybyte()
elif flag == 1:
sh = readshort()
count = (sh >> 0xc) + 3
disp = (sh & 0xfff) + disp_extra
for _ in range(count):
writebyte(data[-disp])
else:
raise ValueError(flag)
if decompressed_size <= len(data):
break
if len(data) != decompressed_size:
raise DecompressionError("decompressed size does not match the expected size")
return data
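# NOTE: verify_file()/verify_bytes() below dispatch to lz10_tokens for 0x10
# headers, but that function is missing from this copy of the module. The
# following is a minimal reconstruction inferred from decompress_raw_lzss10
# above and the shape of lz11_tokens: each flag bit selects either a literal
# byte or a big-endian short holding count = (sh >> 0xc) + 3 and
# disp = (sh & 0xfff) + 1.
def lz10_tokens(indata):
    it = iter(indata)
    i = 4
    def readbyte():
        nonlocal i
        i += 1
        return next(it)
    while True:
        flagpos = i
        flags = bits(readbyte())
        for flag in flags:
            pos = i
            if flag == 0:
                yield readbyte(), pos, flagpos
            elif flag == 1:
                a = readbyte()
                b = readbyte()
                sh = (a << 8) | b
                count = (sh >> 0xc) + 3
                disp = (sh & 0xfff) + 1
                yield (count, -disp), pos, flagpos
            else:
                raise ValueError(flag)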
def lz11_tokens(indata):
it = iter(indata)
i = 4
def readbyte():
nonlocal i
i += 1
return next(it)
while True:
flagpos = i
flags = bits(readbyte())
for flag in flags:
pos = i
if flag == 0:
yield readbyte(), pos, flagpos
elif flag == 1:
b = readbyte()
indicator = b >> 4
if indicator == 0:
# 8 bit count, 12 bit disp
# indicator is 0, don't need to mask b
count = (b << 4)
b = readbyte()
count += b >> 4
count += 0x11
elif indicator == 1:
# 16 bit count, 12 bit disp
count = ((b & 0xf) << 12) + (readbyte() << 4)
b = readbyte()
count += b >> 4
count += 0x111
else:
# indicator is count (4 bits), 12 bit disp
count = indicator
count += 1
disp = ((b & 0xf) << 8) + readbyte()
disp += 1
yield (count, -disp), pos, flagpos
else:
raise ValueError(flag)
def verify(obj):
"""Verify LZSS-compressed bytes or a file-like object.
Shells out to verify_file() or verify_bytes() depending on
whether or not the passed-in object has a 'read' attribute or not.
Returns None on success. Raises an exception on error."""
if hasattr(obj, 'read'):
return verify_file(obj)
else:
return verify_bytes(obj)
def verify_bytes(data):
"""Verify LZSS-compressed bytes.
Returns None on success. Raises an exception on error.
"""
header = data[:4]
if header[0] == 0x10:
tokenize = lz10_tokens
elif header[0] == 0x11:
tokenize = lz11_tokens
else:
raise VerificationError("not as lzss-compressed file")
decompressed_size, = unpack("<L", header[1:] + b'\x00')
data = data[4:]
    tokens = tokenize(data)
    return verify_tokens(tokens, decompressed_size)
def verify_file(f):
"""Verify an LZSS-compressed file.
Returns None on success. Raises an exception on error.
"""
header = f.read(4)
if header[0] == 0x10:
tokenize = lz10_tokens
elif header[0] == 0x11:
tokenize = lz11_tokens
else:
raise VerificationError("not as lzss-compressed file")
decompressed_size, = unpack("<L", header[1:] + b'\x00')
data = f.read()
tokens = tokenize(data)
return verify_tokens(tokens, decompressed_size)
def verify_tokens(tokens, decompressed_length):
length = 0
for t in tokens:
t, pos, flagpos = t
if type(t) == tuple:
count, disp = t
assert disp < 0
assert 0 < count
if disp + length < 0:
raise VerificationError(
"disp too large. length: {:#x}, disp: {:#x}, pos: {:#x}, flagpos: {:#x}"
.format(length, disp, pos, flagpos))
length += count
else:
length += 1
if length >= decompressed_length:
break
if length != decompressed_length:
raise VerificationError(
"decompressed size does not match. got: {:#x}, expected: {:#x}".format(
length, decompressed_length))
def dump_file(f):
header = f.read(4)
if header[0] == 0x10:
tokenize = lz10_tokens
elif header[0] == 0x11:
tokenize = lz11_tokens
else:
raise VerificationError("not as lzss-compressed file")
decompressed_size, = unpack("<L", header[1:] + b'\x00')
data = f.read()
tokens = tokenize(data)
def dump():
for t, pos, flagpos in tokens:
if type(t) == tuple:
yield t
from pprint import pprint
pprint(list(dump()))
def main(args=None):
if args is None:
args = sys.argv[1:]
if '--overlay' in args:
args.remove('--overlay')
overlay = True
else:
overlay = False
if len(args) < 1 or args[0] == '-':
if overlay:
print("Can't verify overlays from stdin", file=stderr)
return 2
if hasattr(stdin, 'detach'):
f = stdin.detach()
else:
f = stdin
else:
try:
f = open(args[0], "rb")
except IOError as e:
print(e, file=stderr)
return 2
try:
if overlay:
print("Can't verify overlays", file=stderr)
else:
#verify_file(f)
dump_file(f)
except (VerificationError,) as e:
print(e, file=stderr)
return 1
return 0
if __name__ == '__main__':
exit(main())
| 2.578125 | 3 |
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/python/temperature_application_interface_def.py | ArrowElectronics/Vital-Signs-Monitoring | 5 | 12785870 | <reponame>ArrowElectronics/Vital-Signs-Monitoring
from ctypes import *
from common_application_interface_def import *
from m2m2_core_def import *
class M2M2_TEMPERATURE_APP_CMD_ENUM_t(c_ubyte):
_M2M2_TEMPERATURE_APP_CMD_LOWEST = 0x60
M2M2_TEMPERATURE_APP_CMD_SET_FS_REQ = 0x62
M2M2_TEMPERATURE_APP_CMD_SET_FS_RESP = 0x63
class temperature_app_stream_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("sequence_num", c_ushort),
("nTS", c_ulong),
("nTemperature1", c_ushort),
("nTemperature2", c_ushort),
]
class temperature_app_dcb_lcfg_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
]
class temperature_app_lcfg_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("field", c_ubyte),
("value", c_ulong * 21),
]
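# Illustrative decode (not part of the original interface file): a received
# byte payload can be mapped onto one of these packed structs with ctypes,
# assuming the buffer is at least sizeof() the struct:
#
#     pkt = temperature_app_stream_t.from_buffer_copy(payload_bytes)
#     print(pkt.sequence_num, pkt.nTS, pkt.nTemperature1, pkt.nTemperature2)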
| 2.390625 | 2 |
experiments/utils.py | pedrobn23/pyutai | 3 | 12785871 | import numpy as np
from pyutai import trees
from potentials import cluster
def cpd_size(cpd):
return np.prod(cpd.cardinality)
def unique_values(cpd):
unique, _ = np.unique(cpd.values, return_counts=True)
return len(unique)
def stats(net):
if not net.endswith('.bif'):
        raise ValueError(f'Net format not supported. Expected .bif, got {net}')
file_ = read.read(f'networks/{net}')
model = file_.get_model()
cpds = model.get_cpds()
    mean_unique = statistics.mean(unique_values(cpd) for cpd in cpds)
    max_values = max(
        ((i, unique_values(cpd)) for i, cpd in enumerate(cpds)),
        key=lambda x: x[1])
    print(
        f'Net: {net}. Mean unique value: {mean_unique:.2f}. Biggest cpd: {max_values}'
)
def tree_from_cpd(cpd, selector):
if selector is None:
pass
else:
selector = selector(cpd.values, cpd.variables)
cardinality_ = dict(zip(cpd.variables, cpd.cardinality))
return trees.Tree.from_array(cpd.values,
cpd.variables,
cardinality_,
selector=selector)
def cluster_from_cpd(cpd):
return cluster.Cluster.from_array(cpd.values,
cpd.variables)
| 2.46875 | 2 |
geopayment/providers/__init__.py | Lh4cKg/tbcpay | 0 | 12785872 | <filename>geopayment/providers/__init__.py
from geopayment.providers.credo import CredoProvider
from geopayment.providers.tbc import TBCProvider
from geopayment.providers.bog import IPayProvider, IPayInstallmentProvider
| 1.234375 | 1 |
PCscrapy/spiders/PCscrap.py | arju88nair/PCScrapy | 0 | 12785873 | <reponame>arju88nair/PCScrapy<filename>PCscrapy/spiders/PCscrap.py<gh_stars>0
import scrapy
import re
import datetime
import logging
import time
import RAKE
from datetime import datetime
import hashlib
from scrapy.spiders import XMLFeedSpider
from pymongo import MongoClient
from PCscrapy.scrapLinks import Links
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from random import shuffle
from PCscrapy.geography import tags
from bson.json_util import dumps
import pprint
import json
now = datetime.now()
start = time.time()
connection = MongoClient('mongodb://localhost:27017/Culminate')
db = connection.Culminate
logging.debug('Blah')
class Spider(XMLFeedSpider):
"""
Active main spider which crawls through the links provided
"""
name = "scrap"
allowed_domains = ["feeds.feedburner.com"]
itertag = 'item'
logging.getLogger("requests").setLevel(logging.WARNING)
logging.basicConfig(
level=logging.DEBUG,
format=
'%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='weird.log',
filemode='w')
def start_requests(self):
shuffle(Links)
for url in Links:
request = scrapy.Request(url=url[0], callback=self.parse)
request.meta['source'] = url[1]
request.meta['category'] = url[2]
request.meta['type'] = url[3]
request.meta['url'] = url[0]
# logging.error('For ' + url[0] + ' in ' + url[2])
yield request
"""
Parsing block for the default rss
"""
def parse_node(self, response, node):
item = {}
source = response.meta.get('source')
category = response.meta.get('category')
# self.logger.info('Hi, this is a <%s> node!: %s', self.itertag, ''.join(node.extract()))
title = node.xpath('title/text()').extract_first()
item['title'] = cleanhtml(title)
if title:
item['link'] = node.xpath('link/text()').extract_first()
item['published'] = node.xpath('pubDate/text()').extract_first()
description = node.xpath('description/text()').extract_first()
description = cleanhtml(description)
item['summary'] = description
item['source'] = response.meta.get('source')
tagText=str(title)+str(description)
countryClass=tags.getCountry(tagText)
if len(countryClass) > 0:
item['category'] = "India"
else:
item['category'] = response.meta.get('category')
if source == "The Guardian":
item['image'] = node.xpath("*[local-name()='content'][@width='460']/@url").extract_first()
else:
media = node.xpath("*[local-name()='content']/@url").extract_first()
thumb = node.xpath("*[local-name()='thumbnail']/@url").extract_first()
full = node.xpath("fullimage/text()").extract_first()
image = node.xpath("image/text()").extract_first()
enclosure = node.xpath("enclosure/@url").extract_first()
if media:
item['image'] = media
elif thumb:
item['image'] = thumb
elif enclosure:
item['image'] = enclosure
elif image:
item['image'] = image
elif full:
item['image'] = full
item['type'] = response.meta.get('type')
item['uTag'] = hashlib.sha256(
title.encode('utf-8')).hexdigest()[:16]
item['created_at'] = str(datetime.now())
Rake = RAKE.Rake('stopwords_en.txt')
words = Rake.run(title)
tagWordArray = []
for word in words:
tagWordArray.append(word[0].title())
item['tags'] = tagWordArray
db.Temp.insert_one(item)
insertingBlock(item, source, category)
def handle_spider_closed(spider, reason):
popularInsert()
print("Closed handle")
dispatcher.connect(handle_spider_closed, signals.spider_closed)
def cleanhtml(raw_html):
"""
To remove html tags in the summary
"""
if raw_html is not None:
cleanr = re.compile(r'<w:(.*)>(.*)</w:(.*)>')
cleantext = re.sub(cleanr, ' ', raw_html)
cleanr = re.compile(r'<[^>]+>')
cleantext = re.sub(cleanr, ' ', cleantext)
        cleanr = re.compile('&#039;')
cleantext = re.sub(cleanr, "'", cleantext)
cleanr = re.compile('&.*?;')
cleantext = re.sub(cleanr, '', cleantext)
cleanr = re.compile('\n')
cleantext = re.sub(cleanr, '', cleantext)
cleanr = re.compile('{.*?}')
cleantext = re.sub(cleanr, '', cleantext)
cleanr = re.compile('/.*?/')
cleantext = re.sub(cleanr, '', cleantext)
cleanr = re.compile('table.MsoNormalTable')
cleantext = re.sub(cleanr, '', cleantext)
cleantext = cleantext.strip()
return cleantext
else:
return ""
def insertingBlock(item, source, category):
"""
Inserting function with respect to the collection name parsed
"""
if db[category].count() == 0:
db[category].insert_one(item)
else:
tags = str(item['uTag'])
if db.Main.find_one(
{'uTag': tags}, {'_id': 1}):
pass
else:
insertDoc = db.Main.insert_one(item)
db[category].insert_one(item)
if insertDoc:
logging.debug('Inserted new for ' + category + " for " + source
)
logging.debug('\n')
else:
logging.debug('Error in insertion for ' +
category + " for " + source)
logging.debug('\n')
# def randomiseInsert():
# temp = list(db.Temp.find({}, {'_id': False}))
# shuffle(temp)
# if temp:
# for item in temp:
# insertingBlock(item, item['source'], item['category'])
# db.Temp.drop()
# logging.info('Work time:' + str(time.time() - start))
# logging.info('Ended at ' + now.strftime("%Y-%m-%d %H:%M"))
def popularInsert():
popular = list(db.PopularPosts.aggregate([
{
'$lookup':
{
'from': "Main",
'localField': "idPost",
'foreignField': "uTag",
'as': "Main"
}
},
{
'$project': {
'_id': 1,
"idPost": 1,
"Main": 1,
"count": {'$size': "$users"}
}
},
{'$sort': {'count': -1, 'created_at': -1}},
{
'$limit': 8
}
]))
db.Popular.remove()
for item in popular:
db.Popular.insert(item)
| 2.46875 | 2 |
compile_trump.py | DarkGuenther/TrumpBot | 0 | 12785874 | """Loads all of Trumps tweets and saves them to a pickle for easier access"""
import pickle
import json
RAW_FILE = "trump_tweets_raw.txt"
PICKLE_FILE = "trump_tweets.pickle"
raw = json.load(open(RAW_FILE))
tweets = [e["text"] for e in raw]
clean_tweets = []
# Filter out urls
for t in tweets:
parts = []
for p in t.split(sep=" "):
if "://" in p:
p = ""
parts.append(p)
t = " ".join(parts)
clean_tweets.append(t)
tweets = clean_tweets
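# The loop above preserves word positions; an equivalent, more compact form
# (illustrative only) would be:
#   tweets = [" ".join("" if "://" in p else p for p in t.split(" "))
#             for t in tweets]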
pickle.dump(tweets, open(PICKLE_FILE, "wb"))
print("Dumped", len(tweets), "tweets to", PICKLE_FILE) | 3.640625 | 4 |
src/component/ScanPaths.py | renchangjiu/kon-windows | 2 | 12785875 | import os
import threading
from PyQt5 import QtCore
from PyQt5.QtCore import QObject
from src.Apps import Apps
from src.model.Music import Music
from src.model.MusicList import MusicList
from src.service.MP3Parser import MP3
class ScanPaths(QObject, threading.Thread):
""" 异步扫描指定目录(指配置文件)下的所有音乐文件, 并写入数据库 """
# 1/2, 1: 扫描开始, 2: 扫描结束
scan_state_change = QtCore.pyqtSignal(int)
def __init__(self):
super().__init__()
@staticmethod
def scan(slot_func):
scan = ScanPaths()
scan.scan_state_change.connect(slot_func)
scan.start()
def run(self) -> None:
self.scan_state_change.emit(1)
search_paths = list(map(lambda v: v.path, filter(lambda v: v.checked, Apps.config.scanned_paths)))
music_files = ScanPaths.__find_music_files(search_paths)
musics = ScanPaths.__get_mp3_info(music_files)
Apps.musicService.batch_insert(musics)
self.scan_state_change.emit(2)
@staticmethod
def __find_music_files(search_paths: list) -> list:
files = list()
while len(search_paths) > 0:
size = len(search_paths)
for i in range(size):
pop = search_paths.pop()
if not os.path.exists(pop):
continue
listdir = list(map(lambda v: os.path.join(pop, v), ScanPaths.__listdir(pop)))
for ld in listdir:
if os.path.isdir(ld):
search_paths.append(ld)
else:
if ScanPaths.__is_music_file(ld):
files.append(ld)
return files
@staticmethod
def __is_music_file(path):
if (path.endswith("mp3") or path.endswith("MP3")) and os.path.getsize(path) > 100 * 1024:
return True
return False
@staticmethod
def __get_mp3_info(paths: list):
musics = []
for path in paths:
try:
mp3 = MP3(path)
if mp3.ret["has-ID3V2"] and mp3.duration >= 30:
size = os.path.getsize(path)
if size < 1024 * 1024:
size = str(int(size / 1024)) + "KB"
else:
size = str(round(size / 1024 / 1024, 1)) + "MB"
title = mp3.title
if title == "":
title = os.path.basename(path)
artist = mp3.artist
if artist == "":
artist = "未知歌手"
album = mp3.album
if album == "":
album = "未知专辑"
duration = mp3.duration
music = Music()
music.mid = MusicList.DEFAULT_ID
music.path = path
music.title = title
music.artist = artist
music.album = album
music.duration = duration
music.size = size
musics.append(music)
except IndexError as e:
pass
except UnicodeDecodeError as e1:
pass
return musics
@staticmethod
def __listdir(path):
try:
return os.listdir(path)
except PermissionError as e:
print(e.strerror)
return []
| 2.640625 | 3 |
exercises/ja/exc_03_16_02.py | Jette16/spacy-course | 2,085 | 12785876 | <filename>exercises/ja/exc_03_16_02.py
import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# Disable the parser
with ____.____(____):
    # Process the text
    doc = ____
    # Print the doc's named entities
    print(____)
| 2.6875 | 3 |
pvfit/measurement/spectral_correction.py | markcampanelli/pvfit | 4 | 12785877 | <gh_stars>1-10
import warnings
import numpy
import scipy.constants
import scipy.interpolate
from pvfit.common.constants import c_m_per_s, h_J_s, q_C
class DataFunction:
r"""
Store data representing one/more functions in :math:`\mathbb{R}^2`
with common, monotonic increasing domain values.
TODO Describe interface.
"""
def __init__(self, *, x: numpy.ndarray, y: numpy.ndarray):
# Copies inputs and sorts on increasing x values.
x = numpy.asarray_chkfinite(x, dtype=float)
if x.size == 0:
raise ValueError("x must have at least one element.")
if 1 < x.ndim:
raise ValueError("x cannot have dimension greater than one.")
x_size = x.size
x, x_argsort = numpy.unique(x, return_index=True)
if x.size != x_size:
raise ValueError("x values must be unique.")
if y.shape[-1] != x_size:
raise ValueError("last dimension of y must equal size of x.")
self.x = x
y = numpy.asarray_chkfinite(y, dtype=float)
self.y = y[..., x_argsort]
def __eq__(self, obj):
return isinstance(obj, DataFunction) and numpy.array_equal(self.x, obj.x) and numpy.array_equal(self.y, obj.y)
class DataFunctionPositiveXNonnegativeY(DataFunction):
r"""
Store data representing a function in :math:`\mathbb{R}^2` with
:math:`0 < x` and :math:`0 \leq y`.
TODO Describe interface.
"""
def __init__(self, *, x: numpy.ndarray, y: numpy.ndarray):
super().__init__(x=x, y=y)
if numpy.any(self.x <= 0):
raise ValueError("x values must all be positive.")
if numpy.any(self.y < 0):
raise ValueError("y values must all be non-negative.")
class QuantumEfficiency(DataFunctionPositiveXNonnegativeY):
"""
Store data representing a quantum efficiency (QE) curve.
TODO Describe interface and units [nm] and [1] or [%].
"""
def __init__(self, *, lambda_nm: numpy.ndarray, QE: numpy.ndarray, is_percent: bool = False):
super().__init__(x=lambda_nm, y=QE)
# Do not convert raw data. Instead track if it is given as a percent.
self.is_percent = is_percent
@property
def lambda_nm(self) -> numpy.ndarray:
"""Return wavelengths."""
return self.x
@property
def QE(self) -> numpy.ndarray:
"""Return QE as fraction."""
if self.is_percent:
return self.y/100
else:
return self.y
@property
def QE_percent(self) -> numpy.ndarray:
"""Return QE as percent."""
if self.is_percent:
return self.y
else:
return 100*self.y
@property
def S_A_per_W(self) -> "SpectralResponsivity":
"""
Convert quantum efficiency (QE) curve to spectral responsivity
(SR) curve.
TODO Describe interface.
"""
return SpectralResponsivity(
lambda_nm=self.lambda_nm, S_A_per_W=self.QE * self.lambda_nm * 1.e-9 * q_C / (h_J_s * c_m_per_s))
class SpectralIrradiance(DataFunctionPositiveXNonnegativeY):
"""
Store data representing a spectral irradiance curve.
TODO Describe interface and units [nm] and [A/W/m^2].
"""
def __init__(self, *, lambda_nm: numpy.ndarray, E_W_per_m2_nm: numpy.ndarray):
super().__init__(x=lambda_nm, y=E_W_per_m2_nm)
@property
def lambda_nm(self) -> numpy.ndarray:
return self.x
@property
def E_W_per_m2_nm(self) -> numpy.ndarray:
return self.y
class SpectralResponsivity(DataFunctionPositiveXNonnegativeY):
"""
Store data representing a spectral responsivity (SR) curve.
TODO Describe interface and units [nm] and [A/W].
"""
def __init__(self, *, lambda_nm: numpy.ndarray, S_A_per_W: numpy.ndarray):
super().__init__(x=lambda_nm, y=S_A_per_W)
@property
def lambda_nm(self) -> numpy.ndarray:
return self.x
@property
def S_A_per_W(self) -> numpy.ndarray:
return self.y
@property
def QE(self) -> "QuantumEfficiency":
"""
Convert spectral responsivity (SR) curve to quantum efficiency
(QE) curve.
TODO Describe interface.
"""
return QuantumEfficiency(
lambda_nm=self.lambda_nm, QE=self.S_A_per_W * h_J_s * c_m_per_s / (self.lambda_nm * 1.e-9 * q_C))
def inner_product(*, f1: DataFunction, f2: DataFunction) -> numpy.ndarray:
r"""
Compute inner product of two data functions.
The inner product of two data functions is the integral of the product
of the two functions over their common domain of defintion. Because
the data function model is piecewise linear, an algebraic solution
exists and is used for the computation. See the :class:`DataFunction`
class for details on the model that informs the computation.
Parameters
----------
f1
First data function.
f2
Second data function.
Returns
-------
inner_product : numpy.ndarray
Integral of the product of the two data functions over their
common domain.
Warns
------
UserWarning
If `inner_product` is non-finite or is zero due to no domain overlap.
Notes
-----
The inner product is computed as--
.. math:: \int_{x=x_1}^{x_2} f_1(x) \, f_{2}(x) \, \mathrm{d}x,
where the interval of integration :math:`[x_1, x_2]` is the common
domain of the two data functions. If the domains do not overlap, then
zero is returned.
"""
x_min = numpy.maximum(f1.x[0], f2.x[0])
x_max = numpy.minimum(f1.x[-1], f2.x[-1])
if x_max <= x_min:
warnings.warn("DataFunction domains do not overlap.")
return numpy.zeros(numpy.broadcast(f1.y, f2.y).shape[:-1])
x_union = numpy.union1d(f1.x, f2.x)
x_union = x_union[numpy.logical_and(x_min <= x_union, x_union <= x_max)]
y1 = scipy.interpolate.interp1d(f1.x, f1.y, copy=False, assume_sorted=True)(x_union)
y2 = scipy.interpolate.interp1d(f2.x, f2.y, copy=False, assume_sorted=True)(x_union)
slopes1 = (y1[..., 1:] - y1[..., :-1]) / (x_union[1:] - x_union[:-1])
intercepts1 = y1[..., :-1] - slopes1 * x_union[:-1]
slopes2 = (y2[..., 1:] - y2[..., :-1]) / (x_union[1:] - x_union[:-1])
intercepts2 = y2[..., :-1] - slopes2 * x_union[:-1]
A = intercepts1 * intercepts2
B = (slopes1 * intercepts2 + slopes2 * intercepts1) / 2
C = slopes1 * slopes2 / 3
x_union_squared = x_union * x_union
x_union_cubed = x_union_squared * x_union
inner_product = numpy.array(numpy.sum(C * (x_union_cubed[1:] - x_union_cubed[:-1]) +
B * (x_union_squared[1:] - x_union_squared[:-1]) +
A * (x_union[1:] - x_union[:-1]), axis=-1))
if not numpy.all(numpy.isfinite(inner_product)):
warnings.warn("Non-finite inner product detected.")
return inner_product
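# Worked example (illustrative): take f1 = f2 = the identity on [0, 1]
# (x = [0, 1], y = [0, 1]). Both pieces are y = x, so the product is x**2 and
# the exact integral is 1/3, which the closed form above reproduces:
#   f = DataFunction(x=numpy.array([0., 1.]), y=numpy.array([0., 1.]))
#   inner_product(f1=f, f2=f)  # -> 0.333... == 1/3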
def M(*, S_TD_OC: SpectralResponsivity, E_TD_OC: SpectralIrradiance, S_TD_RC: SpectralResponsivity,
E_TD_RC: SpectralIrradiance, S_RD_OC: SpectralResponsivity, E_RD_OC: SpectralIrradiance,
S_RD_RC: SpectralResponsivity, E_RD_RC: SpectralIrradiance) -> numpy.ndarray:
r"""
Compute spectral mismatch correction factor (:math:`M`).
The spectral mismatch is between a photovoltaic (PV) test device (TD)
and a PV reference device (RD), each at a particular (non-explicit)
temperature and illuminated by a (possibly different) spectral
irradiance at operating condition (OC). The corresponding reference
condition (RC) of each device need not be the same, but often are.
:math:`M` should be strictly positive, but could evalute to
be zero, infinite, or NaN depending on possible zero values of the
component integrals. See the :class:`SpectralIrradiance` and
:class:`SpectralResponsivity` classes for details on the data function
models that inform the computation, which includes vectorized
computations.
Parameters
----------
S_TD_OC
Spectral responsivity of TD at OC [A/W].
E_TD_OC
Spectral irradiance illuminating TD at OC [W/m2/nm].
S_TD_RC
Spectral responsivity of TD at RC [A/W].
E_TD_RC
Spectral irradiance illuminating TD at RC [W/m2/nm].
S_RD_OC
Spectral responsivity of RD at OC [A/W].
E_RD_OC
Spectral irradiance illuminating RD at OC [W/m2/nm].
S_RD_RC
Spectral responsivity of RD at RC [A/W].
E_RD_RC
Spectral irradiance illuminating RD at RC [W/m2/nm].
Returns
-------
M : numpy.ndarray
Spectral mismatch correction factor (:math:`M`).
Warns
------
UserWarning
If :math:`M` is computed as non-positive, infinite, or NaN.
See Also
--------
inner_product : The function used to compute the integrals of the
products of two data functions.
Notes
-----
:math:`M` is defined by this relationship between the short-circuit
currents (:math:`I_\mathrm{sc}`) of a TD and a RD at their
respective OC and RC--
.. math:: \frac{I_\mathrm{sc,TD,OC}}{I_\mathrm{sc,TD,RC}} =
M \frac{I_\mathrm{sc,RD,OC}}{I_\mathrm{sc,RD,RC}},
so that, under linearity and homogeneity assumption, :math:`M` is
computed as--
.. math:: M &= \frac{I_\mathrm{sc,TD,OC} I_\mathrm{sc,RD,RC}}
{I_\mathrm{sc,TD,RC} I_\mathrm{sc,RD,OC}} \\
&= \frac{
\int_{\lambda=0}^\infty S_\mathrm{TD}(T_\mathrm{TD,OC}, \lambda)
E_\mathrm{TD,OC}(\lambda) \, \mathrm{d}\lambda \,
\int_{\lambda=0}^\infty S_\mathrm{RD}(T_\mathrm{RD,RC}, \lambda)
E_\mathrm{RD,RC}(\lambda) \, \mathrm{d}\lambda}{
\int_{\lambda=0}^\infty S_\mathrm{TD}(T_\mathrm{TD,RC}, \lambda)
E_\mathrm{TD,RC}(\lambda) \, \mathrm{d}\lambda \,
\int_{\lambda=0}^\infty S_\mathrm{RD}(T_\mathrm{RD,OC}, \lambda)
E_\mathrm{RD,OC}(\lambda) \, \mathrm{d}\lambda},
where any pertinent constant scaling factors cancel out between
numerator and denominator, such as device areas, curve measurement
scaling errors, and unit conversions [1]_.
References
----------
.. [1] <NAME> and <NAME>, "Calibration of a single‐diode
performance model without a short‐circuit temperature
coefficient," Energy Science & Engineering, vol. 6, no. 4,
pp. 222-238, 2018. https://doi.org/10.1002/ese3.190.
"""
# TODO Warn if computation appears innacurate due to missing non-zero data at end(s) of common domain intervals.
M = numpy.array((inner_product(f1=S_TD_OC, f2=E_TD_OC) * inner_product(f1=S_RD_RC, f2=E_RD_RC)) /
(inner_product(f1=S_TD_RC, f2=E_TD_RC) * inner_product(f1=S_RD_OC, f2=E_RD_OC)))
if not numpy.all(numpy.isfinite(M)):
warnings.warn("Non-finite M detected.")
if not numpy.all(0 < M):
warnings.warn("Non-positive M detected.")
return M
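# Smoke test (illustrative, not part of the pvfit API): when the test and
# reference devices share identical spectral responsivities and see identical
# spectra at OC and RC, every integral cancels and M must equal 1.
if __name__ == '__main__':
    _lambda_nm = numpy.array([400., 700., 1100.])
    _sr = SpectralResponsivity(lambda_nm=_lambda_nm, S_A_per_W=numpy.array([0.2, 0.5, 0.3]))
    _e = SpectralIrradiance(lambda_nm=_lambda_nm, E_W_per_m2_nm=numpy.array([1.0, 1.6, 0.4]))
    print(M(S_TD_OC=_sr, E_TD_OC=_e, S_TD_RC=_sr, E_TD_RC=_e,
            S_RD_OC=_sr, E_RD_OC=_e, S_RD_RC=_sr, E_RD_RC=_e))  # -> 1.0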
| 2.484375 | 2 |
lexos/managers/file_manager.py | WheatonCS/Lexos | 107 | 12785878 | <gh_stars>100-1000
import io
import os
import shutil
import zipfile
from os import makedirs
from os.path import join as pathjoin
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
from flask import request, send_file
import lexos.helpers.constants as constants
import lexos.helpers.general_functions as general_functions
import lexos.managers.session_manager as session_manager
from lexos.managers.lexos_file import LexosFile
class FileManager:
def __init__(self):
"""Class for object to hold info about user's files & choices in Lexos.
Each user will have their own unique instance of the
FileManager. A major data attribute of this class is a dictionary
holding the LexosFile objects, each representing an uploaded file to be
used in Lexos. The key for the dictionary is the unique ID of the file,
with the value being the corresponding LexosFile object.
"""
self._files = {}
self.next_id = 0
makedirs(pathjoin(session_manager.session_folder(),
constants.FILE_CONTENTS_FOLDER))
@property
def files(self) -> Dict[int, LexosFile]:
"""A property for private attribute: _files.
:return: a dict map file id to lexos_files.
"""
return self._files
def add_file(self, original_filename: str, file_name: str,
file_string: str) -> int:
"""Adds a file to the FileManager.
The new file identifies with the next ID to be used.
:param original_filename: the original file name of the uploaded file.
:param file_name: the file name we store.
:param file_string: the string contents of the text.
:return: the id of the newly added file.
"""
# solve the problem that there is file with the same name
exist_clone_file = True
while exist_clone_file:
exist_clone_file = False
for file in list(self.files.values()):
if file.name == file_name:
file_name = 'copy of ' + file_name
original_filename = 'copy of ' + original_filename
exist_clone_file = True
break
new_file = LexosFile(
original_filename,
file_name,
file_string,
self.next_id)
self.files[new_file.id] = new_file
self.next_id += 1
self.files[new_file.id].set_name(file_name) # Set the document label
return new_file.id
def delete_files(self, file_ids: List[int]):
"""Deletes all the files that have id in IDs.
:param file_ids: an array containing all the id of the files that need
to be deleted.
"""
for file_id in file_ids:
file_id = int(file_id) # in case that the id is not int
self.files[file_id].clean_and_delete()
del self.files[file_id] # Delete the entry
def get_active_files(self) -> List[LexosFile]:
"""Creates a list of all the active files in FileManager.
:return: a list of LexosFile objects.
"""
active_files = []
for l_file in list(self.files.values()):
if l_file.active:
active_files.append(l_file)
return active_files
def delete_active_files(self) -> List[int]:
"""Deletes every active file.
These active files are deleted by calling the delete method on the
LexosFile object before removing it from the dictionary.
:return: list of deleted file_ids.
"""
file_ids = []
for file_id, l_file in list(self.files.items()):
if l_file.active:
file_ids.append(file_id)
l_file.clean_and_delete()
del self.files[file_id] # Delete the entry
return file_ids
def disable_all(self):
"""Disables every file in the file manager."""
for l_file in list(self.files.values()):
l_file.disable()
def enable_all(self):
"""Enables every file in the file manager."""
for l_file in list(self.files.values()):
l_file.enable()
def get_previews_of_active(self) -> List[Tuple[int, str, str, str]]:
"""Creates a formatted list of previews from every active file.
Each preview on this formatted list of previews is made from every
individual active file located in the file manager.
:return: a formatted list with an entry (tuple) for every active file,
containing the preview information (the file id, name, label
and preview).
"""
previews = []
for l_file in self.files.values():
if l_file.active:
previews.append(
(l_file.id, l_file.name, l_file.label,
l_file.get_preview())
)
        # TODO: figure out whether this should be l_file.label or l_file.class_label
return previews
def get_previews_of_inactive(self) -> List[Tuple[int, str, str, str]]:
"""Creates a formatted list of previews from every inactive file.
Each preview on this formatted list of previews is made from every
individual inactive file located in the file manager.
:return: a formatted list with an entry (tuple) for every inactive
file, containing the preview information (the file id, name,
label and preview).
"""
previews = []
for l_file in list(self.files.values()):
if not l_file.active:
previews.append(
(l_file.id, l_file.name, l_file.class_label,
l_file.get_preview())
)
return previews
def get_content_of_active_with_id(self) -> Dict[int, str]:
"""Helper method to get_matrix.
:return: get all the file content from the file_manager
"""
return {file.id: file.load_contents()
for file in self.get_active_files()}
def toggle_file(self, file_id: int):
"""Toggles the active status of the given file.
:param file_id: the id of the file to be toggled.
"""
l_file = self.files[file_id]
if l_file.active:
l_file.disable()
else:
l_file.enable()
def enable_files(self, file_ids: List[int]):
"""Enables a list of Lexos files.
:param file_ids: list of fileIDs selected in the UI.
"""
for file_id in file_ids:
file_id = int(file_id)
l_file = self.files[file_id]
l_file.enable()
def disable_files(self, file_ids: List[int]):
"""Disables a list of Lexos files.
:param file_ids: list of fileIDs selected in the UI.
"""
for file_id in file_ids:
file_id = int(file_id)
l_file = self.files[file_id]
l_file.disable()
def classify_active_files(self):
"""Applies a class label (from request.data) to every active file."""
# TODO: probably should not get request form here
class_label = request.data
for l_file in list(self.files.values()):
if l_file.active:
l_file.set_class_label(class_label)
def add_upload_file(self, raw_file_string: bytes, file_name: str):
"""Detects (and applies) the encoding type of the file's contents.
Since chardet runs slow, initially detects (only) MIN_ENCODING_DETECT
chars; if that fails, chardet entire file for a fuller test
:param raw_file_string: the file you want to detect the encoding
:param file_name: name of the file
"""
decoded_file_string = general_functions.decode_bytes(
raw_bytes=raw_file_string)
# Line encodings:
# \n Unix, OS X
# \r Mac OS 9
# \r\n Win. CR+LF
# The following block converts everything to '\n'
# "\r\n" -> '\n'
if "\r\n" in decoded_file_string[:constants.MIN_NEWLINE_DETECT]:
decoded_file_string = decoded_file_string.replace('\r', '')
# '\r' -> '\n'
if '\r' in decoded_file_string[:constants.MIN_NEWLINE_DETECT]:
decoded_file_string = decoded_file_string.replace('\r', '\n')
# Add the file to the FileManager
self.add_file(file_name, file_name, decoded_file_string)
def handle_upload_workspace(self):
"""Handles the session when you upload a workspace (.lexos) file."""
# save .lexos file
save_path = os.path.join(constants.UPLOAD_FOLDER,
constants.WORKSPACE_DIR)
save_file = os.path.join(save_path, str(self.next_id) + '.zip')
try:
os.makedirs(save_path)
except FileExistsError:
pass
f = open(save_file, 'wb')
f.write(request.data)
f.close()
# clean the session folder
shutil.rmtree(session_manager.session_folder())
# extract the zip
upload_session_path = os.path.join(
constants.UPLOAD_FOLDER, str(
self.next_id) + '_upload_work_space_folder')
with zipfile.ZipFile(save_file) as zf:
zf.extractall(upload_session_path)
general_functions.copy_dir(upload_session_path,
session_manager.session_folder())
# remove temp
shutil.rmtree(save_path)
shutil.rmtree(upload_session_path)
try:
# if there is no file content folder make one.
# this dir will be lost during download(zip) if your original file
# content folder does not contain anything.
os.makedirs(os.path.join(session_manager.session_folder(),
constants.FILE_CONTENTS_FOLDER))
except FileExistsError:
pass
def update_workspace(self):
"""Updates the whole work space."""
# update the savepath of each file
for l_file in list(self.files.values()):
l_file.save_path = pathjoin(
session_manager.session_folder(),
constants.FILE_CONTENTS_FOLDER,
str(l_file.id) + '.txt')
# update the session
session_manager.load()
def scrub_files(self, saving_changes: bool) -> \
List[Tuple[int, str, str, str]]:
"""Scrubs active files & creates a formatted preview list w/ results.
:param saving_changes: a boolean saying whether or not to save the
changes made.
:return: a formatted list with an entry (tuple) for every active file,
containing the preview information (the file id, label, class
label, and scrubbed contents preview).
"""
previews = []
for l_file in list(self.files.values()):
if l_file.active:
previews.append(
(l_file.id,
l_file.label,
l_file.class_label,
l_file.scrub_contents(saving_changes)))
return previews
def cut_files(self, saving_changes: bool) -> \
List[Tuple[int, str, str, str]]:
"""Cuts active files & creates a formatted preview list w/ the results.
:param saving_changes: a boolean saying whether or not to save the
changes made.
:return: a formatted list with an entry (tuple) for every active file,
containing the preview information (the file id, label, class
label, and cut contents preview).
"""
active_files = []
for l_file in list(self.files.values()):
if l_file.active:
active_files.append(l_file)
previews = []
for l_file in active_files:
l_file.active = False
children_file_contents = l_file.cut_contents()
num_cut_files = len(children_file_contents)
l_file.save_cut_options(parent_id=None)
if saving_changes:
for i, file_string in enumerate(children_file_contents):
original_filename = l_file.name
zeros = len(str(num_cut_files)) - len(str(i + 1))
doc_label = l_file.label + '_' + ('0' * zeros) + str(i + 1)
file_id = self.add_file(
original_filename, doc_label + '.txt', file_string)
self.files[file_id].set_scrub_options_from(parent=l_file)
self.files[file_id].save_cut_options(parent_id=l_file.id)
self.files[file_id].set_name(doc_label)
self.files[file_id].set_class_label(
class_label=l_file.class_label)
else:
for i, file_string in enumerate(children_file_contents):
previews.append(
(l_file.id,
l_file.name,
l_file.label + '_' + str(i + 1),
general_functions.make_preview_from(file_string)))
if saving_changes:
previews = self.get_previews_of_active()
return previews
def zip_active_files(self, zip_file_name: str):
"""Sends a zip file of files containing contents of the active files.
:param zip_file_name: Name to assign to the zipped file.
:return: zipped archive to send to the user, created with Flask's
send_file.
"""
# TODO: make send file happen in interface
zip_stream = io.BytesIO()
zip_file = zipfile.ZipFile(file=zip_stream, mode='w')
for l_file in list(self.files.values()):
if l_file.active:
# Make sure the filename has an extension
l_file_name = l_file.name
if not l_file_name.endswith('.txt'):
l_file_name = l_file_name + '.txt'
zip_file.write(
l_file.save_path,
arcname=l_file_name,
compress_type=zipfile.ZIP_STORED)
zip_file.close()
zip_stream.seek(0)
return send_file(
zip_stream,
attachment_filename=zip_file_name,
as_attachment=True)
def zip_workspace(self) -> str:
"""Sends a zip file containing a pickle file of session & its folder.
:return: the path of the zipped workspace
"""
# TODO: move this to matrix model
# initialize the save path
save_path = os.path.join(
constants.UPLOAD_FOLDER,
constants.WORKSPACE_DIR)
rounded_next_id = str(self.next_id % 10000) # take the last 4 digit
workspace_file_path = os.path.join(
constants.UPLOAD_FOLDER,
rounded_next_id + '_' + constants.WORKSPACE_FILENAME)
# remove unnecessary content in the workspace
try:
shutil.rmtree(
os.path.join(
session_manager.session_folder(),
constants.RESULTS_FOLDER))
# attempt to remove result folder(CSV matrix that kind of crap)
except FileNotFoundError:
pass
# move session folder to work space folder
try:
# try to remove previous workspace in order to resolve conflict
os.remove(workspace_file_path)
except FileNotFoundError:
pass
try:
# empty the save path in order to resolve conflict
shutil.rmtree(save_path)
except FileNotFoundError:
pass
general_functions.copy_dir(session_manager.session_folder(), save_path)
# save session in the work space folder
session_manager.save(save_path)
# zip the dir
zip_file = zipfile.ZipFile(workspace_file_path, 'w')
general_functions.zip_dir(save_path, zip_file)
zip_file.close()
# remove the original dir
shutil.rmtree(save_path)
return workspace_file_path
def check_actives_tags(self) -> Tuple[bool, bool, bool]:
"""Checks the tags of the active files for DOE/XML/HTML/SGML tags.
:return: three booleans, the first signifying the presence of any type
of tags, the secondKeyWord the presence of DOE tags, the third
signifying the presence of gutenberg tags/boilerplate.
"""
found_tags = False
found_doe = False
found_gutenberg = False
for l_file in list(self.files.values()):
if not l_file.active:
continue
# with the looping, do not do the rest of current loop
if l_file.doc_type == 'doe':
found_doe = True
found_tags = True
if l_file.has_tags:
found_tags = True
if l_file.is_gutenberg:
found_gutenberg = True
if found_doe and found_tags:
break
return found_tags, found_doe, found_gutenberg
def update_label(self, file_id: int, file_label: str):
"""Sets the file label of the file denoted to the supplied file label.
Files are denoted by the given id.
:param file_id: the id of the file for which to change the label.
:param file_label: the label to set the file to.
"""
        # Keep the entry a LexosFile; only its label changes.
        self.files[file_id].set_name(file_label)
def get_active_labels_with_id(self) -> Dict[int, str]:
"""Gets labels of all active files in dictionary{file_id: file_label}.
:return: a dictionary of the currently active files' labels.
"""
return {l_file.id: l_file.label
for l_file in self.files.values() if l_file.active}
def get_class_division_map(self) -> pd.DataFrame:
"""Gets the class division map to help with topword analysis.
:return: a pandas data frame where:
- the data is the division map with boolean values that indicate
which class each file belongs to.
- the index is the class labels.
- the column is the file id.
"""
# active files labels and classes.
active_files = self.get_active_files()
file_ids = [file.id for file in active_files]
class_labels = {file.class_label for file in active_files}
# initialize values and get class division map.
label_length = len(file_ids)
class_length = len(class_labels)
class_division_map = pd.DataFrame(
data=np.zeros((class_length, label_length), dtype=bool),
index=class_labels,
columns=file_ids)
# set correct boolean value for each file.
for file in active_files:
class_division_map[file.id][file.class_label] = True
# Set file with no class to Untitled.
class_division_map.index = \
["Untitled" if class_label == "" else class_label
for class_label in class_division_map.index]
return class_division_map
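    # Illustrative shape of the returned frame (example data, not from the
    # source): with files 1 and 2 labelled "poetry" and file 3 unlabelled,
    #                 1      2      3
    #   poetry     True   True  False
    #   Untitled  False  False   True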
def get_previews_of_all(self) -> List[dict]:
"""Creates a formatted list of previews from every file.
Each preview on this formatted list of previews is made from every
individual file located in the file manager. For use in the Select
screen.
:return: a list of dictionaries with preview information for every
file.
"""
previews = []
for l_file in list(self.files.values()):
values = {
"id": l_file.id,
"filename": l_file.name,
"label": l_file.label,
"class": l_file.class_label,
"source": l_file.original_source_filename,
"preview": l_file.get_preview(),
"state": l_file.active}
previews.append(values)
return previews
def delete_all_file(self):
"""Deletes every active file.
This is done by calling the delete method on the LexosFile object
before removing it from the dictionary.
"""
for file_id, l_file in list(self.files.items()):
l_file.clean_and_delete()
del self.files[file_id] # Delete the entry
| 2.65625 | 3 |
render.py | MikeSpreitzer/queueset-test-viz | 0 | 12785879 | #!/usr/bin/env python3
import argparse
import cairo
import parse_test
import subprocess
import typing
def hue_to_rgb(hue: float, lo: float) -> typing.Tuple[float, float, float]:
hue = max(0, min(1, hue))
if hue <= 1/3:
return (1 - (1-lo)*(hue-0)*3, lo + (1-lo)*(hue-0)*3, lo)
if hue <= 2/3:
return (lo, 1-(1-lo)*(hue-1/3)*3, lo + (1-lo)*(hue-1/3)*3)
return (lo + (1-lo)*(hue-2/3)*3, lo, 1-(1-lo)*(hue-2/3)*3)
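# Spot checks (illustrative; exact up to float rounding): with lo == 0 the
# primaries sit at the hue thirds, and lo > 0 lifts the floor of the two
# off channels.
#   hue_to_rgb(0.0, 0.0)  ->  (1.0, 0.0, 0.0)  # red
#   hue_to_rgb(1/3, 0.0)  ->  (0.0, 1.0, 0.0)  # green
#   hue_to_rgb(2/3, 0.0)  ->  (0.0, 0.0, 1.0)  # blue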
def text_in_rectangle(context: cairo.Context, text: str, left: float, top: float, width: float, height: float) -> None:
extents = context.text_extents(text)
origin = (left + (width - extents.width)/2 - extents.x_bearing,
top + (height-extents.height)/2 - extents.y_bearing)
context.move_to(*origin)
context.show_text(text)
return
def render_parse(surface: cairo.Surface, parse: parse_test.TestParser,
vert_per_second: float, top_text: str, bottom_text: str) -> None:
context = cairo.Context(surface)
context.select_font_face(
"Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
num_seats = len(parse.seats)
num_queues = len(parse.queue_to_lanes)
hor_per_track = float(36)
tick_left = float(108)
seats_left = tick_left + 9
seats_right = seats_left + hor_per_track * num_seats
vert_per_header = float(18)
htop = 0
if top_text:
top_text_extents = context.text_extents(top_text)
htop += vert_per_header
seats_orig = (seats_left, htop + 2*vert_per_header)
queues_left = seats_right + hor_per_track
queues_right = queues_left + hor_per_track * \
(parse.queue_lane_sum + (num_queues-1) * 0.1)
page_width = queues_right + hor_per_track*0.5
if top_text:
page_width = max(page_width, top_text_extents.width + 24)
queues_orig = (queues_left, seats_orig[1])
page_height = seats_orig[1] + \
(parse.max_t - parse.min_t) * vert_per_second + 1
if bottom_text:
bottom_text_extents = context.text_extents(bottom_text)
bottom_text_orig = (12 - bottom_text_extents.x_bearing,
page_height + 6 - bottom_text_extents.y_bearing)
page_height += bottom_text_extents.height + 12
page_width = max(
page_width, bottom_text_orig[0] + bottom_text_extents.x_advance)
surface.set_size(page_width, page_height)
print(
f'num_seats={num_seats}, num_queues={num_queues}, queue_lane_sum={parse.queue_lane_sum}, page_width={page_width}, page_height={page_height}')
if top_text:
text_in_rectangle(context, top_text, 0, 0, page_width, vert_per_header)
if bottom_text:
context.move_to(*bottom_text_orig)
context.show_text(bottom_text)
context.set_line_width(0.5)
# Render the secion headings
text_in_rectangle(context, "Seats", seats_left, htop,
seats_right-seats_left, vert_per_header)
text_in_rectangle(context, "Queues", queues_left, htop,
queues_right-queues_left, vert_per_header)
# get ordered list of queues
qids = sorted([qid for qid in parse.queue_to_lanes])
# Render the queue headings
qright = queues_left
qlefts: typing.Mapping[int, float] = dict()
htop += vert_per_header
for qid in qids:
hleft = qright
qlefts[qid] = qright
hwidth = hor_per_track * len(parse.queue_to_lanes[qid].seats)
qright += hwidth + hor_per_track*0.1
id_str = str(qid)
text_in_rectangle(context, id_str, hleft, htop,
hwidth, vert_per_header)
# Render the seat run fills
num_flows = 1 + parse.max_flow
for (reqid, req) in parse.requests.items():
reqid_str = f'{reqid[0]},{reqid[1]},{reqid[2]}'
stop = seats_orig[1] + vert_per_second * \
(req.real_dispatch_t-parse.min_t)
smid = seats_orig[1] + vert_per_second * (req.real_mid_t-parse.min_t)
sheight1 = vert_per_second*(req.real_mid_t-req.real_dispatch_t)
sheight2 = vert_per_second*(req.real_finish_t-req.real_mid_t)
rgb1 = hue_to_rgb(reqid[0]/num_flows, 0.80)
rgb2 = hue_to_rgb(reqid[0]/num_flows, 0.92)
context.new_path()
for (_, run) in enumerate(req.seat_runs1):
left = seats_orig[0] + run[0]*hor_per_track
width = run[1]*hor_per_track
context.rectangle(left, stop, width, sheight1)
context.set_source_rgb(*rgb1)
context.fill()
context.new_path()
for (_, run) in enumerate(req.seat_runs):
left = seats_orig[0] + run[0]*hor_per_track
width = run[1]*hor_per_track
context.rectangle(left, smid, width, sheight2)
context.set_source_rgb(*rgb2)
context.fill()
context.set_source_rgb(0, 0, 0)
# Render the rest
lastick = None
for (reqid, req) in parse.requests.items():
reqid_str = f'{reqid[0]},{reqid[1]},{reqid[2]}'
context.new_path()
stop = seats_orig[1] + vert_per_second * \
(req.real_dispatch_t-parse.min_t)
sheight = vert_per_second*(req.real_finish_t-req.real_dispatch_t)
if lastick is None or stop > lastick + 18:
et_str = str(req.real_dispatch_t-parse.min_t)
text_in_rectangle(context, et_str, 0, stop, seats_left, 0)
lastick = stop
context.move_to(tick_left, stop)
context.line_to(seats_left, stop)
# Render the seat run outlines
for (idx, run) in enumerate(req.seat_runs):
left = seats_orig[0] + run[0]*hor_per_track
width = run[1]*hor_per_track
context.rectangle(left, stop, width, sheight)
if idx == 0:
label = reqid_str
else:
label = reqid_str + chr(97+idx)
text_in_rectangle(context, label, left, stop, width, sheight)
# Render the queue entry
qleft = qlefts[req.queue] + hor_per_track * req.qlane
qtop = queues_orig[1] + vert_per_second * \
(req.virt_dispatch_t-parse.min_t)
qwidth = hor_per_track
qheight = vert_per_second*(req.virt_finish_t - req.virt_dispatch_t)
context.rectangle(qleft, qtop, qwidth, qheight)
text_in_rectangle(context, reqid_str, qleft, qtop, qwidth, qheight)
context.stroke()
eval_y = seats_orig[1] + vert_per_second*(parse.eval_t - parse.min_t)
context.move_to(hor_per_track*0.1, eval_y)
context.line_to(page_width - hor_per_track*0.1, eval_y)
context.set_source_rgb(1, 0, 0)
context.stroke()
context.show_page()
return
def git_credit() -> str:
cp1 = subprocess.run(['git', 'rev-parse', 'HEAD'],
capture_output=True, check=True, text=True)
cp2 = subprocess.run(['git', 'status', '--porcelain'],
capture_output=True, check=True, text=True)
ans = 'Rendered by github.com/MikeSpreitzer/queueset-test-viz commit ' + cp1.stdout.rstrip()
if cp2.stdout.rstrip():
ans += ' dirty'
return ans
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(
description='render queueset test log')
arg_parser.add_argument('--vert-per-sec', type=float,
default=36, help='points per second, default is 36')
arg_parser.add_argument('--top-text')
arg_parser.add_argument(
'--bottom-text', help='defaults to github reference to renderer')
arg_parser.add_argument('infile', type=argparse.FileType('rt'))
arg_parser.add_argument('outfile', type=argparse.FileType('wb'))
args = arg_parser.parse_args()
if args.bottom_text is None:
bottom_text = git_credit()
else:
bottom_text = args.bottom_text
test_parser = parse_test.TestParser()
test_parser.parse(args.infile)
surface = cairo.PDFSurface(args.outfile, 100, 100)
render_parse(surface, test_parser, args.vert_per_sec,
args.top_text, bottom_text)
surface.finish()
args.outfile.close()
| 2.390625 | 2 |
sito_io/manifest.py | xkortex/sito-io | 0 | 12785880 | from collections.abc import MutableMapping
from .fileio import ManifestMap
class MutableManifestTree(MutableMapping):
"""An abstract (but possibly concrete) representation of a collection of resources
MMT acts much like a dict, but manages side-effects and invariants
todo: we may want to lazily eval Resource returns in order to populate the absolute
path on demand. Or do other side-effecty things. For now, Resource just contains
redundant info for abspath, for simplicity
idea: in-place file interfaces to be able to write to virtual "files" in the tree and
then save the whole manifest. This could be useful for manipulating archives or interacting
with a remote endpoint.
"""
def __init__(self, base):
self._manifest = ManifestMap(base=base, elements={})
def __getitem__(self, key):
"""Given a relative path, return its Resource"""
return self._manifest.elements.__getitem__(key)
def __setitem__(self, key, resource):
"""Insert a resource"""
self._manifest.elements.__setitem__(key, resource)
def __delitem__(self, key):
"""Remove a resource"""
self._manifest.elements.__delitem__(key)
    def __len__(self):
        """Count of files (not dirs) in manifest"""
        return len(self._manifest.elements)
    def __iter__(self):
        """Iterate over keys"""
        return iter(self._manifest.elements)
# def add(self, resource):
# """Add a resource to the tree"""
#
# def discard(self, element):
# """Remove a resource"""
def items(self):
yield from self._manifest.elements.items()
def keys(self):
"""Iterates over relative paths"""
yield from self._manifest.elements.keys()
def values(self):
"""Iterates over Resource objects"""
yield from self._manifest.elements.values()
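# Illustrative usage (relative imports keep this module from running directly,
# so this sketch is comments only; any object can stand in for a Resource):
#   tree = MutableManifestTree(base="/tmp")
#   tree["notes/readme.txt"] = resource
#   len(tree)    # -> 1
#   list(tree)   # -> ["notes/readme.txt"]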
| 2.984375 | 3 |
Fall 2016/Homeworks/HW2/Solutions/problem9.py | asmitde/TA-PSU-CMPSC101 | 0 | 12785881 | # Name: <NAME>
# ID: aud311
# Date: 09/20/2016
# Assignment: Homework 2, Problem 9
# Description: Program to convert a 6-bit binary number to decimal
# Prompt the user to enter a 6-bit binary number
binary = int(input('Enter a 6-bit binary number: '))
# Extract the bits and form the decimal number
decimal = 0
decimal += (binary % 10) * (2 ** 0)
binary //= 10
decimal += (binary % 10) * (2 ** 1)
binary //= 10
decimal += (binary % 10) * (2 ** 2)
binary //= 10
decimal += (binary % 10) * (2 ** 3)
binary //= 10
decimal += (binary % 10) * (2 ** 4)
binary //= 10
decimal += (binary % 10) * (2 ** 5)
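# The same extraction written as a loop (illustrative alternative to the
# hand-unrolled version above):
#   for i in range(6):
#       decimal += (binary % 10) * (2 ** i)
#       binary //= 10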
# Display the decimal number
print('The decimal equivalent is', decimal)
| 4.28125 | 4 |
tests/extentions/test_manager.py | artsalliancemedia/awsome | 1 | 12785882 | <reponame>artsalliancemedia/awsome
from AWSome.executor import Executor
from AWSome.extentions import Extention
from AWSome.extentions import SkipException
from AWSome.extentions.manager import ExtentionsManager
import pytest
from tests.fixtures import options
class MockCommand(object):
def __init__(self):
self.extentions = { "update-profile": {} }
class MockExtention(Extention):
def __init__(self, options, exc_class=None):
super(MockExtention, self).__init__(options)
self._exc_class = exc_class
def post_run(self, command, executor):
if self._exc_class:
raise self._exc_class("test")
class TestExtentionsManager(object):
def test_extentions_are_loaded(self, options):
ExtentionsManager.EXTENTIONS = { "update-profile": MockExtention }
command = MockCommand()
executor = Executor("echo")
extentions = ExtentionsManager(options)
extentions.post_run(command, executor)
assert extentions._loaded_extentions != {}
    def test_exceptions_are_propagated(self, options):
ExtentionsManager.EXTENTIONS = {
"update-profile": lambda o: MockExtention(o, Exception)
}
command = MockCommand()
executor = Executor("echo")
extentions = ExtentionsManager(options)
pytest.raises(Exception, extentions.post_run, command, executor)
    def test_skip_exceptions_are_not_propagated(self, options):
ExtentionsManager.EXTENTIONS = {
"update-profile": lambda o: MockExtention(o, SkipException)
}
command = MockCommand()
executor = Executor("echo")
extentions = ExtentionsManager(options)
reload_conf = extentions.post_run(command, executor)
assert reload_conf == False
| 2.125 | 2 |
hashdd/algorithms/algorithm.py | hashdd/pyhashdd | 20 | 12785883 | """
algorithm.py
@brad_anton
License:
Copyright 2015 hashdd.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
class algorithm(object):
name = None
validation_regex = None
sample = None # The result of hashing sample.exe
implements_readfile = False
def __init__(self, arg):
self.setup(arg)
self.update(arg)
@staticmethod
def prefilter(arg):
"""
Override, use to inspect the input buffer
to determine if it meets algorithm requirements
(e.g. length). Return True to continue processing
otherwise return False to abort
"""
return True
def setup(self, arg):
# Override
pass
def hexdigest(self):
# Override
pass
def update(self, arg):
# Override
pass
def digest(self):
# Override
pass
def copy(self):
copy = super(self.__class__, self).__new__(self.__class__)
return copy
def readfile(self, filename, filesize):
        raise NotImplementedError
@classmethod
def validate(self, string):
"""Checks an input string to determine if it matches the characteristics
of the hash
"""
if self.validation_regex is None:
raise Exception("Cannot validate string for \
algorithm {}, no alphabet and/or digest_size \
defined".format(self.name))
return bool(self.validation_regex.match(string))
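if __name__ == '__main__':
    # Illustrative subclass (an example, not part of hashdd): the minimal
    # surface a concrete algorithm must provide. All names below are invented.
    import hashlib
    class md5_example(algorithm):
        name = 'md5_example'
        validation_regex = re.compile(r'^[a-f0-9]{32}$')
        def setup(self, arg):
            self._hasher = hashlib.md5()
        def update(self, arg):
            self._hasher.update(arg)
        def hexdigest(self):
            return self._hasher.hexdigest()
    h = md5_example(b'sample')
    print(h.hexdigest())                        # md5 of b'sample'
    print(md5_example.validate(h.hexdigest()))  # True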
| 2.59375 | 3 |
main.py | divishrengasamy/EFI-Toolbox | 0 | 12785884 | <filename>main.py
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 06:05:37 2021
@author: <NAME>
"""
import data_preprocessing as dp
import pandas as pd
import os
import interpretability_methods as im
import classification_methods as clf_m
import fuzzy_logic as fl
import results_gen_methods as rgm
import user_xi as usxi
from sklearn.model_selection import train_test_split
import classification_models as cm
import multi_fusion as mf
"""
set experiment configuration before running the tool
"""
######################################################################################################################
# STAGE 1 : Experiments configuration
######################################################################################################################
param, model_selected = usxi.exp_config_portal()
# # Loading and Preprocessing Dataset
x, y = dp.data_preprocessing(param[0], param[1])
print('Features:', x)
print('Class:', y.value_counts())
# Train/Test Data Size, for evaluation of classification models
data_size_for_testing = param[2] / 100
print(f"Your data_size_for_testing:{data_size_for_testing}")
# Data size to be used evaluation of interpretability
data_size_for_interpretability = param[3] / 100
print(f"Your data_size_for_interpretability:{data_size_for_interpretability}")
# K-Fold Cross validation for model
cv = param[4]
print(f"Your cross - validation folds: {cv}")
# K-Fold Cross validation for Fuzzy model
fcv = param[5]
print(f"Your cross - validation folds: {fcv}")
######################################################################################################################
# STAGE 2 : Model configuration for the classification pipeline / Initialize dataframe for storing evaluation results
######################################################################################################################
model_selected = model_selected
SHAP_RESULTS = pd.DataFrame(index=x.columns.values, columns=usxi.models_to_eval)
LIME_RESULTS = pd.DataFrame(index=x.columns.values, columns=usxi.models_to_eval)
PI_RESULTS = pd.DataFrame(index=x.columns.values, columns=usxi.models_to_eval)
ENSEMBLE_ML_MODEL = pd.DataFrame(index=x.columns.values, columns=['SHAP', 'LIME', 'PI'])
FUZZY_DATA = pd.DataFrame()
######################################################################################################################
# STAGE 3 : Classification model Evaluation based on the Model configuration and models selected
#####################################################################################################################
def generate_fi(model_selected, x, y, data_size_for_testing, data_size_for_interpretability):
# split into train test sets as per the configuration
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=data_size_for_testing, random_state=42,
shuffle=True,
stratify=y)
x_train = x_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
x_test = x_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
for model_name in model_selected:
if model_name == "LightGBM Classifier":
SHAP_RESULTS[model_name], LIME_RESULTS[model_name], PI_RESULTS[
model_name] = im.intpr_technqs_impl(x, y,
cm.lgbm_clf(x, y, x_train, x_test, y_train, y_test, cv),
data_size_for_interpretability,
model_name)
elif model_name == "Logistic Regressor classifier":
SHAP_RESULTS[model_name], LIME_RESULTS[model_name], PI_RESULTS[
model_name] = im.intpr_technqs_impl(x, y,
cm.logistic_regression_clf(x, y, x_train, x_test, y_train, y_test, cv),
data_size_for_interpretability,
model_name)
elif model_name == "Artificial Neural Network":
SHAP_RESULTS[model_name], LIME_RESULTS[model_name], PI_RESULTS[
model_name] = im.intpr_technqs_impl(x, y,
cm.ann_clf(x, y, x_train, x_test, y_train, y_test, cv),
data_size_for_interpretability,
model_name)
elif model_name == "Random Forest classifier":
SHAP_RESULTS[model_name], LIME_RESULTS[model_name], PI_RESULTS[
model_name] = im.intpr_technqs_impl(x, y,
cm.random_forest_clf(x, y, x_train, x_test, y_train,y_test, cv),
data_size_for_interpretability,
model_name)
elif model_name == 'Support vector machines':
SHAP_RESULTS[model_name], LIME_RESULTS[model_name], PI_RESULTS[
model_name] = im.intpr_technqs_impl(x, y,
cm.svm_clf(x, y, x_train, x_test, y_train, y_test, cv),
                                                data_size_for_interpretability,
                                                model_name)
    # Hand the populated frames back to the module-level call below.
    return SHAP_RESULTS, LIME_RESULTS, PI_RESULTS
SHAP_RESULTS, LIME_RESULTS, PI_RESULTS = generate_fi(model_selected, x, y, data_size_for_testing,data_size_for_interpretability)
######################################################################################################################
# STAGE 4 : MODEL SPECIFIC Feature Importance - Single Fusion
######################################################################################################################
SHAP_RESULTS.dropna(how='all', axis=1)
LIME_RESULTS.dropna(how='all', axis=1)
PI_RESULTS.dropna(how='all', axis=1)
SHAP_RESULTS.to_excel(os.path.join(rgm.generating_results('Results_XLS'), "SHAP_RESULTS.xlsx"))
LIME_RESULTS.to_excel(os.path.join(rgm.generating_results('Results_XLS'), "LIME_RESULTS.xlsx"))
PI_RESULTS.to_excel(os.path.join(rgm.generating_results('Results_XLS'), "PI_RESULTS.xlsx"))
for model_name in model_selected:
if model_name == "LightGBM Classifier":
ENSEMBLE_ML_MODEL['PI'] = PI_RESULTS[model_name]
ENSEMBLE_ML_MODEL['LIME'] = LIME_RESULTS[model_name]
ENSEMBLE_ML_MODEL['SHAP'] = SHAP_RESULTS[model_name]
im.ensemble_feature_importance(ENSEMBLE_ML_MODEL[['SHAP']], ENSEMBLE_ML_MODEL[['LIME']],
ENSEMBLE_ML_MODEL[['PI']], model_name,
top_feature_majority_voting=int((len(x.columns.values) * 0.50)))
FUZZY_DATA["LightGBM Classifier"] = im.fuzzy_intpr_impl(x, y, clf_m.load_models("LightGBM Classifier"),
data_size_for_interpretability,
model_name, fcv)
ENSEMBLE_ML_MODEL.to_excel(os.path.join(rgm.generating_results('Results_XLS'), f"{model_name}.xlsx"))
elif model_name == "Logistic Regressor classifier":
ENSEMBLE_ML_MODEL['PI'] = PI_RESULTS[model_name]
ENSEMBLE_ML_MODEL['LIME'] = LIME_RESULTS[model_name]
ENSEMBLE_ML_MODEL['SHAP'] = SHAP_RESULTS[model_name]
im.ensemble_feature_importance(ENSEMBLE_ML_MODEL[['SHAP']], ENSEMBLE_ML_MODEL[['LIME']],
ENSEMBLE_ML_MODEL[['PI']], model_name,
top_feature_majority_voting=int((len(x.columns.values) * 0.50)))
FUZZY_DATA["Logistic Regressor classifier"] = im.fuzzy_intpr_impl(x, y, clf_m.load_models(
"Logistic Regressor classifier"),
data_size_for_interpretability,
model_name, fcv)
ENSEMBLE_ML_MODEL.to_excel(os.path.join(rgm.generating_results('Results_XLS'), f"{model_name}.xlsx"))
elif model_name == "Artificial Neural Network":
ENSEMBLE_ML_MODEL['PI'] = PI_RESULTS[model_name]
ENSEMBLE_ML_MODEL['LIME'] = LIME_RESULTS[model_name]
ENSEMBLE_ML_MODEL['SHAP'] = SHAP_RESULTS[model_name]
im.ensemble_feature_importance(ENSEMBLE_ML_MODEL[['SHAP']], ENSEMBLE_ML_MODEL[['LIME']],
ENSEMBLE_ML_MODEL[['PI']], model_name,
top_feature_majority_voting=int((len(x.columns.values) * 0.50)))
FUZZY_DATA["Artificial Neural Network"] = im.fuzzy_intpr_impl(x, y,
cm.ann_clf(x, y, x_train, x_test, y_train, y_test,
cv),
data_size_for_interpretability,
model_name, fcv)
ENSEMBLE_ML_MODEL.to_excel(os.path.join(rgm.generating_results('Results_XLS'), f"{model_name}.xlsx"))
elif model_name == "Random Forest classifier":
ENSEMBLE_ML_MODEL['PI'] = PI_RESULTS[model_name]
ENSEMBLE_ML_MODEL['LIME'] = LIME_RESULTS[model_name]
ENSEMBLE_ML_MODEL['SHAP'] = SHAP_RESULTS[model_name]
im.ensemble_feature_importance(ENSEMBLE_ML_MODEL[['SHAP']], ENSEMBLE_ML_MODEL[['LIME']],
ENSEMBLE_ML_MODEL[['PI']], model_name,
top_feature_majority_voting=int((len(x.columns.values) * 0.50)))
FUZZY_DATA["Random Forest classifier"] = im.fuzzy_intpr_impl(x, y,
clf_m.load_models("Random Forest classifier"),
data_size_for_interpretability,
model_name, fcv)
ENSEMBLE_ML_MODEL.to_excel(os.path.join(rgm.generating_results('Results_XLS'), f"{model_name}.xlsx"))
elif model_name == "Support vector machines":
ENSEMBLE_ML_MODEL['PI'] = PI_RESULTS[model_name]
ENSEMBLE_ML_MODEL['LIME'] = LIME_RESULTS[model_name]
ENSEMBLE_ML_MODEL['SHAP'] = SHAP_RESULTS[model_name]
im.ensemble_feature_importance(ENSEMBLE_ML_MODEL[['SHAP']], ENSEMBLE_ML_MODEL[['LIME']],
ENSEMBLE_ML_MODEL[['PI']], model_name,
top_feature_majority_voting=int((len(x.columns.values) * 0.50)))
FUZZY_DATA["Support vector machines"] = im.fuzzy_intpr_impl(x, y, clf_m.load_models("Support vector machines"),
data_size_for_interpretability,
model_name, fcv)
ENSEMBLE_ML_MODEL.to_excel(os.path.join(rgm.generating_results('Results_XLS'), f"{model_name}.xlsx"))
FUZZY_DATA.dropna(how='all', axis=1)
FUZZY_DATA.to_excel(os.path.join(rgm.generating_results('Results_XLS'), "FUZZY-DATA_Before_AC.xlsx"))
######################################################################################################################
# STAGE 5: Feature Importance - Multi Fusion
######################################################################################################################
mf.multi_fusion_feature_imp(SHAP_RESULTS, LIME_RESULTS, PI_RESULTS, x, model_selected)
######################################################################################################################
# STAGE 6: Feature Importance - FUZZY Fusion
######################################################################################################################
fl.fuzzy_implementation(FUZZY_DATA, model_selected)
######################################################################################################################
# STAGE 7 : Generate Report
######################################################################################################################
# <-------------------------------------------- Evaluation Specific Reports ------------------------------------------>
# Machine learning model evaluation
rgm.generate_eval_report('perm')
rgm.generate_eval_report('ROC')
rgm.generate_eval_report('Confusion_Matrix')
# Interpretability Techniques
rgm.generate_eval_report("Permutation Importances")
rgm.generate_eval_report("Lime")
rgm.generate_eval_report("SHAP")
# Fusion Approach
rgm.generate_eval_report('single_fusion')
rgm.generate_eval_report('Multi-Fusion')
rgm.generate_eval_report('Majority Voting')
rgm.generate_eval_report('Rank')
# Fuzzy Approach
rgm.generate_eval_report('FUZZY')
# <------------------------------------------------ Model Specific Reports ------------------------------------------->
for model_name in model_selected:
if model_name == "LightGBM Classifier":
rgm.generate_model_report("LightGBM Classifier")
elif model_name == "Logistic Regressor classifier":
rgm.generate_model_report("Logistic Regressor classifier")
elif model_name == "Artificial Neural Network":
rgm.generate_model_report("Artificial Neural Network")
elif model_name == "Random Forest classifier":
rgm.generate_model_report("Random Forest classifier")
elif model_name == 'Support vector machines':
rgm.generate_model_report('Support vector machines')
# <------------------------------------------------ Multi Fusion Report ---------------------------------------------->
rgm.generate_multi_fusion(model_selected)
| 2.359375 | 2 |
SMS_handler.py | alexfromsocal/PhoneVote | 0 | 12785885 | <reponame>alexfromsocal/PhoneVote
#from flask import Flask, request, redirect
#from twilio.twiml.messaging_response import MessagingResponse as MR
from twilio.rest import Client
from flask import url_for, session, Flask, request
from configparser import ConfigParser
from twilio.twiml.messaging_response import MessagingResponse
from AdminPrivilegeCheck import checkNum
import os, sys
import random
import numpy
from twilio.rest import Client
from configparser import ConfigParser
#import importlib
app = Flask(__name__)
# Receives the incoming SMS webhook from Twilio (which POSTs by default).
@app.route('/sms_inbound', methods=['GET', 'POST'])
def incoming_sms():
configTwilio = ConfigParser()
configTwilio.read('config.cfg')
account = configTwilio.get('auth', 'account')
token = configTwilio.get('auth', 'token')
servicesid = configTwilio.get('auth', 'servicesid')
logoncode = configTwilio.get('key', 'passcode')
client = Client(account, token)
phone_numbers = client.messaging \
.services(sid=servicesid) \
.phone_numbers \
.list()
body = request.values.get('Body', None)
    response = MessagingResponse()
if str(body).lower() == "logon":
isAdmin = checkAdmin(request.values.get('From'), client, phone_numbers)
print("logged")
else:
response.message("Your features aren't valid yet")
#Gets the message from sender and grabs the body of it.
#Taken from Twilio helper library
return str(response)
def redirect_to_first_question(response, survey):
first_question = survey.questions.order_by('id').first()
first_question_url = url_for('question', question_id=first_question.id)
response.redirect(url=first_question_url, method='GET')
def welcome_user(survey, send_function):
welcome_text = 'Welcome to the %s' % survey.title
send_function(welcome_text)
def survey_error(survey, send_function):
if not survey:
send_function('Sorry, but there are no surveys to be answered.')
return True
elif not survey.has_questions:
send_function('Sorry, there are no questions for this survey.')
return True
return False
def checkAdmin(PhNum, client, phone_numbers):
list = readToList()
#serverPhone = phone_numbers[0].phone_number
strOfNumbers = ''.join(str(n) for n in list)
print(strOfNumbers)
if PhNum in list:
message = client.messages.create(
to=PhNum,
from_=phone_numbers[0].phone_number,
body="What is your passkey?")
print(message.sid)
return True
else:
message = client.messages.create(
to=PhNum,
from_=phone_numbers[0].phone_number,
body="You cannot access this.")
print(message.sid)
return False
def readToList():
file = open("AdminList.txt", "r")
list = file.read().splitlines()
file.close()
return list
@app.route("/sms_outbound", methods=['GET', 'POST'])
def outgoing_sms():
body = request.values.get('Body', None)
resp = MessagingResponse() #Taken from Twilio helper library
resp.message("This is the ship that made the Kessel Run in fourteen parsecs?")
return str(resp)
if __name__ == "__main__":
app.run(debug=True) | 2.6875 | 3 |
Python3/547.py | rakhi2001/ecom7 | 854 | 12785886 | __________________________________________________________________________________________________
sample 192 ms submission
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
seen = set()
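        # depth-first search: mark every student reachable from i as part of the same circle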
def visit_all_friends(i: int):
for friend_idx,is_friend in enumerate(M[i]):
if is_friend and friend_idx not in seen:
seen.add(friend_idx)
visit_all_friends(friend_idx)
count = 0
for ridx in range(len(M)):
if ridx not in seen:
visit_all_friends(ridx)
count += 1
return count
__________________________________________________________________________________________________
sample 13172 kb submission
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
def dfs1(r, c, circle):
frds = [r, c]
f_s = {r, c}
i = 0
while i < len(frds):
j = frds[i]
for k in range(len(M)):
if M[j][k] == 1 and k not in f_s:
f_s.add(k)
frds.append(k)
i = i + 1
for i in f_s:
for j in f_s:
M[i][j] = circle
circle = 1
for i in range(len(M)):
for j in range(len(M[0])):
if M[i][j] == 1:
circle = circle + 1
dfs1(i, j, circle)
break
return circle - 1
__________________________________________________________________________________________________
| 3.359375 | 3 |
src/calculate_features/helpers/features/size.py | flysoso/NetAna-Complex-Network-Analysis | 3 | 12785887 | '''
Total number of edges in the graph (graph size):
m = |E|
'''
import networkx as nx
def calculate(network):
    try:
        n = network.size()
    except Exception:
        n = 0
return n
| 3.28125 | 3 |
blockchat/app/blockchain.py | Samyak2/blockchain | 0 | 12785888 | import os
import logging
import json
import asyncio
from collections import defaultdict
import nacl.signing
from quart import Quart, jsonify, request, websocket
from quart_cors import cors
from blockchat.utils import encryption
from blockchat.types.blockchain import Blockchain, BlockchatJSONEncoder, BlockchatJSONDecoder
from blockchat.types.blockchain import parse_node_addr
import blockchat.utils.storage as storage
numeric_level = getattr(logging, os.getenv("LOG_LEVEL", "WARNING"), "WARNING")
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % os.getenv("LOG_LEVEL"))
logging.basicConfig(level=numeric_level)
# Instantiate the Node
app = Quart(__name__)
app = cors(app, allow_origin="*")
app.json_encoder = BlockchatJSONEncoder
app.json_decoder = BlockchatJSONDecoder
# load node secret and node address from env vars
node_secret = nacl.signing.SigningKey(bytes.fromhex(os.getenv("NODE_KEY")))
node_url = os.getenv("NODE_ADDR", None)
assert node_url is not None
node_url = parse_node_addr(node_url)
node_identifier = encryption.encode_verify_key(node_secret.verify_key)
storage_backend = os.getenv("STORAGE_TYPE", "memory").lower()
if storage_backend == "firebase":
db = storage.FirebaseBlockchatStorage()
logging.warning("Using Firebase storage backend")
else:
db = storage.InMemoryBlockchatStorage()
logging.warning("Using in-memory storage backend")
# Instantiate the Blockchain
blockchain = Blockchain(db, node_url, node_secret)
monitor_tags = defaultdict(set)
monitor_chats = defaultdict(set)
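# per-tag and per-recipient queues: each websocket handler registers an asyncio.Queue
# here so mining progress ('mining'/'mined') and incoming chat messages can be pushed to it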
@app.websocket('/transactions/ws')
async def transaction_socket():
global monitor_tags
if 'tag' not in websocket.args:
return 'Tag not specified'
tag = websocket.args.get('tag')
queue = asyncio.Queue()
monitor_tags[tag].add(queue)
await websocket.accept()
if blockchain.db.is_transaction_unconfirmed(tag):
await websocket.send('unc')
elif blockchain.db.is_transaction_confirmed(tag):
await websocket.send('mined')
try:
while True:
data = await queue.get()
await websocket.send(data)
if data == "mined":
break
finally:
monitor_tags[tag].remove(queue)
if not monitor_tags[tag]:
monitor_tags.pop(tag)
@app.websocket('/chat/ws')
async def chat_socket():
global monitor_chats
if 'sender' not in websocket.args:
return 'Sender address not specified'
sender = websocket.args.get('sender')
queue = asyncio.Queue()
monitor_chats[sender].add(queue)
logging.info("Monitoring sender %s", sender)
await websocket.accept()
try:
while True:
data = await queue.get()
await websocket.send(data)
finally:
monitor_chats[sender].remove(queue)
        if not monitor_chats[sender]:
monitor_chats.pop(sender)
async def mine_wrapper():
if blockchain.db.num_transactions() == 0:
return False
logging.info("Mining now")
# get the transactions to be added
transactions = blockchain.db.pop_transactions()
# let client know that their transaction is being mined
for transaction in transactions:
if transaction.tag in monitor_tags:
            await asyncio.gather(*(mtag.put('mining') for mtag in monitor_tags[transaction.tag]))
# ensure chain is the best before mining
blockchain.resolve_conflicts()
last_block = blockchain.last_block
# add a "mine" transaction
blockchain.new_transaction(node_identifier, node_identifier, "<<MINE>>",
self_sign=True, add_to=transactions)
# We run the proof of work algorithm to get the next proof...
proof = blockchain.proof_of_work(last_block, transactions)
# Forge the new Block by adding it to the chain
previous_hash = blockchain.hash(last_block)
block = blockchain.new_block(proof, previous_hash, transactions, last_block)
for transaction in transactions:
if transaction.tag in monitor_tags:
            await asyncio.gather(*(mtag.put('mined') for mtag in monitor_tags[transaction.tag]))
logging.info("Mined")
return block
@app.route('/block/mine', methods=['GET'])
async def mine():
block = await mine_wrapper()
if not block:
return "Nothing to mine", 200
response = {
'message': "New Block Forged",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
}
return jsonify(response), 200
@app.route('/chat/messages', methods=['GET'])
async def get_messages():
if not 'user_key' in request.args:
return 'User public key missing', 400
user_key = request.args.get('user_key').strip()
if not user_key:
return 'Invalid user public key', 400
txs = blockchain.db.get_user_messages(user_key)
num_txs = len(txs)
response = {
'transactions': txs,
'length': num_txs
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
async def new_transaction():
values = await request.get_json()
# Check that the required fields are in the POST'ed data
required_values = ['sender', 'recipient', 'message', 'signature']
if not all(k in values for k in required_values):
return 'Missing values', 400
# Create a new Transaction
transaction, tag = blockchain.new_transaction(values['sender'], values['recipient'],
values['message'], values['signature'])
if not tag:
return "Cannot verify transaction", 400
if transaction.receiver in monitor_chats:
json_dump = json.dumps(transaction.to_dict())
await asyncio.gather(*(mchat.put(json_dump) for mchat in
monitor_chats[transaction.receiver]))
response = {'message': 'Transaction will be added to the next block.',
'tag': tag}
return jsonify(response), 201
@app.route('/transactions/is_unconfirmed', methods=['GET'])
async def check_transaction_unconfirmed():
if 'tag' not in request.args:
return 'Missing tag in parameters', 400
tag = request.args.get('tag')
unconfirmed = blockchain.db.is_transaction_unconfirmed(tag)
return jsonify({"unconfirmed": unconfirmed}), 201
@app.route('/transactions/is_confirmed', methods=['GET'])
async def check_transaction_confirmed():
if 'tag' not in request.args:
return 'Missing tag in parameters', 400
tag = request.args.get('tag')
confirmed = blockchain.db.is_transaction_confirmed(tag)
return jsonify({"confirmed": confirmed}), 201
@app.route('/chain/get', methods=['GET'])
async def full_chain():
chain = blockchain.db.chain
response = {
'chain': chain,
'length': chain[-1]["index"]
}
return jsonify(response), 200
@app.route('/chain/length', methods=['GET'])
async def chain_length():
response = {
'length': len(blockchain),
}
return jsonify(response), 200
@app.route('/block/add', methods=['POST'])
async def add_block():
values = await request.get_json()
block_to_add = values.get('block')
# try to add block
success = blockchain.add_block(block_to_add)
if success:
return jsonify({
"message": "Block added successfully"}), 200
return "Error: Invalid block", 400
@app.route('/nodes/register', methods=['POST'])
async def register_nodes():
values = await request.get_json()
nodes = values.get('nodes')
if nodes is None:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
blockchain.register_node(node)
replaced = blockchain.resolve_conflicts()
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.get_nodes()),
'chain_replaced': replaced
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
async def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced'
}
else:
response = {
'message': 'Our chain is authoritative'
}
return jsonify(response), 200
# schedule the mine job to run in the background every 10 seconds
@app.before_first_request
async def mine_job_req():
asyncio.create_task(mine_job())
async def mine_job():
while True:
await asyncio.sleep(10)
await mine_wrapper()
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
    app.run(host='0.0.0.0', port=port)
| 2.015625 | 2 |
backend/doppelkopf/db.py | lkoehl/doppelkopf | 0 | 12785889 | <reponame>lkoehl/doppelkopf
import click
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import doppelkopf
db = SQLAlchemy()
migrate = Migrate()
@click.command("seed-data")
@with_appcontext
def seed_data_command():
doppelkopf.events.EventType.insert_all()
doppelkopf.toggles.Toggle.insert_all()
click.echo("Initialized seed data.")
def init_app(app):
db.init_app(app)
migrate.init_app(app, db)
app.cli.add_command(seed_data_command)
| 2.1875 | 2 |
Lesson05/pepsearch.py | xperthunter/pybioinformatics | 0 | 12785890 | #!/usr/bin/env python3
import gzip
import sys
# Write a program that finds peptides within protein sequences
# Command line:
# python3 pepsearch.py proteins.fasta.gz IAN
"""
python3 pepsearch.py proteins.fasta.gz IAN | wc -w
43
"""
| 2.9375 | 3 |
pygame/animation.py | Tom-Li1/py_modules | 0 | 12785891 | <filename>pygame/animation.py<gh_stars>0
import pygame, sys, time
from pygame.locals import *
pygame.init()
WINDOWWIDTH = 800
WINDOWHEIGHT = 1000
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('Animation')
DOWNLEFT = 'downleft'
DOWNRIGHT = 'downright'
UPLEFT = 'upleft'
UPRIGHT = 'upright'
MOVESPEED = 4
WHITE = (255, 255, 255)
RED = (225, 0, 0)
GREEN = (0, 225, 0)
BLUE = (0, 0, 225)
b1 = {'rect':pygame.Rect(300, 80, 50, 100), 'color':RED, 'dir':UPRIGHT}
b2 = {'rect':pygame.Rect(200, 200, 20, 20), 'color':GREEN, 'dir':UPLEFT}
b3 = {'rect':pygame.Rect(100, 150, 60, 60), 'color':BLUE, 'dir':DOWNLEFT}
b4 = {'rect':pygame.Rect(150, 300, 60, 60), 'color':RED, 'dir':DOWNLEFT}
b5 = {'rect':pygame.Rect(500, 150, 60, 60), 'color':GREEN, 'dir':DOWNLEFT}
b6 = {'rect':pygame.Rect(700, 470, 60, 60), 'color':BLUE, 'dir':DOWNLEFT}
b7 = {'rect':pygame.Rect(450, 290, 60, 60), 'color':RED, 'dir':DOWNLEFT}
b8 = {'rect':pygame.Rect(550, 400, 60, 60), 'color':GREEN, 'dir':DOWNLEFT}
b9 = {'rect':pygame.Rect(340, 700, 60, 60), 'color':BLUE, 'dir':DOWNLEFT}
b10 = {'rect':pygame.Rect(280, 620, 60, 60), 'color':RED, 'dir':DOWNLEFT}
boxes = [b1, b2, b3, b4, b5, b6, b7, b8, b9, b10]
mainClock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
windowSurface.fill(WHITE)
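    # move each box one step in its current direction, then flip the direction component that hit a window edge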
for b in boxes:
if b['dir'] == DOWNLEFT:
b['rect'].left -= MOVESPEED
b['rect'].top += MOVESPEED
if b['dir'] == DOWNRIGHT:
b['rect'].left += MOVESPEED
b['rect'].top += MOVESPEED
if b['dir'] == UPLEFT:
b['rect'].left -= MOVESPEED
b['rect'].top -= MOVESPEED
if b['dir'] == UPRIGHT:
b['rect'].left += MOVESPEED
b['rect'].top -= MOVESPEED
if b['rect'].top < 0:
if b['dir'] == UPLEFT:
b['dir'] = DOWNLEFT
if b['dir'] == UPRIGHT:
b['dir'] = DOWNRIGHT
if b['rect'].bottom > WINDOWHEIGHT:
if b['dir'] == DOWNLEFT:
b['dir'] = UPLEFT
if b['dir'] == DOWNRIGHT:
b['dir'] = UPRIGHT
if b['rect'].left < 0:
if b['dir'] == DOWNLEFT:
b['dir'] = DOWNRIGHT
if b['dir'] == UPLEFT:
b['dir'] = UPRIGHT
if b['rect'].right > WINDOWWIDTH:
if b['dir'] == DOWNRIGHT:
b['dir'] = DOWNLEFT
if b['dir'] == UPRIGHT:
b['dir'] = UPLEFT
pygame.draw.rect(windowSurface, b['color'], b['rect'])
pygame.display.update()
mainClock.tick(60)
| 2.375 | 2 |
timeshifter-agents/timeshifter-agents/roller_ball.py | SwamyDev/ML-TimeShifters | 0 | 12785892 | from reinforcement.agents.td_agent import TDAgent
from reinforcement.models.q_regression_model import QRegressionModel
from reinforcement.policies.e_greedy_policies import NormalEpsilonGreedyPolicy
from reinforcement.reward_functions.q_neuronal import QNeuronal
from unityagents import UnityEnvironment
import tensorflow as tf
from unity_session import UnitySession
UNITY_BINARY = "../environment-builds/RollerBall/RollerBall.exe"
TRAIN_MODE = True
MEMORY_SIZE = 10
LEARNING_RATE = 0.01
ALPHA = 0.2
GAMMA = 0.9
N = 10
START_EPS = 1
TOTAL_EPISODES = 1000
if __name__ == '__main__':
with UnityEnvironment(file_name=UNITY_BINARY) as env, tf.Session():
default_brain = env.brain_names[0]
model = QRegressionModel(4, [100], LEARNING_RATE)
Q = QNeuronal(model, MEMORY_SIZE)
episode = 0
policy = NormalEpsilonGreedyPolicy(lambda: START_EPS / (episode + 1))
agent = TDAgent(policy, Q, N, GAMMA, ALPHA)
sess = UnitySession(env, agent, brain=default_brain, train_mode=TRAIN_MODE)
for e in range(TOTAL_EPISODES):
episode = e
sess.run()
print("Episode {} finished.".format(episode)) | 2.09375 | 2 |
src/arbiter/eicar.py | polyswarm/polyswarm-client | 21 | 12785893 | <reponame>polyswarm/polyswarm-client<gh_stars>10-100
import base64
import logging
from polyswarmartifact import ArtifactType
from polyswarmclient.abstractarbiter import AbstractArbiter
from polyswarmclient.abstractscanner import ScanResult
logger = logging.getLogger(__name__) # Initialize logger
EICAR = base64.b64decode(
b'WDVPIVAlQEFQWzRcUFpYNTQoUF4pN0NDKTd9JEVJQ0FSLVNUQU5EQVJELUFOVElWSVJVUy1URVNULUZJTEUhJEgrSCo=')
class Arbiter(AbstractArbiter):
"""Arbiter which matches hashes to a database of known samples"""
def __init__(self, client, testing=0, scanner=None, chains=None, artifact_types=None):
"""Initialize a verbatim arbiter
Args:
            client (polyswarmclient.Client): Client to use
testing (int): How many test bounties to respond to
chains (set[str]): Chain(s) to operate on
artifact_types (list(ArtifactType)): List of artifact types you support
"""
if artifact_types is None:
artifact_types = [ArtifactType.FILE]
super().__init__(client, testing, scanner, chains, artifact_types)
async def scan(self, guid, artifact_type, content, metadata, chain):
"""Scan an artifact
Args:
guid (str): GUID of the bounty under analysis, use to track artifacts in the same bounty
artifact_type (ArtifactType): Artifact type for the bounty being scanned
            content (bytes): Content of the artifact to be scanned
metadata (dict) Dict of metadata for the artifact
chain (str): Chain we are operating on
Returns:
ScanResult: Result of this scan
"""
return ScanResult(bit=True, verdict=(content == EICAR))
| 2.375 | 2 |
models/model.py | AnnLIU15/SegCovid | 0 | 12785894 | import torch
import torch.nn as nn
from .layer import *
##### U^2-Net ####
class U2NET(nn.Module):
'''
    See the U2-Net paper for details (link in the md)
'''
def __init__(self, in_channels=1, out_channels=3):
super(U2NET, self).__init__()
self.stage1 = RSU7(in_channels, 32, 64)
self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage2 = RSU6(64, 32, 128)
self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage3 = RSU5(128, 64, 256)
self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage4 = RSU4(256, 128, 512)
self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage5 = RSU4F(512, 256, 512)
self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage6 = RSU4F(512, 256, 512)
# decoder
self.stage5d = RSU4F(1024, 256, 512)
self.stage4d = RSU4(1024, 128, 256)
self.stage3d = RSU5(512, 64, 128)
self.stage2d = RSU6(256, 32, 64)
self.stage1d = RSU7(128, 16, 64)
self.side1 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side2 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side3 = nn.Conv2d(128, out_channels, 3, padding=1)
self.side4 = nn.Conv2d(256, out_channels, 3, padding=1)
self.side5 = nn.Conv2d(512, out_channels, 3, padding=1)
self.side6 = nn.Conv2d(512, out_channels, 3, padding=1)
self.outconv = nn.Conv2d(6*out_channels, out_channels, 1)
def forward(self, x):
hx = x
# stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
# stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
# stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
# stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
# stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
# stage 6
hx6 = self.stage6(hx)
hx6up = upsample_like(hx6, hx5)
# -------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
hx5dup = upsample_like(hx5d, hx4)
hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
hx4dup = upsample_like(hx4d, hx3)
hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
hx3dup = upsample_like(hx3d, hx2)
hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
hx2dup = upsample_like(hx2d, hx1)
hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
# side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = upsample_like(d2, d1)
d3 = self.side3(hx3d)
d3 = upsample_like(d3, d1)
d4 = self.side4(hx4d)
d4 = upsample_like(d4, d1)
d5 = self.side5(hx5d)
d5 = upsample_like(d5, d1)
d6 = self.side6(hx6)
d6 = upsample_like(d6, d1)
d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
return d0, d1, d2, d3, d4, d5, d6
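
# Usage sketch (illustrative; assumes the RSU blocks imported from .layer):
#   net = U2NET(in_channels=1, out_channels=3)
#   d0, d1, d2, d3, d4, d5, d6 = net(torch.randn(1, 1, 256, 256))
# Each of the seven outputs is a (1, 3, 256, 256) map; d0 is the fused prediction.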
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self, in_channels=1, out_channels=3):
super(U2NETP, self).__init__()
self.stage1 = RSU7(in_channels, 16, 64)
self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage2 = RSU6(64, 16, 64)
self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage3 = RSU5(64, 16, 64)
self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage4 = RSU4(64, 16, 64)
self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage5 = RSU4F(64, 16, 64)
self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage6 = RSU4F(64, 16, 64)
# decoder
self.stage5d = RSU4F(128, 16, 64)
self.stage4d = RSU4(128, 16, 64)
self.stage3d = RSU5(128, 16, 64)
self.stage2d = RSU6(128, 16, 64)
self.stage1d = RSU7(128, 16, 64)
self.side1 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side2 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side3 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side4 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side5 = nn.Conv2d(64, out_channels, 3, padding=1)
self.side6 = nn.Conv2d(64, out_channels, 3, padding=1)
self.outconv = nn.Conv2d(6*out_channels, out_channels, 1)
def forward(self, x):
hx = x
# stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
# stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
# stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
# stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
# stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
# stage 6
hx6 = self.stage6(hx)
hx6up = upsample_like(hx6, hx5)
# decoder
hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
hx5dup = upsample_like(hx5d, hx4)
hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
hx4dup = upsample_like(hx4d, hx3)
hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
hx3dup = upsample_like(hx3d, hx2)
hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
hx2dup = upsample_like(hx2d, hx1)
hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
# side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = upsample_like(d2, d1)
d3 = self.side3(hx3d)
d3 = upsample_like(d3, d1)
d4 = self.side4(hx4d)
d4 = upsample_like(d4, d1)
d5 = self.side5(hx5d)
d5 = upsample_like(d5, d1)
d6 = self.side6(hx6)
d6 = upsample_like(d6, d1)
d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
return d0, d1, d2, d3, d4, d5, d6
| 2.53125 | 3 |
get_dark_files.py | jprchlik/find_contaminated_darks | 0 | 12785895 | import os,sys
import datetime as dt
import numpy as np
try:
#for python 3.0 or later
from urllib.request import urlopen
except ImportError:
#Fall back to python 2 urllib2
from urllib2 import urlopen
import requests
from multiprocessing import Pool
import drms
from shutil import move
import glob
###Remove proxy server variables from Lockheed after using the proxy server to connect to the google calendar 2019/02/20 <NAME>
##os.environ.pop("http_proxy" )
##os.environ.pop("https_proxy")
class dark_times:
def __init__(self,time,
irisweb='http://iris.lmsal.com/health-safety/timeline/iris_tim_archive/{2}/IRIS_science_timeline_{0}.V{1:2d}.txt',
simpleb=False,complexa=False,tol=50):
"""
A python class used for finding and downloading IRIS dark observations. This module requires that parameters be specified in
a parameter file in this directory. The parameter file's name must be "parameter_file" and contain the three following lines:
Line1: email address registered with JSOC (e.g. <EMAIL>)
Line2: A base directory containing the level 1 IRIS dark files. The program will concatenate YYYY/MM/simpleb/ or YYYY/MM/complexa/ onto the base directory
Line3: A base directory containing the level 0 IRIS dark files. The program will concatenate simpleb/YYYY/MM/ or complexa/YYYY/MM/ onto the base directory
Example three lines below:
<EMAIL>
/data/alisdair/IRIS_LEVEL1_DARKS/
/data/alisdair/opabina/scratch/joan/iris/newdat/orbit/level0/
The program will create the level0 and level1 directories as needed.
Parameters
----------
time: str
A string containing the date the dark observations started based on the IRIS calibration-as-run calendar in YYYY/MM/DD format (e.g.
test = gdf.dark_times(time,simpleb=True))
irisweb: string, optional
A formatted text string which corresponds to the location of the IRIS timeline files
(Default = 'http://iris.lmsal.com/health-safety/timeline/iris_tim_archive/{2}/IRIS_science_timeline_{0}.V{1:2d}.txt').
The {0} character string corresponds the date of the timeline uploaded in YYYYMMDD format, while {1:2d}
corresponds to the highest number version of the timeline, which I assume is the timeline uploaded to the spacecraft.
        simpleb: boolean, optional
            Whether to download simple B darks; only one of simpleb or complexa can be processed per call (Default = False).
        complexa: boolean, optional
            Whether to download complex A darks; only one of simpleb or complexa can be processed per call (Default = False).
        tol: int, optional
            The minimum number of darks required before the program downloads. If the local directory already
            holds at least this many darks, no new darks are downloaded; if it holds fewer, they are (Default = 50).
Returns
-------
None
Just downloads files and creates required directories.
"""
#web page location of IRIS timeline
self.irisweb = irisweb #.replace('IRIS',time+'/IRIS')
self.otime = dt.datetime.strptime(time,'%Y/%m/%d')
self.stime = self.otime.strftime('%Y%m%d')
#Type of dark to download simple B or complex A
self.complexa = complexa
self.simpleb = simpleb
        #Minimum number of dark files required to run
self.tol = tol
#read lines in parameter file
parU = open('parameter_file','r')
pars = parU.readlines()
parU.close()
#update parameters based on new parameter file
#get email address
self.email = pars[0].strip()
#get level 1/download base directory (without simpleb or complexa subdirectory
bdir = pars[1].strip()
#get level 0 directory
ldir = pars[2].strip()
if complexa:
self.obsid = 'OBSID=4203400000'
if simpleb:
self.obsid = 'OBSID=4202000003'
#make the download directory
if self.simpleb:
self.bdir = bdir+'/{0}/simpleB/'.format(self.otime.strftime('%Y/%m'))
self.ldir = ldir+'/simpleB/{0}/'.format(self.otime.strftime('%Y/%m'))
else:
self.bdir = bdir+'/{0}/complexA/'.format(self.otime.strftime('%Y/%m'))
self.ldir = ldir+'/complexA/{0}/'.format(self.otime.strftime('%Y/%m'))
def request_files(self):
#First check that any time line exists for given day
searching = True
sb = 0 #searching backwards days to correct for weekend or multiday timelines
while searching:
#look in iris's timeline structure
self.stime = (self.otime-dt.timedelta(days=sb)).strftime('%Y%m%d')
irispath = (self.otime-dt.timedelta(days=sb)).strftime('%Y/%m/%d')
inurl = self.irisweb.format(self.stime,0,irispath).replace(' ','0') #searching for V00 file verision
resp = requests.head(inurl)
#leave loop if V00 is found
if resp.status_code == 200: searching =False
else: sb += 1 #look one day back if timeline is missing
if sb >= 9:
searching = False #dont look back more than 9 days
sys.stdout.write('FAILED, IRIS timeline does not exist')#printing this will cause the c-shell script to fail too
sys.exit(1) # exit the python script
check = True
v = 0 #timeline version
#get lastest timeline version
while check == True:
inurl = self.irisweb.format(self.stime, v,irispath).replace(' ','0')
resp = requests.head(inurl)
if resp.status_code != 200:
check = False
v+=-1
inurl = self.irisweb.format(self.stime, v,irispath).replace(' ','0')
else:
v+=1
#get the timeline file information for request timeline
res = urlopen(inurl)
self.res = res
#Need to add decode because python 3 is wonderful 2019/01/16 <NAME>
self.timeline = res.read().decode('utf-8')
def get_start_end(self):
#lines with OBSID=obsid
self.lines = []
for line in self.timeline.split('\n'):
if self.obsid in line:
self.lines.append(line)
#get the last set of OBSIDs (useful for eclipse season)
#Query from start to end time 2019/01/02 <NAME>
self.sta_dark = self.lines[0][3:20]
self.end_dark = self.lines[-1][3:20]
self.sta_dark_dt = self.create_dt_object(self.sta_dark)
self.end_dark_dt = self.create_dt_object(self.end_dark)
self.sta_dark_dt = self.sta_dark_dt-dt.timedelta(minutes=1)
self.end_dark_dt = self.end_dark_dt+dt.timedelta(minutes=1)
#create datetime objects using doy in timeline
def create_dt_object(self,dtobj):
splt = dtobj.split(':')
obj = dt.datetime(int(splt[0]),1,1,int(splt[2]),int(splt[3]))+dt.timedelta(days=int(splt[1])-1) #convert doy to datetime obj
return obj
#set up JSOC query for darks
def dark_query(self):
#use drms module to download from JSOC (https://pypi.python.org/pypi/drms)
client = drms.Client(email=self.email,verbose=False)
fmt = '%Y.%m.%d_%H:%M'
self.qstr = 'iris.lev1[{0}_TAI-{1}_TAI][][? IMG_TYPE ~ "DARK" ?]'.format(self.sta_dark_dt.strftime(fmt),self.end_dark_dt.strftime(fmt))
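        # e.g. 'iris.lev1[2017.02.04_23:59_TAI-2017.02.05_08:01_TAI][][? IMG_TYPE ~ "DARK" ?]' (illustrative dates)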
self.expt = client.export(self.qstr)
        #setup string to pass to sswidl for download
### fmt = '%Y-%m-%dT%H:%M:%S'
### self.response = client.query(jsoc.Time(self.sta_dark_dt.strftime(fmt),self.end_dark_dt.strftime(fmt)),jsoc.Series('iris.lev1'),
### jsoc.Notify('<EMAIL>'),jsoc.Segment('image'))
###
self.get_darks(client)
def get_darks(self,client):
#### import time
#### wait = True
####
#### request = client.request_data(self.response)
#### waittime = 60.*5. #five minute wait to check on data completion
#### time.sleep(waittime) #
####
#### while wait:
#### stat = client.check_request(request)
#### if stat == 1:
#### temp.sleep(waittime)
#### elif stat == 0:
#### wait = False
#### elif stat > 1:
#### break #jump out of loop if you get an error
# check to make sure directory does not exist
if not os.path.exists(self.bdir):
os.makedirs(self.bdir)
#also make level0 directory
if not os.path.exists(self.ldir):
os.makedirs(self.ldir)
#get number of records
try:
index = np.arange(np.size(self.expt.urls.url))
if index[-1] < self.tol: #make sure to have at least 50 darks in archive before downloading
sys.stdout.write("FAILED, LESS THAN {0:2d} DARKS IN ARCHIVE".format(self.tol))
sys.exit(1)
except: #exit nicely if no records exist
sys.stdout.write("FAILED, No JSOC record exists")
sys.exit(1)
#check to see if darks are already downloaded Added 2017/03/20
#make sure the downloaded files are on the same day added 2017/12/05 (<NAME>)
if len(glob.glob(self.bdir+'/iris.lev1.{0}*.fits'.format(self.otime.strftime('%Y-%m-%d')))) < self.tol:
            #Download the data using drms one file at a time (will fuss about mounted drive occasionally)
for ii in index: self.download_par(ii)
            #DRMS DOES NOT WORK IN PARALLEL
#### pool = Pool(processes=4)
#### outf = pool.map(self.download_par,index)
#### pool.close()
### self.expt.download(bdir,1,fname_from_rec=True)
#download the data
#### res = client.get_request(request,path=bdir,progress=True)
#### res.wait()
#
def download_par(self,index):
# get file from JSOC
outf = self.expt.download(self.bdir,index,fname_from_rec=True)
#format output file
fils = str(outf['download'].values[0])
fils = fils.split('/')[-1]
nout = fils[:14]+'-'+fils[14:16]+'-'+fils[16:24]+fils[26:]
#create new file name in same as previous format
if os.path.isfile(str(outf['download'].values[0])):
move(str(outf['download'].values[0]),self.bdir+nout)
#run to completion
def run_all(self):
self.request_files()
self.get_start_end()
self.dark_query()
| 2.546875 | 3 |
src/parser.py | Bolinooo/hint-parser | 0 | 12785896 | from .regex_patterns import *
from bs4 import BeautifulSoup
import datetime
import re
def parse(response, option):
"""
Function to extract data from html schedule
:return: Parsed html in dictionary
"""
soup = BeautifulSoup(response.content, 'html.parser')
title_blue_original = soup.find("font", {"color": "#0000FF"}).text.strip()
if option != "classes" and option != "schedule":
size = "4"
else:
size = "5"
title_black_original = soup.find("font", {"size": size}).text.strip()
title_blue_stripped = "".join(title_blue_original.split())[:-1]
date = soup.find_all('font')[-1].get_text(strip=True)
schedule = []
rows = soup.find_all('table')[0].find_all('tr', recursive=False)[1:30:2]
if option != "schedule":
schedule.append(
{'title_blue': title_blue_stripped, 'title_black': title_black_original})
else:
rowspans = {}
for block, row in enumerate(rows, 1):
daycells = row.select('> td')[1:]
daynum, rowspan_offset = 0, 0
for daynum, daycell in enumerate(daycells, 1):
daynum += rowspan_offset
while rowspans.get(daynum, 0):
rowspan_offset += 1
rowspans[daynum] -= 1
daynum += 1
rowspan = (int(daycell.get('rowspan', default=2)) // 2) - 1
if rowspan:
rowspans[daynum] = rowspan
texts = daycell.find_all('font')
if texts:
info = (item.get_text(strip=True) for item in texts)
seperated_info = get_separated_cell_info(info)
time = convert_date(date, daynum)
timetable = convert_timetable(block, block + rowspan)
schedule.append({
'abbrevation': title_blue_stripped,
'title': title_black_original,
'start_begin': timetable[0],
'start_end': timetable[1],
'start_block': block,
'end_begin': timetable[2],
'end_end': timetable[3],
'end_block': block + rowspan,
'daynum': daynum,
'day': time[0],
'date_full': time[1],
'date_year': time[1][0:4],
'date_month': time[1][5:7],
'date_day': time[1][8:10],
'info': seperated_info
})
# print(schedule)
while daynum < 5:
daynum += 1
if rowspans.get(daynum, 0):
rowspans[daynum] -= 1
if not schedule:
schedule = {}
print("Page succesfully parsed")
return schedule
def convert_date(soup_date, daynum):
"""
Function to calculate day and date based on string and daynum
:param soup_date: string containing the date of schedule page
:param daynum: int of current day
:return: tuple with current day and current date
"""
days = {
1: "Maandag",
2: "Dinsdag",
3: "Woensdag",
4: "Donderdag",
5: "Vrijdag"
}
one_day, one_month, one_year = soup_date[0:2], soup_date[3:5], soup_date[6:10]
partials = [one_day, one_month, one_year]
items = [int(i) for i in partials]
d0 = datetime.date(year=items[2], month=items[1], day=items[0])
current_day = days[daynum]
current_date = d0 + datetime.timedelta(days=daynum - 1)
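    # e.g. convert_date('01-10-2018', 3) -> ('Woensdag', '2018-10-03')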
return current_day, str(current_date)
def convert_timetable(start, end):
"""
Function to convert rows to time
:param start: Starting row number
:param end: Ending row number
:return: Tuple with all correct starting and ending times
"""
timetable = {
1: ("8:30", "9:20"),
2: ("9:20", "10:10"),
3: ("10:30", "11:20"),
4: ("11:20", "12:10"),
5: ("12:10", "13:00"),
6: ("13:00", "13:50"),
7: ("13:50", "14:40"),
8: ("15:00", "15:50"),
9: ("15:50", "16:40"),
10: ("17:00", "17:50"),
11: ("17:50", "18:40"),
12: ("18:40", "19:30"),
13: ("19:30", "20:20"),
14: ("20:20", "21:10"),
15: ("21:10", "22:00"),
}
start_begin = timetable[start][0]
start_end = timetable[start][1]
end_begin = timetable[end][0]
end_end = timetable[end][1]
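    # e.g. convert_timetable(1, 2) -> ('8:30', '9:20', '9:20', '10:10')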
return start_begin, start_end, end_begin, end_end
def combine_dicts(parsed_items, parsed_counters):
"""
Function to combine parsed schedule data and quarter/week-info to a single dictionary
:param parsed_items: defaultdict with nested lists containing separated dicts with crawled data per schedule
:param parsed_counters: defaultdict with nested lists containing week and quarter per schedule
:return: clean dictionary
"""
print("Starting to build final dictionary")
result = {}
empty_schedules = 0
for l1 in parsed_items:
for option, (length, l2) in parsed_counters.items():
if len(l1) == length:
for item in zip(l1, l2):
schedule = bool(item[0])
if schedule:
quarter = item[1][0]
week = item[1][1]
result.setdefault(option, {})
result[option].setdefault(quarter, {})
result[option][quarter].setdefault(week, [])
result[option][quarter][week].append(item[0])
else:
empty_schedules += 1
print("Succesfully builded final dictionary")
print("{amount} schedules were empty.".format(amount=empty_schedules))
return result
def get_separated_cell_info(cell_info):
"""
Function to give each value in
:param cell_info: generator that behaves like an iterator. Cell_info can contain e.g. lecture, teacher code etc.
:return: category(key) of the reg_ex_dict and the matched value
"""
seperated_info = {}
for info in cell_info:
# data contains
# 1. a key from reg_ex_dict
# 2. the value of the result after executing regular expressions on info
data = get_category_and_result(info)
        # Some cells have only one value, for example Hemelvaartsdag, and get_category_and_result
        # won't match it. In that case data is None, so save the raw info as an event.
if data is None:
seperated_info["event"] = info
# location needs to be splitted in building, floor and room
elif data[0] == "location":
dotSeperatedParts = data[1].split(".")
seperated_info["building"] = dotSeperatedParts[0]
seperated_info["floor"] = dotSeperatedParts[1]
seperated_info["room"] = dotSeperatedParts[2]
else:
seperated_info[data[0]] = data[1]
return seperated_info
def get_category_and_result(info):
"""
Function to get the category(key) and the matched value after executing a regular expression
:param info: info is a string
:return: category(key) of the reg_ex_dict and the matched value
"""
    # category, e.g. lecture
for category in reg_ex_dict:
# pattern e.g. pattern1
for pattern in reg_ex_dict[category]:
match = re.match(pattern, info)
if match:
return category, match.group()
| 3.046875 | 3 |
research/mobilenet/mobilenet_v1.py | luotigerlsx/models_archive | 0 | 12785897 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v1.
Adapted from tf.keras.applications.mobilenet.MobileNet().
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
"""
import logging
import tensorflow as tf
from research.mobilenet import common_modules
from research.mobilenet.configs import archs
layers = tf.keras.layers
MobileNetV1Config = archs.MobileNetV1Config
def mobilenet_v1(config: MobileNetV1Config = MobileNetV1Config()
) -> tf.keras.models.Model:
"""Instantiates the MobileNet Model."""
model_name = config.name
input_shape = config.input_shape
img_input = layers.Input(shape=input_shape, name='Input')
# build network base
x = common_modules.mobilenet_base(img_input, config)
# build classification head
x = common_modules.mobilenet_head(x, config)
return tf.keras.models.Model(inputs=img_input,
outputs=x,
name=model_name)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)-15s:%(levelname)s:%(module)s:%(message)s',
level=logging.INFO)
model = mobilenet_v1()
model.compile(
optimizer='adam',
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_crossentropy])
logging.info(model.summary())
| 2.578125 | 3 |
tests/test_stopper.py | tech-sketch/SeqAL | 0 | 12785898 | from unittest.mock import MagicMock
import pytest
from seqal.stoppers import BudgetStopper, F1Stopper
class TestF1Stopper:
"""Test F1Stopper class"""
@pytest.mark.parametrize(
"micro,micro_score,macro,macro_score,expected",
[
(True, 16, False, 0, True),
(True, 14, False, 0, False),
(False, 0, True, 16, True),
(False, 0, True, 14, False),
],
)
def test_stop(
self,
micro: bool,
micro_score: int,
macro: bool,
macro_score: int,
expected: bool,
) -> None:
"""Test stop function"""
# Arrange
stopper = F1Stopper(goal=15)
classification_report = {
"micro avg": {"f1-score": micro_score},
"macro avg": {"f1-score": macro_score},
}
result = MagicMock(classification_report=classification_report)
# Act
decision = stopper.stop(result, micro=micro, macro=macro)
# Assert
assert decision == expected
class TestBudgetStopper:
"""Test BudgetStopper class"""
@pytest.mark.parametrize("unit_count,expected", [(10, False), (20, True)])
def test_stop(self, unit_count: int, expected: bool) -> None:
"""Test stop function"""
# Arrange
stopper = BudgetStopper(goal=15, unit_price=1)
# Act
decision = stopper.stop(unit_count)
# Assert
assert decision == expected
| 2.578125 | 3 |
Code/CCIPCA.py | arturjordao/IncrementalDimensionalityReduction | 3 | 12785899 | """Candid Covariance-Free Incremental PCA (CCIPCA)."""
import numpy as np
from scipy import linalg
from sklearn.utils import check_array
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.base import BaseEstimator
from sklearn.preprocessing import normalize
import copy
class CCIPCA(BaseEstimator):
"""Candid Covariance-Free Incremental PCA (CCIPCA).
Parameters
----------
n_components : int or None, (default=None)
Number of components to keep. If ``n_components `` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
    References
    ----------
    Candid Covariance-free Incremental Principal Component Analysis
"""
def __init__(self, n_components=10, amnesic=2, copy=True):
        self.__name__ = 'Candid Covariance-Free Incremental PCA (CCIPCA)'
self.n_components = n_components
self.amnesic = amnesic
self.n = 0
self.copy = copy
self.x_rotations = None
self.sum_x = None
self.n_features = None
self.eign_values = None
self.x_mean = None
def normalize(self, x):
return normalize(x[:, np.newaxis], axis=0).ravel()
def fit(self, X, Y=None):
X = check_array(X, dtype=FLOAT_DTYPES, copy=self.copy)
n_samples, n_features = X.shape
if self.n == 0:
self.n_features = n_features
self.x_rotations = np.zeros((n_features, self.n_components))
self.eign_values = np.zeros((self.n_components))
self.incremental_mean = 1
for j in range(0, n_samples):
self.n = self.n + 1
u = X[j]
old_mean = (self.n-1)/self.n*self.incremental_mean
new_mean = 1/self.n*u
self.incremental_mean = old_mean+new_mean
if self.n == 1:
self.x_rotations[:, 0] = u
self.sum_x = u
else:
u = u - self.incremental_mean
self.sum_x = self.sum_x + u
k = min(self.n, self.n_components)
for i in range(1, k+1):
if i == self.n:
self.x_rotations[:, i - 1] = u
else:
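                        # amnesic weighting: w1 discounts the current eigenvector estimate, w2 weights
                        # the new sample; a larger amnesic parameter forgets old data faster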
w1, w2 = (self.n-1-self.amnesic)/self.n, (self.n+self.amnesic)/self.n
v_norm = self.normalize(self.x_rotations[:, i-1])
v_norm = np.expand_dims(v_norm, axis=1)
self.x_rotations[:, i - 1] = w1 * self.x_rotations[:, i - 1] + w2*u*np.dot(u.T, v_norm)[0]
v_norm = self.normalize(self.x_rotations[:, i-1])
v_norm = np.expand_dims(v_norm, axis=1)
u = u - (np.dot(u.T, v_norm)*v_norm)[:, 0]
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data."""
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
X -= self.incremental_mean
w_rotation = np.zeros(self.x_rotations.shape)
for c in range(0, self.n_components):
w_rotation[:, c] = self.normalize(self.x_rotations[:, c])
return np.dot(X, w_rotation) | 2.375 | 2 |
kr_fashion_mnist.py | mjbhobe/dl-keras | 0 | 12785900 | <reponame>mjbhobe/dl-keras<filename>kr_fashion_mnist.py
<<<<<<< HEAD
#!/usr/bin/env python
""" Fashion MNIST multiclass classification using Tensorflow 2.0 & Keras """
import sys
import os
import random
# import pathlib
# import json
# import glob
# import tarfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Dense, Dropout, Flatten,
Input, MaxPooling2D, ELU, ReLU, Softmax)
from tensorflow.keras.models import load_model
from tensorflow.keras.models import model_from_json
from tensorflow.keras.utils import plot_model
from tensorflow.python.keras.metrics import TrueNegatives
import kr_helper_funcs as kru
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
print(f"Using Tensorflow {tf.__version__}")
EPOCHS, BATCH_SIZE, BUFFER_SIZE = 25, 64, 512
def load_fashion_data():
""" load Fashion MNIST data & return datasets """
from tensorflow.keras.datasets import fashion_mnist
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20,
random_state=SEED, stratify=y_train)
# Normalize data.
X_train = X_train.astype('float32') / 255.0
X_val = X_val.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
# Reshape grayscale to include channel dimension.
X_train = np.expand_dims(X_train, axis=3)
X_val = np.expand_dims(X_val, axis=3)
X_test = np.expand_dims(X_test, axis=3)
# Process labels.
label_binarizer = LabelBinarizer()
y_train = label_binarizer.fit_transform(y_train)
y_val = label_binarizer.fit_transform(y_val)
y_test = label_binarizer.fit_transform(y_test)
print(f"X_train.shape = {X_train.shape} - y_train.shape = {y_train.shape}\n"
f"X_val.shape = {X_val.shape} - y_val.shape = {y_val.shape}\n"
f"X_test.shape = {X_test.shape} - y_test.shape = {y_test.shape} ")
    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    val_ds = tf.data.Dataset.from_tensor_slices((X_val, y_val))
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test))
return train_ds, val_ds, test_ds
def build_model():
input_layer = Input(shape=(28, 28, 1))
x = Conv2D(filters=20, kernel_size=(5, 5), padding='same', strides=(1, 1))(input_layer)
x = ELU()(x)
x = BatchNormalization()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = Dropout(rate=0.5)(x)
x = Conv2D(filters=50, kernel_size=(5, 5), padding='same', strides=(1, 1))(x)
x = ELU()(x)
x = BatchNormalization()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = Dropout(rate=0.5)(x)
x = Flatten()(x)
x = Dense(units=500)(x)
x = ELU()(x)
x = Dropout(rate=0.5)(x)
x = Dense(units=10)(x)
output = Softmax()(x)
model = Model(inputs=input_layer, outputs=output)
# compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
MODEL_SAVE_PATH = os.path.join('./model_states', 'kr_fashion_mnist.hdf5')
DO_TRAINING = False
DO_PREDICTIONS = False
def main():
# load & prepere the datasets for training
print('Loading & preparing data...')
train_dataset, val_dataset, test_dataset = load_fashion_data()
train_dataset = train_dataset.shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE).prefetch(buffer_size=BUFFER_SIZE)
val_dataset = val_dataset.shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE).prefetch(buffer_size=BUFFER_SIZE)
test_dataset = test_dataset.shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE).prefetch(buffer_size=BUFFER_SIZE)
if DO_TRAINING:
print('Training model...')
# create the model
model = build_model()
print(model.summary())
# train the model
hist = model.fit(train_dataset, validation_data=val_dataset, epochs=EPOCHS)
kru.show_plots(hist.history, metric='accuracy', plot_title='Fashion MNIST model performance')
# evaluate model
print('Evaluating model performance...')
loss, acc = model.evaluate(train_dataset)
print(f' - Training data -> loss = {loss:.3f} - acc = {acc:.3f}')
loss, acc = model.evaluate(val_dataset)
print(f' - Cross-val data -> loss = {loss:.3f} - acc = {acc:.3f}')
loss, acc = model.evaluate(test_dataset)
print(f' - Testing data -> loss = {loss:.3f} - acc = {acc:.3f}')
# save model state
print(f"Saving model state to {MODEL_SAVE_PATH}")
model.save(MODEL_SAVE_PATH)
del model
if DO_PREDICTIONS:
# load model from saved state & evaluate performance
model = load_model(MODEL_SAVE_PATH)
print('Evaluating model performance...')
loss, acc = model.evaluate(train_dataset)
print(f' - Training data -> loss = {loss:.3f} - acc = {acc:.3f}')
loss, acc = model.evaluate(val_dataset)
print(f' - Cross-val data -> loss = {loss:.3f} - acc = {acc:.3f}')
loss, acc = model.evaluate(test_dataset)
print(f' - Testing data -> loss = {loss:.3f} - acc = {acc:.3f}')
if __name__ == '__main__':
main()
| 2.984375 | 3 |
02 - Estruturas de controle/ex044.py | epedropaulo/MyPython | 0 | 12785901 | <reponame>epedropaulo/MyPython
cores = ['\033[m', '\033[31m', '\033[34m']
print(f'{cores[1]}-=-' * 7)
print(f'{cores[2]}PAYMENT OPTIONS.')
print(f'{cores[1]}-=-{cores[0]}' * 7)
print('')
preco = float(input('How much does the product cost? R$'))
print('')
print(f'Enter [{cores[2]} 1 {cores[0]}] for {cores[2]}yes{cores[0]}. \n'
      f'Enter [{cores[1]} 2 {cores[0]}] for {cores[1]}no{cores[0]}.')
parcela = int(input('Will you pay in installments? '))
print('')
if parcela == 2:
    print(f'Enter [{cores[2]} 1 {cores[0]}] for {cores[2]}CASH / CHEQUE{cores[0]}.\n'
          f'Enter [{cores[1]} 2 {cores[0]}] for {cores[1]}card{cores[0]}.')
    modo = int(input('Cash/Cheque or card: '))
    if modo == 1:
        preco1 = preco * 0.9
    else:
        preco1 = preco * 0.95
    print('')
    print(f'The price as a single payment will be: R${preco1 :.2f}.')
elif parcela == 1:
    vezes = int(input('How many installments? '))
    if vezes == 2:
        preco1 = preco / vezes
    else:
        preco1 = (preco * 1.2) / vezes
    print('')
    print(f'The price in {vezes} installments will be R${preco1 :.2f} per installment, paying R${preco1 * vezes :.2f} in total.')
else:
    print('INVALID OPTION!')
| 3.25 | 3 |
cmz/cms_core/urls_helpers.py | inmagik/cmz | 1 | 12785902 | <filename>cmz/cms_core/urls_helpers.py
from django.conf.urls import url, include
from .views import CmsView
def create_urls(pages):
out = []
empty_urls = []
for page_name in pages:
page = pages[page_name]
extra_modules = page.get('extra_modules', [])
if 'url' in page and page['url']:
comp_url = r'^%s/$' % page['url']
else:
comp_url = r'^$'
if 'view' not in page:
#standard cms view
u = url(comp_url, CmsView.as_view(
page_name=page_name,
extra_modules=extra_modules,
template=page.get('template', None)
), name=page_name
)
else:
view = page['view']
            view_params = page.get("view_parms", {})
            u = url(comp_url, view, view_params, name=page_name)
        if page.get('url'):
out.append(u)
else:
empty_urls.append(u)
    if len(empty_urls) > 1:
raise ValueError("CMZ ERROR: your pages.py has more than one empty url")
#trick for allowing ''
out.extend(empty_urls)
return out
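
# Example shape of the ``pages`` mapping this helper expects (inferred from the
# lookups above; the page names, template and view below are hypothetical):
#   pages = {
#       'home': {'url': '', 'template': 'cms/home.html'},
#       'contact': {'url': 'contact', 'view': contact_view, 'view_parms': {}},
#   }
#   urlpatterns = create_urls(pages)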
| 2.53125 | 3 |
Application/errors_module/errors.py | GraphicalDot/datapod-backend-layer | 0 | 12785903 | <gh_stars>0
from loguru import logger
from sanic.response import json
from sanic import Blueprint
from sanic.exceptions import SanicException
ERRORS_BP = Blueprint('errors')
DEFAULT_MSGS = {
400: 'Bad Request',
401: 'Unauthorized',
403: 'Forbidden',
404: 'Not Found',
501: 'Not Implemented',
503: 'Internal Error'
}
def add_status_code(code):
def class_decorator(cls):
cls.status_code = code
return cls
return class_decorator
class ApiException(SanicException):
def __init__(self, message=None, status_code=None):
super().__init__(message)
logger.error(message)
if status_code is not None:
self.status_code = status_code
if message is None:
self.message = DEFAULT_MSGS[self.status_code]
else:
self.message = message
class DuplicateEntryError(Exception):
def __init__(self, unique_key, table_name):
self.msg = f"Duplicate key present --{unique_key}-- in table --{table_name}--"
def __str__(self):
return repr(self.msg)
##Errors related to Account creation
##---------------------ACCOUNT ERRORS --------------------------------##
@add_status_code(400)
class APIBadRequest(ApiException):
def __init__(self, message="Error happened in the api",
status_code=None):
super().__init__(message, status_code)
@add_status_code(400)
class IdentityAlreadyExists(ApiException):
def __init__(self, message="Code repos identity already exists",
status_code=None):
super().__init__(message)
@add_status_code(400)
class IdentityExistsNoPath(ApiException):
def __init__(self, message="Code repos identity exists but no path for private key exists",
status_code=None):
super().__init__(message)
@add_status_code(400)
class IdentityDoesntExists(ApiException):
def __init__(self, message="Code repos identity doesnt exists",
status_code=None):
super().__init__(message)
@add_status_code(400)
class PathDoesntExists(ApiException):
def __init__(self, path=None,
status_code=None):
self.message = f"{path} doesnt exists"
super().__init__(self.message)
@add_status_code(400)
class MnemonicRequiredError(ApiException):
def __init__(self, path=None,
status_code=None):
self.message = f"Mnemonic required from user, Encryption key is missing"
super().__init__(self.message)
@add_status_code(400)
class AccountError(ApiException):
def __init__(self, message="This Account already exists with us",
status_code=None):
super().__init__(message)
@add_status_code(400)
class ClaimAccountError(ApiException):
def __init__(self, message="The user already has claimed this account",
status_code=None):
super().__init__(message)
@add_status_code(400)
class AccountCreationError(ApiException):
def __init__(self, message="This user is not allowed to create accounts",
status_code=None):
super().__init__(message)
##---------------------ACCOUNT ERRORS END --------------------------------##
@ERRORS_BP.exception(ApiException)
def api_json_error(request, exception):
return json({
'message': exception.message,
'error': True,
'success': False,
'Data': None
}, status=exception.status_code)
@ERRORS_BP.exception(Exception)
def json_error(request, exception):
try:
code = exception.status_code
except AttributeError:
code = 500
logger.exception(exception)
return json({
'error': exception.args[0]
}, status=code)
| 2.328125 | 2 |
Modules/LeetCode/Task5.py | Itsuke/Learning-Python | 0 | 12785904 | '''
https://leetcode.com/discuss/interview-question/1683420/Facebook-or-Online-or-USA-or-E5
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two
nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to
be a descendant of itself).”
Constraints:
The number of nodes in the tree is in the range [2, 10^5].
-10^9 <= Node.val <= 10^9
All Node.val are unique. - good to confirm with the interviewer
p != q - good to confirm with the interviewer
p and q will exist in the tree. - good to confirm with the interviewer
'''
#16:36
''' Solution proposal
_function(node, value, path)
path.append(value)
if (node.value == value):
return path
else:
function(node.left, p, q, node.value)
function(node.right, p, q, node.value)
function(root, p, q)
path_p = _function(root, p, [])
path_q = _function(root, q, [])
ancestor = [value for value in path_p if value in path_q]
'''
def _find_ancestor(node, value, path): #O(2*N)
path.append(node.value)
if node.value == value:
return True
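    # in this example tree a node has either two children or none, so a missing left child marks a leaf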
elif node.left == None:
path.pop()
return
else:
if (_find_ancestor(node.left, value, path)):
return path
if (_find_ancestor(node.right, value, path)):
return path
path.pop()
def find_ancestor(root, p, q):
path_p = _find_ancestor(root, p, [])
path_q = _find_ancestor(root, q, [])
print(path_p, path_q)
ancestor = [value for value in path_p if value in path_q]
print(ancestor[-1])
'''Test cases
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
Output: 3
S1: 3, path_p = [3]
S2: 5, path_p = [3, 5]
S3: 3, path_q = [3]
S4: 3, path_q = [3, 5]
S5: 3, path_q = [3, 5, 6]
S6: 3, path_q = [3, 5, 2]
S7: 3, path_q = [3, 5, 2, 7]
S8: 3, path_q = [3, 5, 2, 4]
S9: 3, path_q = [3, 1]
s10: path_q U path_p = 3
'''
#16:59
class Tree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def add_leafs(self, left_value, right_value):
self.left = Tree(left_value)
self.right = Tree(right_value)
my_tree = Tree(3)
my_tree.add_leafs(5, 1)
my_tree.left.add_leafs(6, 2)
my_tree.right.add_leafs(0, 8)
my_tree.left.right.add_leafs(7, 4)
find_ancestor(my_tree, 5, 1)
find_ancestor(my_tree, 5, 4) | 3.546875 | 4 |
second_step/s7.py | relax-space/python-xxm | 0 | 12785905 | import pymongo
"""
CRUD operations for a MongoDB database
1. First start MongoDB locally: docker-compose -f second_step/example/mongo.yml up
2. Then run the following command:
python.exe .\second_step\s7.py
Reference: https://www.runoob.com/python3/python-mongodb.html
"""
class Model:
    def __init__(self):
        client = pymongo.MongoClient("mongodb://localhost:27017")
        self.db = client["fruit"]
        self.table = self.db["fruit"]

    def add(self, fruitDict):
        self.table.insert_one(fruitDict)

    def update(self, d1, d2):
        self.table.update_one(d1, d2)

    def delete(self, fruitDict):
        self.table.delete_one(fruitDict)

    def find(self, fruitDict):
        fruit = self.table.find(fruitDict)
        return list(fruit)
if __name__ == "__main__":
m = Model()
    fruitDict = {"name": "apple", "price": 100}
    m.add(fruitDict)
    b = m.find({"name": "apple"})
    print(b)
    m.update({"name": "apple"}, {"$set": {"price": 80}})
    m.delete({"name": "apple"})
| 3.625 | 4 |
tests/test_individuals/test_mixed_individual.py | alessandrolenzi/yaga | 0 | 12785906 | from yaga_ga.evolutionary_algorithm.genes import IntGene, CharGene
from yaga_ga.evolutionary_algorithm.individuals import (
MixedIndividualStructure,
)
def test_initialization_with_tuple():
gene_1 = CharGene()
gene_2 = IntGene(lower_bound=1, upper_bound=1)
individual = MixedIndividualStructure((gene_1, gene_2))
assert len(individual) == 2
built = individual.build()
assert type(built[0]) == str
assert type(built[1]) == int
assert individual[0] == gene_1
assert individual[1] == gene_2
def test_progressive_initialization():
gene_1 = CharGene()
gene_2 = IntGene(lower_bound=1, upper_bound=1)
individual = MixedIndividualStructure(gene_1)
assert len(individual) == 1
built = individual.build()
assert len(built) == 1
assert type(built[0]) == str
individual_2 = individual.add_gene(gene_2)
assert len(individual_2) == 2
assert individual_2[0] == gene_1
assert individual_2[1] == gene_2
built2 = individual_2.build()
assert len(built2) == 2
assert type(built2[0]) == str
assert type(built2[1]) == int
| 2.640625 | 3 |
test/test_numeric.py | radovanhorvat/gonzales | 0 | 12785907 | <reponame>radovanhorvat/gonzales
import numpy as np
import gonzales.lib.physics as phy
def test_com():
# test center of mass calculation
# 1. case
r = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 1.]])
m = np.array([1., 2., 3.])
com = phy.calc_com(r, m)
np.testing.assert_almost_equal(com, np.array([5/6., 0.5, 0.5]))
# 2. case - one of the masses dominates
r = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 1.]])
m = np.array([1., 2., 1.0e15])
com = phy.calc_com(r, m)
np.testing.assert_almost_equal(com, np.array([1., 1., 1.]))
def test_ke():
# test kinetic energy calculation
# 1. trivial case
v = np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
m = np.array([1., 2., 3.])
ke = phy.calc_ke(v, m)
np.testing.assert_equal(ke, 0.)
# 2. other cases
v = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 1.]])
m = np.array([1., 2., 3.])
ke = phy.calc_ke(v, m)
np.testing.assert_equal(ke, 11 / 2.)
def test_pe():
# test potential energy calculation
# 1. case
r = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 1.]])
m = np.array([1., 2., 3.])
pe = phy.calc_pe(r, m, 1.0, 0.)
np.testing.assert_equal(pe, - (2. + 3 / np.sqrt(3) + 6 / np.sqrt(2)))
# 2. case - particles at huge distances
r = np.array([[0., 0., 0.], [1.0e15, 0., 0.], [1., 1.0e15, 1.]])
m = np.array([1., 2., 3.])
pe = phy.calc_pe(r, m, 1.0, 0.)
np.testing.assert_almost_equal(pe, 0.)
def test_te():
# test total energy calculation
r = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 1.]])
v = np.array([[-1., 1., 0.], [1., -1., 0.], [1., 1., 1.]])
m = np.array([1., 2., 3.])
pe = phy.calc_pe(r, m, 1.0, 0.)
ke = phy.calc_ke(v, m)
te = phy.calc_te(r, v, m, 1.0, 0.)
np.testing.assert_equal(pe + ke, te)
def test_ang_mom():
# test angular momentum
r = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 1.]])
v = np.array([[-1., 1., 0.], [1., -1., 0.], [1., -2., 1.]])
m = np.array([1., 2., 3.])
am = phy.calc_ang_mom(r, v, m)
np.testing.assert_equal(am, np.array([9., 0., -11.]))
| 2.390625 | 2 |
window/viewport.py | jeffa/window-viewport | 0 | 12785908 | <gh_stars>0
__version__='0.0.1'
class viewport:
    def __init__(self, Wb=0, Wt=1, Wl=0, Wr=1, Vb=-1, Vt=1, Vl=-1, Vr=1):
        # window-to-viewport scale and translation coefficients
        self.Sx = (Vr - Vl) / (Wr - Wl)
        self.Sy = (Vt - Vb) / (Wt - Wb)
        self.Tx = (Vl * Wr - Wl * Vr) / (Wr - Wl)
        self.Ty = (Vb * Wt - Wb * Vt) / (Wt - Wb)
def Dx( self, x ):
return self.Sx * x + self.Tx
def Dy( self, y ):
return self.Sy * y + self.Ty
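
# Illustrative usage sketch (the numbers are hypothetical): map window
# coordinates in [0, 10] x [0, 10] onto the default viewport [-1, 1] x [-1, 1].
if __name__ == "__main__":
    vp = viewport(Wb=0, Wt=10, Wl=0, Wr=10)
    # the window centre (5, 5) maps to the viewport origin
    print(vp.Dx(5), vp.Dy(5))    # -> 0.0 0.0
    print(vp.Dx(10), vp.Dy(0))   # -> 1.0 -1.0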
| 2.390625 | 2 |
ava/preprocessing/__init__.py | mdmarti/autoencoded-vocal-analysis | 0 | 12785909 | <gh_stars>0
"""
AVA preprocessing module
Contains
--------
`ava.preprocessing.preprocess`
Preprocess syllable spectrograms.
`ava.preprocessing.utils`
Useful functions for preprocessing.
"""
| 0.90625 | 1 |
ascii_video.py | FabulousCodingFox/AsciiVideo | 0 | 12785910 | from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import cv2,time,os
from moviepy.editor import *
from tkinter import filedialog as fd
def im_to_ascii(im:Image,width:int=640,keepAlpha:bool=True,highContrastMode:bool=False,fontResolution:int=5):
ratio:float = width/im.size[0]
im:Image = im.resize((int(im.size[0]*ratio),int(im.size[1]*ratio)),Image.NEAREST).convert("LA")
if highContrastMode: ramp:str = "@. .:-=+*#%@"
else : ramp:str = " .:-=+*#%@"
c:list[str] = []
for h in range(im.size[1]):
row:list[str] = []
for w in range(im.size[0]):
col:tuple = im.getpixel((w,h))
if keepAlpha and col[1]<=127: row.append(" ")
            else: row.append(ramp[min(int(col[0] / 255 * len(ramp)), len(ramp) - 1)])  # clamp so 0 maps to the darkest glyph and 255 to the brightest
c.append(" ".join(row))
w:int = im.size[0] * fontResolution * 5
h:int = im.size[1] * fontResolution * 6
font:ImageFont = ImageFont.truetype("monogram.ttf", 7 * fontResolution)
img = Image.new("RGB",(w,h),(0,0,0))
ImageDraw.Draw(img).text(
(0, 0),
"\n".join(c),
(255,255,255),
font=font
)
return img
def videoFileToAscii(path:str,skip:bool=False):
if not skip:
def extractFrames(path:str)->tuple[int,int,int]:
print("Extracting Frames...")
starttime = time.time()
vidcap = cv2.VideoCapture(path)
success,image = vidcap.read()
count = 0
length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
while success:
cv2.imwrite("frame/frame%d.png" % count, image)
success,image = vidcap.read()
count += 1
if time.time()-starttime>=2: print(int((count/length)*100),"%",sep="",end="\r");starttime=time.time()
return count,length,vidcap.get(cv2.CAP_PROP_FPS)
videoFrames, videoLength, videoFramerate = extractFrames(path)
videoTargetWidth = 120
videoTargetFramerate = 10
print("Converting Frames...")
for frame in range(0,videoFrames,int(videoFramerate/videoTargetFramerate)):
starttime = time.time()
with Image.open("frame/frame%d.png" % frame) as im:
im_to_ascii(im,videoTargetWidth,fontResolution=4).save("frame/frame%d.png" % frame)
if time.time()-starttime>=2: print(int((frame/videoFrames)*100),"%",sep="",end="\r");starttime=time.time()
else:
videoFrames = 359
videoFramerate = 30
videoTargetFramerate = 10
clip = ImageSequenceClip([f"frame/frame{frame}.png" for frame in range(0,videoFrames,int(videoFramerate/videoTargetFramerate))], fps = videoTargetFramerate)
clip.write_videofile(os.path.join(os.path.dirname(__file__),"output.mp4"))
if __name__ == "__main__":
path = fd.askopenfile(initialdir=os.path.dirname(__file__))
    if any(path.name.endswith(ext) for ext in (".mp4", ".mkv", ".avi", ".mov")):
        videoFileToAscii(path.name)
    elif any(path.name.endswith(ext) for ext in (".jpg", ".jpeg", ".png", ".gif")):
with Image.open(path.name) as im:
i = im_to_ascii(im,width=516)
i.save("output.png")
i.show()
| 2.75 | 3 |
wrappers/python_2-7/runProducerCallbacksOWP.py | UpperLEFTY/worldpay-within-sdk | 0 | 12785911 | <filename>wrappers/python_2-7/runProducerCallbacksOWP.py
import WPWithinWrapperImpl
import WWTypes
import time
class TheEventListener():
def __init__(self):
print "Inialised custom event listener"
def beginServiceDelivery(self, serviceId, serviceDeliveryToken, unitsToSupply):
try:
print "OVERRIDE: event from core - onBeginServiceDelivery()"
print "ServiceID: {0}\n".format(serviceId)
print "UnitsToSupply: {0}\n".format(unitsToSupply)
print "SDT.Key: {0}\n".format(serviceDeliveryToken.key)
print "SDT.Expiry: {0}\n".format(serviceDeliveryToken.expiry)
print "SDT.Issued: {0}\n".format(serviceDeliveryToken.issued)
print "SDT.Signature: {0}\n".format(serviceDeliveryToken.signature)
print "SDT.RefundOnExpiry: {0}\n".format(serviceDeliveryToken.refundOnExpiry)
except Exception as e:
print "doBeginServiceDelivery failed: " + str(e)
def endServiceDelivery(self, serviceId, serviceDeliveryToken, unitsReceived):
try:
print "OVERRIDE: event from core - onEndServiceDelivery()"
print "ServiceID: {0}\n".format(serviceId)
print "UnitsReceived: {0}\n".format(unitsReceived)
print "SDT.Key: {0}\n".format(serviceDeliveryToken.key)
print "SDT.Expiry: {0}\n".format(serviceDeliveryToken.expiry)
print "SDT.Issued: {0}\n".format(serviceDeliveryToken.issued)
print "SDT.Signature: {0}\n".format(serviceDeliveryToken.signature)
print "SDT.RefundOnExpiry: {0}\n".format(serviceDeliveryToken.refundOnExpiry)
except Exception as e:
print "doEndServiceDelivery failed: " + str(e)
def run():
try:
print "WorldpayWithin Sample Producer (with callbacks)..."
global wpw
wpWithinEventListener = TheEventListener()
# add listeners to the events
# wpWithinEventListener.onBeginServiceDelivery += doBeginServiceDelivery
# wpWithinEventListener.onEndServiceDelivery += doEndServiceDelivery
wpw = WPWithinWrapperImpl.WPWithinWrapperImpl('127.0.0.1', 9055, True, wpWithinEventListener, 9095)
wpw.setup("Producer Example", "Example WorldpayWithin producer")
svc = WWTypes.WWService();
svc.setName("Car charger")
svc.setDescription("Can charge your hybrid / electric car")
svc.setId(1)
ccPrice = WWTypes.WWPrice()
ccPrice.setId(1)
ccPrice.setDescription("Kilowatt-hour")
ccPrice.setUnitDescription("One kilowatt-hour")
ccPrice.setUnitId(1)
ppu = WWTypes.WWPricePerUnit()
ppu.setAmount(25)
ppu.setCurrencyCode("GBP")
ccPrice.setPricePerUnit(ppu)
prices = {}
prices[ccPrice.getId()] = ccPrice
svc.setPrices(prices)
# [ CLIENT KEY, SERVICE KEY] : From online.worldpay.com
wpw.initProducer({"psp_name":"worldpayonlinepayments","hte_public_key":"<KEY>", "hte_private_key": "T_S_3bdadc9c-54e0-4587-8d91-29813060fecd", "api_endpoint":"https://api.worldpay.com/v1", "merchant_client_key": "<KEY>", "merchant_service_key": "T_S_3bdadc9c-54e0-4587-8d91-29813060fecd"})
wpw.addService(svc)
broadcastDuration = 20000
durationSeconds = broadcastDuration / 1000
wpw.startServiceBroadcast(broadcastDuration) #20000
repeat = 0
while repeat < durationSeconds:
print "Producer Waiting " + str(durationSeconds - repeat) + " seconds to go..."
time.sleep(1)
repeat = repeat + 1
print "Stopped broadcasting, RPC still running"
repeat2 = 0
while repeat2 < 99999999999:
print "Producer keeping alive (to receive callbacks...)"
time.sleep(1)
repeat2 = repeat2 + 1
except WWTypes.WPWithinGeneralException as e:
print e
run()
| 2.3125 | 2 |
UI_flask_javascript_soundcloud_app/app.py | AdiletGaparov/sentiment-based-song-recommender | 0 | 12785912 | <reponame>AdiletGaparov/sentiment-based-song-recommender
from flask import Flask, request, render_template
import pandas as pd
import numpy as np
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
subject = None
level = None
selected_choice = ""
songs = []
lyrics = pd.read_csv('lyrics_sentiment.csv')
subjectivity_dict = {'subjective-20': 'Python, R, SQL',
'subjective-40': 'Hadoop, Spark, Streaming',
'subjective-60': 'Machine Learning',
'subjective-80': 'Data Visualization',
'subjective-100': 'Ethics, Agile, Design Thinking'}
polarity_dict = {'very-low': 'I am ready!',
'low': 'Need to recap few concepts',
'average': 'I still have few more days',
'high': 'Proficiency is a good grade',
'very-high': 'God bless Gaussian curve at IE'}
if request.method == "POST":
subject = request.form.get('subject-choice')
level = request.form.get('despair-level')
genre = request.form.get('genre')
selected_choice = f'{subjectivity_dict.get(subject)} / {polarity_dict.get(level)} / {genre}'
subj_genre_filter = get_filter(lyrics, subject, genre)
polarity_scores = lyrics.loc[subj_genre_filter, 'polarity_avg'].unique()
t_min, t_max = get_polarity_threshold(polarity_scores, level)
lyrics_filtered = lyrics.loc[subj_genre_filter & (lyrics['polarity_avg'] <= t_max) & (lyrics['polarity_avg'] >= t_min)].sort_values('polarity_avg')
song_names = lyrics_filtered.song
artist_names = lyrics_filtered.artist
songs = [song + " by " + artist for song, artist in zip(song_names, artist_names)]
return render_template('index.html', songs=songs[:20], selected_choice=selected_choice)
def get_polarity_threshold(polarity_scores, level):
"""Get threshold for polarities based on percentile"""
if level == 'very-low':
t_max = np.percentile(polarity_scores, 20)
t_min = np.percentile(polarity_scores, 0)
elif level == 'low':
t_max = np.percentile(polarity_scores, 40)
t_min = np.percentile(polarity_scores, 20)
elif level == 'average':
t_max = np.percentile(polarity_scores, 60)
t_min = np.percentile(polarity_scores, 40)
elif level == 'high':
t_max = np.percentile(polarity_scores, 80)
t_min = np.percentile(polarity_scores, 60)
elif level == 'very-high':
t_max = np.percentile(polarity_scores, 100)
t_min = np.percentile(polarity_scores, 80)
else:
t_max = np.percentile(polarity_scores, 100)
t_min = np.percentile(polarity_scores, 0)
return t_min, t_max
def get_filter(df, subject, genre):
"""Get boolean array that filters based on subjectivity and genre level"""
if genre == '':
genre_list = df.genre.unique()
else:
genre_list = [genre]
if subject == 'subjective-20':
filter_array = (df['subjectivity_avg'] <= 0.2) & (df['genre'].isin(genre_list))
elif subject == 'subjective-40':
filter_array = (df['subjectivity_avg'] > 0.2) & (df['subjectivity_avg'] <= 0.4) & (df['genre'].isin(genre_list))
elif subject == 'subjective-60':
filter_array = (df['subjectivity_avg'] > 0.4) & (df['subjectivity_avg'] <= 0.6) & (df['genre'].isin(genre_list))
elif subject == 'subjective-80':
filter_array = (df['subjectivity_avg'] > 0.6) & (df['subjectivity_avg'] <= 0.8) & (df['genre'].isin(genre_list))
    elif subject == 'subjective-100':
        filter_array = (df['subjectivity_avg'] > 0.8) & (df['subjectivity_avg'] <= 1.0) & (df['genre'].isin(genre_list))
else:
filter_array = df['genre'].isin(genre_list)
return filter_array
if __name__ == '__main__':
app.run()
| 2.984375 | 3 |
plugins/modules/oci_database_external_database_connector_facts.py | LaudateCorpus1/oci-ansible-collection | 0 | 12785913 | <reponame>LaudateCorpus1/oci-ansible-collection<filename>plugins/modules/oci_database_external_database_connector_facts.py
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_external_database_connector_facts
short_description: Fetches details about one or multiple ExternalDatabaseConnector resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ExternalDatabaseConnector resources in Oracle Cloud Infrastructure
- Gets a list of the external database connectors in the specified compartment.
- If I(external_database_connector_id) is specified, the details of a single ExternalDatabaseConnector will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
external_database_connector_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the
external database connector resource (`ExternalDatabaseConnectorId`).
- Required to get a specific external_database_connector.
type: str
aliases: ["id"]
compartment_id:
description:
- The compartment L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to list multiple external_database_connectors.
type: str
external_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the external database whose connectors will be listed.
- Required to list multiple external_database_connectors.
type: str
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`).
Default order for TIMECREATED is descending.
Default order for DISPLAYNAME is ascending.
The DISPLAYNAME sort order is case sensitive.
type: str
choices:
- "DISPLAYNAME"
- "TIMECREATED"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`).
type: str
choices:
- "ASC"
- "DESC"
lifecycle_state:
description:
- A filter to return only resources that match the specified lifecycle state.
type: str
choices:
- "PROVISIONING"
- "AVAILABLE"
- "UPDATING"
- "TERMINATING"
- "TERMINATED"
- "FAILED"
display_name:
description:
- A filter to return only resources that match the entire display name given. The match is not case sensitive.
type: str
aliases: ["name"]
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific external_database_connector
oci_database_external_database_connector_facts:
# required
external_database_connector_id: "ocid1.externaldatabaseconnector.oc1..xxxxxxEXAMPLExxxxxx"
- name: List external_database_connectors
oci_database_external_database_connector_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
external_database_id: "ocid1.externaldatabase.oc1..xxxxxxEXAMPLExxxxxx"
# optional
sort_by: DISPLAYNAME
sort_order: ASC
lifecycle_state: PROVISIONING
display_name: display_name_example
"""
RETURN = """
external_database_connectors:
description:
- List of ExternalDatabaseConnector resources
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- The user-friendly name for the
L(external database connector,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
The name does not have to be unique.
returned: on success
type: str
sample: display_name_example
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the
L(external database connector,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current lifecycle state of the external database connector resource.
returned: on success
type: str
sample: PROVISIONING
lifecycle_details:
description:
- Additional information about the current lifecycle state.
returned: on success
type: str
sample: lifecycle_details_example
time_created:
description:
- The date and time the external connector was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
connector_type:
description:
- The type of connector used by the external database resource.
returned: on success
type: str
sample: MACS
external_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the external database resource.
returned: on success
type: str
sample: "ocid1.externaldatabase.oc1..xxxxxxEXAMPLExxxxxx"
connection_status:
description:
- The status of connectivity to the external database.
returned: on success
type: str
sample: connection_status_example
time_connection_status_last_updated:
description:
- The date and time the connectionStatus of this external connector was last updated.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
connection_string:
description:
- ""
returned: on success
type: complex
contains:
hostname:
description:
- The host name of the database.
returned: on success
type: str
sample: hostname_example
port:
description:
- The port used to connect to the database.
returned: on success
type: int
sample: 56
service:
description:
- The name of the service alias used to connect to the database.
returned: on success
type: str
sample: service_example
protocol:
description:
- The protocol used to connect to the database.
returned: on success
type: str
sample: TCP
connection_credentials:
description:
- ""
returned: on success
type: complex
contains:
credential_type:
description:
- The type of credential used to connect to the database.
returned: on success
type: str
sample: NAME_REFERENCE
credential_name:
description:
- "The name of the credential information that used to connect to the database. The name should be in \\"x.y\\" format, where
the length of \\"x\\" has a maximum of 64 characters, and length of \\"y\\" has a maximum of 199 characters.
The name strings can contain letters, numbers and the underscore character only. Other characters are not valid, except for
the \\".\\" character that separates the \\"x\\" and \\"y\\" portions of the name.
*IMPORTANT* - The name must be unique within the OCI region the credential is being created in. If you specify a name
that duplicates the name of another credential within the same OCI region, you may overwrite or corrupt the credential that is already
using the name."
- "For example: inventorydb.abc112233445566778899"
returned: on success
type: str
sample: credential_name_example
username:
description:
- The username that will be used to connect to the database.
returned: on success
type: str
sample: username_example
password:
description:
- The password that will be used to connect to the database.
returned: on success
type: str
sample: example-password
role:
description:
- The role of the user that will be connecting to the database.
returned: on success
type: str
sample: SYSDBA
connector_agent_id:
description:
- The ID of the agent used for the
L(external database connector,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
returned: on success
type: str
sample: "ocid1.connectoragent.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example",
"time_created": "2013-10-20T19:20:30+01:00",
"connector_type": "MACS",
"external_database_id": "ocid1.externaldatabase.oc1..xxxxxxEXAMPLExxxxxx",
"connection_status": "connection_status_example",
"time_connection_status_last_updated": "2013-10-20T19:20:30+01:00",
"connection_string": {
"hostname": "hostname_example",
"port": 56,
"service": "service_example",
"protocol": "TCP"
},
"connection_credentials": {
"credential_type": "NAME_REFERENCE",
"credential_name": "credential_name_example",
"username": "username_example",
"password": "<PASSWORD>",
"role": "SYSDBA"
},
"connector_agent_id": "ocid1.connectoragent.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ExternalDatabaseConnectorFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"external_database_connector_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
"external_database_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_external_database_connector,
external_database_connector_id=self.module.params.get(
"external_database_connector_id"
),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
"lifecycle_state",
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_external_database_connectors,
compartment_id=self.module.params.get("compartment_id"),
external_database_id=self.module.params.get("external_database_id"),
**optional_kwargs
)
ExternalDatabaseConnectorFactsHelperCustom = get_custom_class(
"ExternalDatabaseConnectorFactsHelperCustom"
)
class ResourceFactsHelper(
ExternalDatabaseConnectorFactsHelperCustom, ExternalDatabaseConnectorFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
external_database_connector_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
external_database_id=dict(type="str"),
sort_by=dict(type="str", choices=["DISPLAYNAME", "TIMECREATED"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
lifecycle_state=dict(
type="str",
choices=[
"PROVISIONING",
"AVAILABLE",
"UPDATING",
"TERMINATING",
"TERMINATED",
"FAILED",
],
),
display_name=dict(aliases=["name"], type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="external_database_connector",
service_client_class=DatabaseClient,
namespace="database",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(external_database_connectors=result)
if __name__ == "__main__":
main()
| 1.476563 | 1 |
source/lib/conditional_resource.py | Snehitha12345/mlops-workload-orchestrator | 20 | 12785914 | <gh_stars>10-100
# #####################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
import jsii
from aws_cdk.core import CfnCondition, CfnResource, IAspect, IConstruct
# This code enables `apply_aspect()` to apply conditions to a resource.
# This way we can provision some resources if a condition is true.
# For example, if PROVISIONTYPE parameter is 'Git' then we provision CodePipeline
# with it's source stage being CodeCommit or GitHub
# https://docs.aws.amazon.com/cdk/latest/guide/aspects.html
@jsii.implements(IAspect)
class ConditionalResources:
def __init__(self, condition: CfnCondition):
self.condition = condition
def visit(self, node: IConstruct):
child = node.node.default_child # type: CfnResource
if child:
child.cfn_options.condition = self.condition
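
# Illustrative usage sketch (the names below are hypothetical, not part of
# this module): attach a CfnCondition to every resource under a construct so
# the whole subtree is provisioned only when the condition holds.
#
#   is_git = CfnCondition(
#       stack, "IsGitProvision",
#       expression=core.Fn.condition_equals(provision_type_param, "Git"))
#   pipeline_construct.node.apply_aspect(ConditionalResources(is_git))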
| 1.539063 | 2 |
JDI/web/selenium/elements/api_interact/find_element_by.py | jdi-testing/jdi-python | 5 | 12785915 | <filename>JDI/web/selenium/elements/api_interact/find_element_by.py
from selenium.webdriver.common.by import By as Selenium_By
class By:
@staticmethod
def id(by_id):
return Selenium_By.ID, by_id
@staticmethod
def css(by_css):
return Selenium_By.CSS_SELECTOR, by_css
@staticmethod
def xpath(by_xpath):
return Selenium_By.XPATH, by_xpath
@staticmethod
def link_text(link_text):
return Selenium_By.LINK_TEXT, link_text
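
# Illustrative usage (hypothetical selector): each helper returns a
# (strategy, value) locator tuple that unpacks straight into Selenium, e.g.
#
#   driver.find_element(*By.css("#login-form"))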
| 2.328125 | 2 |
srae.py | MHHukiewitz/SRAE_pytorch | 0 | 12785916 | <gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
# TODO: Merging and resetting features
# TODO: Plots of layer activities
# TODO: Stack RAEs for deep network
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using {device} to train networks.")
class RAEClassifier(nn.Module):
__constants__ = ['input_size', 'hidden_size', 'output_size']
def __init__(self, input_size, hidden_size, output_size,
reconstruction_activation: Callable[[torch.Tensor], torch.Tensor] = nn.ReLU(),
hidden_activation: Callable[[torch.Tensor], torch.Tensor] = nn.ReLU(),
output_activation: Callable[[torch.Tensor], torch.Tensor] = nn.Softmax(),
reconstruction_loss: nn.Module = nn.MSELoss()):
super(RAEClassifier, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.input = torch.zeros(input_size)
self.output_activation = output_activation
# also possible: CosineEmbeddingLoss
self.reconstruction_loss = reconstruction_loss
self.autoencoder = ReactiveAutoencoder(input_size, hidden_size, self.reconstruction_loss,
hidden_activation,
reconstruction_activation)
self.classifier = nn.Linear(hidden_size, output_size)
self.classifier.weight.register_hook(self.backward_classifier_hook)
def forward(self, input):
"""The forward pass calculates only the h if no error_signal is provided."""
self.input = input
encoding, reconstruction = self.autoencoder(input) # Forward the Autoencoder and detach from the graph
output = self.classifier(encoding) # Forward the detached h through the Classifier
return self.output_activation(output)
def backward_classifier_hook(self, grad):
"""Triggers autoencoder sparsification with classifier, after backward on this classifier."""
with torch.enable_grad():
encoding, reconstruction = self.autoencoder(self.input, torch.sum(grad, 0))
rec_loss = self.reconstruction_loss(reconstruction, self.input)
rec_loss.backward()
class ReactiveAutoencoder(nn.Module):
"""The RAE a.k.a. SRAE a.k.a. the autoencoder with the strict supervised sparsity matrix.
This module provides a framework for training an encoder to maximize information throughput,
while converging on an error_signal. Works currently only for single samples/online learning.
Planned are batch mode as well as multiple layers."""
__constants__ = ['input_size', 'output_size']
def __init__(self, input_size, output_size, reconstruction_loss: nn.Module,
hidden_activation: Callable[[torch.Tensor], torch.Tensor] = None,
reconstruction_activation: Callable[[torch.Tensor], torch.Tensor] = None,
bias=True, reconstruction_bias: str = 'zeros', activation_scaling=True):
super(ReactiveAutoencoder, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_activation = hidden_activation # TODO: what happens if both activations differ?
self.activation_scaling = activation_scaling
if activation_scaling:
self.scaling = None # TODO: Really necessary?
self.encoder = nn.Linear(input_size, output_size, bias=bias)
self.h = torch.zeros(output_size, requires_grad=True)
self.predict = torch.zeros(output_size)
self.reconstruction_activation = reconstruction_activation
self.reconstruction_loss = reconstruction_loss
self.reconstructed_input = torch.zeros(input_size, requires_grad=True)
self.reconstruction_bias_type = reconstruction_bias
self.reconstruction_bias = self.fresh_reconstruction_bias(self.reconstruction_bias_type)
    def fresh_reconstruction_bias(self, type: str):
        # (trailing commas removed: they made the 'ones'/'rand'/'randn' branches return 1-tuples)
        if type == 'none':
            return None
        elif type == 'zeros':
            return torch.zeros(self.input_size, requires_grad=True).to(self.h.device)
        elif type == 'ones':
            return torch.ones(self.input_size, requires_grad=True).to(self.h.device)
        elif type == 'rand':
            return torch.rand(self.input_size, requires_grad=True).to(self.h.device)
        elif type == 'randn':
            return torch.randn(self.input_size, requires_grad=True).to(self.h.device)
def forward(self, x: torch.Tensor, error_signal: torch.Tensor = None):
"""The forward pass calculates only the h if no error_signal is provided.
If an error_signal is provided, then assume same x and use the last h for sparsity and
reconstruction calculation.
"""
# first pass forward
if error_signal is None:
with torch.no_grad():
self.h = self.encoder(x)
if self.hidden_activation is not None:
# save for later
self.h = self.hidden_activation(self.h)
return self.h, None
# reconstruction
self.h.requires_grad_()
self.reconstructed_input = F.linear(self.h, self.encoder.weight.t(), self.reconstruction_bias)
if self.reconstruction_activation is not None:
self.reconstructed_input = self.reconstruction_activation(self.reconstructed_input)
# calculate preliminary loss
rec_loss = self.reconstruction_loss(self.reconstructed_input, x)
rec_loss.backward() # compute gradients for self.encoder.weight & self.bias
# compute strict supervised sparsity mask
# predict output after update
self.predict = F.linear(x, self.encoder.weight + self.encoder.weight.grad,
self.encoder.bias)
delta = self.h - self.predict
if self.activation_scaling:
# adjust own gradient scaling to error_signal magnitude -> compare maxima
self.scaling = (torch.max(torch.abs(error_signal)).item() / torch.max(delta).item())
adjusted_delta = delta * self.scaling
# noinspection PyTypeChecker
mask = torch.where((error_signal - adjusted_delta).abs() < error_signal.abs(), 1, 0)
else:
# noinspection PyTypeChecker
mask = torch.where((error_signal - delta).abs() < error_signal.abs(), 1, 0)
# reset gradients from preliminary backward calculation
self.encoder.zero_grad()
masked_encoding = self.h * mask
# reconstruct using sparsified h
self.reconstructed_input = F.linear(masked_encoding, self.encoder.weight.t(), self.reconstruction_bias)
return self.h, self.reconstructed_input
    def backward(self):
        # nn.Module defines no backward(), so the original super() call would
        # raise AttributeError; this hook is meant to be invoked manually after
        # the reconstruction loss backward pass to rescale the gradients.
        if self.activation_scaling:
            self.encoder.weight.grad *= self.scaling
            self.encoder.bias.grad *= self.scaling
            self.reconstruction_bias.grad += self.scaling
    def reset_parameters(self) -> None:
        # nn.Module has no reset_parameters(); reset the underlying Linear directly
        self.encoder.reset_parameters()
        self.reconstruction_bias = self.fresh_reconstruction_bias(self.reconstruction_bias_type)
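
# Minimal training sketch (assumptions: flattened 784-d inputs, 10 classes,
# single-sample/online updates as the class docstrings above require; the
# names below are illustrative only):
#
#   model = RAEClassifier(784, 256, 10).to(device)
#   optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
#   criterion = nn.NLLLoss()
#   for x, y in samples:                 # x: (784,) tensor, y: () class index
#       out = model(x)                   # softmax output
#       loss = criterion(torch.log(out).unsqueeze(0), y.unsqueeze(0))
#       optimizer.zero_grad()
#       loss.backward()                  # triggers the classifier hook, which
#       optimizer.step()                 # sparsifies the autoencoder encoding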
| 2.609375 | 3 |
spectrometer_functions.py | jhoyland/spectrum-workshop | 0 | 12785917 | # Find the slit. This function finds the location of the slit in the photograph of the spectrum
# The function takes a single line of the data and scans it to find the maximum value.
# If it finds a block of saturated pixels it finds the middle pixel to be the slit.
# The function returns the column number of the slit.
import math
def find_slit(data):
mx = 0
mxc = 0
startslit = 0
endslit = 0
for c,d in enumerate(data):
if d > mx:
mx = d
mxc = c
if startslit == 0 and d >= 255:
startslit = c
if endslit == 0 and startslit > 0 and d < 254:
endslit = c
break
# We found a slit of saturated values
if startslit > 0 and endslit > startslit:
return math.ceil(0.5 * (endslit - startslit) + startslit)
# Or just return the location of the biggest value found
else:
return mxc
# Reads in the data along with the grating pitch (g in lines/mm) and resolution in radians per pixel
def get_spectrum(data,g,res):
s = find_slit(data)
d2 = data[s::-1]
d = 0.001 / g # convert lines/mm into grating spacing in m
wvl = [ 1e9* d * math.sin(i * res) for i in range(len(d2))]
return (wvl,d2) | 3.46875 | 3 |
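
# Illustrative usage on synthetic data (the grating pitch and angular
# resolution below are placeholders, not calibrated values):
if __name__ == "__main__":
    row = [0] * 200
    row[120:124] = [255] * 4   # saturated block marks the slit (centre ~122)
    row[60] = 180              # a spectral line to the left of the slit
    wavelengths, counts = get_spectrum(row, g=600, res=0.0005)
    print("slit column:", find_slit(row))
    print("line wavelength ~ %.1f nm" % wavelengths[122 - 60])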
ramscube/ramscube.py | freemansw1/ramscube | 0 | 12785918 | import warnings
warnings.filterwarnings('ignore', category=UserWarning, append=True)
RAMS_Units=dict()
# winds
RAMS_Units['UC']='m s-1'
RAMS_Units['VC']='m s-1'
RAMS_Units['WC']='m s-1'
# potential temperature
RAMS_Units['THETA']='K'
RAMS_Units['PI']='J kg-1 K-1'
RAMS_Units['DN0']='kg m-3'
# water vapour mixing ratio:
RAMS_Units['RV']='kg kg-1'
# hydrometeor mass mixing ratios:
mass_mixing_ratios=['RCP','RDP','RRP','RPP','RSP','RAP','RGP','RHP']
for variable in mass_mixing_ratios:
RAMS_Units[variable]='kg kg-1'
# hydrometeor number mixing ratios:
number_mixing_ratios=['CCP','CDP','CRP','CPP','CSP','CAP','CGP','CHP']
for variable in number_mixing_ratios:
RAMS_Units[variable]='kg-1'
#hydrometeor precipitation rates:
precipitation_rates=['PCPRR','PCPRD','PCPRS','PCPRH','PCPRP','PCPRA','PCPRG']
for variable in precipitation_rates:
    RAMS_Units[variable]='kg m-2 s-1'
# hydrometeor precipitation accumulated:
precipitation_accumulated=['ACCPR','ACCPD','ACCPS','ACCPH','ACCPP','ACCPA','ACCPG']
for variable in precipitation_accumulated:
    RAMS_Units[variable]='kg m-2'
# radiation:
RAMS_Units['LWUP']='W m-2'
RAMS_Units['LWDN']='W m-2'
RAMS_Units['SWUP']='W m-2'
RAMS_Units['SWDN']='W m-2'
# individual microphysics processes accumulated
RAMS_processes_mass=[
'NUCCLDRT',
'NUCICERT',
'INUCHOMRT',
'INUCCONTR',
'INUCIFNRT',
'INUCHAZRT',
'VAPCLDT',
'VAPRAINT',
'VAPPRIST',
'VAPSNOWT',
'VAPAGGRT',
'VAPGRAUT',
'VAPHAILT',
'VAPDRIZT',
'MELTSNOWT',
'MELTAGGRT',
'MELTGRAUT',
'MELTHAILT',
'RIMECLDSNOWT',
'RIMECLDAGGRT',
'RIMECLDGRAUT',
'RIMECLDHAILT',
'RAIN2PRT',
'RAIN2SNT',
'RAIN2AGT',
'RAIN2GRT',
'RAIN2HAT',
'AGGRSELFPRIST',
'AGGRSELFSNOWT',
'AGGRPRISSNOWT'
]
for variable in RAMS_processes_mass:
RAMS_Units[variable]='kg kg-1'
# grouped microphysics processes accumulated:
RAMS_processes_mass_grouped=[
'VAPLIQT',
'VAPICET',
'MELTICET',
'CLD2RAINT',
'RIMECLDT',
'RAIN2ICET',
'ICE2RAINT',
'AGGREGATET'
]
for variable in RAMS_processes_mass_grouped:
RAMS_Units[variable]='kg kg-1'
# grouped microphysics processes instantaneous:
RAMS_processes_mass_grouped_instantaneous=[
'VAPLIQ',
'VAPICE',
'MELTICE',
'CLD2RAIN',
'RIMECLD',
'RAIN2ICE',
'ICE2RAIN',
'NUCCLDR',
'NUCICER'
]
for variable in RAMS_processes_mass_grouped_instantaneous:
RAMS_Units[variable]='kg kg-1 s-1'
RAMS_standard_name=dict()
variable_list_derive=[
'air_temperature',
'air_pressure',
'temperature',
'air_density',
'OLR',
'LWC',
'IWC',
'LWP',
'IWP',
'IWV',
'airmass',
'airmass_path',
'surface_precipitation',
'surface_precipitation_average',
'surface_precipitation_accumulated',
'surface_precipitation_instantaneous',
'LWup_TOA',
'LWup_sfc',
'LWdn_TOA',
'LWdn_sfc',
'SWup_TOA',
'SWup_sfc',
'SWdn_TOA',
'SWdn_sfc'
]
def variable_list(filenames):
from iris import load
cubelist=load(filenames[0])
variable_list = [cube.name() for cube in cubelist]
return variable_list
def load(filenames,variable,mode='auto',**kwargs):
if variable in variable_list_derive:
variable_cube=deriveramscube(filenames,variable,**kwargs)
else:
variable_cube=loadramscube(filenames,variable,**kwargs)
# if mode=='auto':
# variable_list_file=variable_list(filenames)
# if variable in variable_list_file:
# variable_cube=loadramscube(filenames,variable,**kwargs)
# elif variable in variable_list_derive:
# variable_cube=deriveramscube(filenames,variable,**kwargs)
# elif variable in variable_dict_pseudonym.keys():
# variable_load=variable_dict_pseudonym[variable]
# variable_cube=loadramscube(filenames,variable_load,**kwargs)
# else:
# raise SystemExit('variable not found')
# elif mode=='file':
# variable_list_file=variable_list(filenames)
# if variable in variable_list_file:
# variable_cube=loadramscube(filenames,variable,**kwargs)
# elif mode=='derive':
# variable_cube=deriveramscube(filenames,variable,**kwargs)
# elif mode=='pseudonym':
# variable_load=variable_dict_pseudonym[variable]
# variable_cube=loadramscube(filenames,variable_load,**kwargs)
# else:
# print("mode=",mode)
# raise SystemExit('unknown mode')
return variable_cube
def loadramscube(filenames,variable,**kwargs):
if type(filenames) is list:
variable_cube=loadramscube_mult(filenames,variable,**kwargs)
elif type(filenames) is str:
variable_cube=loadramscube_single(filenames,variable,**kwargs)
else:
print("filenames=",filenames)
raise SystemExit('Type of input unknown: Must be str of list')
return variable_cube
def loadramscube_single(filenames,variable,constraint=None,add_coordinates=None):
from iris import load_cube
variable_cube=load_cube(filenames,variable)
variable_cube.units=RAMS_Units[variable]
variable_cube=addcoordinates(filenames, variable,variable_cube,add_coordinates=add_coordinates)
return variable_cube
def loadramscube_mult(filenames,variable,constraint=None,add_coordinates=None):
from iris.cube import CubeList
cube_list=[]
for i in range(len(filenames)):
cube_list.append(loadramscube_single(filenames[i],variable,add_coordinates=add_coordinates) )
for member in cube_list:
member.attributes={}
variable_cubes=CubeList(cube_list)
variable_cube=variable_cubes.merge_cube()
variable_cube=variable_cube.extract(constraint)
return variable_cube
def readramsheader(filename):
from numpy import array
searchfile = open(filename, "r")
coord_dict=dict()
variable_dict=dict()
coord_part=False
i_variable=0
n_variable=0
for i,line in enumerate(searchfile):
if (i==0):
num_variables=int(line[:-1])
if (i>0 and i<=num_variables):
line_split=line[:-1].split()
variable_dict[line_split[0]]=int(line_split[2])
if ('__') in line:
coord_part=True
i_variable=i
variable_name=line[2:-1]
variable_list=[]
if coord_part:
if (i==i_variable+1):
n_variable=int(line[:-1])
if n_variable>0:
if (i>=i_variable+2 and i<=i_variable+1+n_variable):
try:
value_out=array(float(line[:-1]))
except:
value_out=line[:-1]
variable_list.append(value_out)
if (i==i_variable+1+n_variable):
coord_dict[variable_name]=array(variable_list)
coord_part=False
        # else:
            # coord_part=False
    searchfile.close()
    return variable_dict, coord_dict
def addcoordinates(filename, variable,variable_cube,**kwargs):
filename_header=filename[:-5]+'head.txt'
domain=filename[-4]
variable_dict, coord_dict=readramsheader(filename_header)
variable_cube=add_dim_coordinates(filename, variable,variable_cube,variable_dict, coord_dict,domain,**kwargs)
variable_cube=add_aux_coordinates(filename, variable,variable_cube,variable_dict, coord_dict,domain,**kwargs)
return variable_cube
def make_time_coord(coord_dict):
from datetime import datetime,timedelta
from iris import coords
timestr=str(int(coord_dict['iyear1'][0]))+str(int(coord_dict['imonth1'][0])).zfill(2)+str(int(coord_dict['idate1'][0])).zfill(2)+str(int(coord_dict['itime1'][0])).zfill(4)
timeobj = datetime.strptime(timestr,"%Y%m%d%H%M")+timedelta(seconds=1)*coord_dict['time'][0]
if timeobj<datetime(100,1,1):
base_date=datetime(1,1,1)
else:
base_date=datetime(1970,1,1)
time_units='days since '+ base_date.strftime('%Y-%m-%d')
time_days=(timeobj - base_date).total_seconds() / timedelta(days=1).total_seconds()
time_coord=coords.DimCoord(time_days, standard_name='time', long_name='time', var_name='time', units=time_units, bounds=None, attributes=None, coord_system=None, circular=False)
return time_coord
def make_model_level_number_coordinate(n_level):
from iris import coords
from numpy import arange
MODEL_LEVEL_NUMBER=arange(0,n_level)
model_level_number=coords.AuxCoord(MODEL_LEVEL_NUMBER, standard_name='model_level_number', units='1')
return model_level_number
def add_dim_coordinates(filename, variable,variable_cube,variable_dict, coord_dict,domain,add_coordinates=None):
from iris import coords
import numpy as np
# from iris import coord_systems
# coord_system=coord_systems.LambertConformal(central_lat=MOAD_CEN_LAT, central_lon=CEN_LON, false_easting=0.0, false_northing=0.0, secant_latitudes=(TRUELAT1, TRUELAT2))
coord_system=None
if (variable_dict[variable]==3):
time_coord=make_time_coord(coord_dict)
variable_cube.add_aux_coord(time_coord)
z_coord=coords.DimCoord(coord_dict['ztn01'], standard_name='geopotential_height', long_name='z', var_name='z', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(z_coord,0)
model_level_number_coord=make_model_level_number_coordinate(len(z_coord.points))
variable_cube.add_aux_coord(model_level_number_coord,0)
x_coord=coords.DimCoord(np.arange(len(coord_dict['xtn0'+domain])), long_name='x', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(x_coord,2)
y_coord=coords.DimCoord(np.arange(len(coord_dict['ytn0'+domain])), long_name='y', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(y_coord,1)
projection_x_coord=coords.DimCoord(coord_dict['xtn0'+domain], standard_name='projection_x_coordinate', long_name='x', var_name='x', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_x_coord,(2))
projection_y_coord=coords.DimCoord(coord_dict['ytn0'+domain], standard_name='projection_y_coordinate', long_name='y', var_name='y', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_y_coord,(1))
elif (variable_dict[variable]==2):
x_coord=coords.DimCoord(np.arange(len(coord_dict['xtn0'+domain])), long_name='x', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(x_coord,1)
y_coord=coords.DimCoord(np.arange(len(coord_dict['ytn0'+domain])), long_name='y', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(y_coord,0)
projection_x_coord=coords.DimCoord(coord_dict['xtn0'+domain], standard_name='projection_x_coordinate', long_name='x', var_name='x', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_x_coord,(1))
projection_y_coord=coords.DimCoord(coord_dict['ytn0'+domain], standard_name='projection_y_coordinate', long_name='y', var_name='y', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_y_coord,(0))
time_coord=make_time_coord(coord_dict)
variable_cube.add_aux_coord(time_coord)
return variable_cube
def add_aux_coordinates(filename,variable,variable_cube,variable_dict, coord_dict,domain,**kwargs):
from iris import load_cube,coords
coord_system=None
latitude=load_cube(filename,'GLAT').core_data()
longitude=load_cube(filename,'GLON').core_data()
lat_coord=coords.AuxCoord(latitude, standard_name='latitude', long_name='latitude', var_name='latitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
lon_coord=coords.AuxCoord(longitude, standard_name='longitude', long_name='longitude', var_name='longitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
if (variable_dict[variable]==3):
variable_cube.add_aux_coord(lon_coord,(1,2))
variable_cube.add_aux_coord(lat_coord,(1,2))
elif (variable_dict[variable]==2):
variable_cube.add_aux_coord(lon_coord,(0,1))
variable_cube.add_aux_coord(lat_coord,(0,1))
# add_coordinates=kwargs.pop('add_coordinates')
# if type(add_coordinates)!=list:
# add_coordinates1=add_coordinates
# add_coordinates=[]
# add_coordinates.append(add_coordinates1)
# for coordinate in add_coordinates:
# if coordinate=='latlon':
# latitude=load_cube(filename,'GLAT').data
# longitude=load_cube(filename,'GLON').data
# lat_coord=coords.AuxCoord(latitude, standard_name='latitude', long_name='latitude', var_name='latitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
# lon_coord=coords.AuxCoord(longitude, standard_name='longitude', long_name='longitude', var_name='longitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
# if (variable_dict[variable]==3):
# variable_cube.add_aux_coord(lon_coord,(1,2))
# variable_cube.add_aux_coord(lat_coord,(1,2))
# elif (variable_dict[variable]==2):
# variable_cube.add_aux_coord(lon_coord,(0,1))
# variable_cube.add_aux_coord(lat_coord,(0,1))
return variable_cube
def calculate_rams_LWC(filenames,**kwargs):
RCP=loadramscube(filenames, 'RCP',**kwargs)
RDP=loadramscube(filenames, 'RDP',**kwargs)
RRP=loadramscube(filenames, 'RRP',**kwargs)
LWC=RCP+RDP+RRP
LWC.rename('liquid water content')
#LWC.rename('mass_concentration_of_liquid_water_in_air')
return LWC
#
def calculate_rams_IWC(filenames,**kwargs):
RPP=loadramscube(filenames, 'RPP',**kwargs)
RSP=loadramscube(filenames, 'RSP',**kwargs)
RAP=loadramscube(filenames, 'RAP',**kwargs)
RGP=loadramscube(filenames, 'RGP',**kwargs)
RHP=loadramscube(filenames, 'RHP',**kwargs)
IWC=RPP+RSP+RAP+RGP+RHP
IWC.rename('ice water content')
#IWC.rename('mass_concentration_of_ice_water_in_air')
return IWC
def calculate_rams_airmass(filenames,**kwargs):
from iris.coords import AuxCoord
from numpy import diff
rho=loadramscube(filenames,'DN0',**kwargs)
z=rho.coord('geopotential_height')
z_dim=rho.coord_dims('geopotential_height')
z_diff=AuxCoord(mydiff(z.points),var_name='z_diff')
rho.add_aux_coord(z_diff,data_dims=z_dim)
dx=diff(rho.coord('projection_x_coordinate').points[0:2])
dy=diff(rho.coord('projection_y_coordinate').points[0:2])
Airmass=rho*rho.coord('z_diff')*dx*dy
Airmass.remove_coord('z_diff')
Airmass.rename('mass_of_air')
Airmass.units='kg'
return Airmass
def calculate_rams_airmass_path(filenames,**kwargs):
from iris.coords import AuxCoord
rho=loadramscube(filenames,'DN0',**kwargs)
z=rho.coord('geopotential_height')
z_dim=rho.coord_dims('geopotential_height')
z_diff=AuxCoord(mydiff(z.points),var_name='z_diff')
rho.add_aux_coord(z_diff,data_dims=z_dim)
Airmass=rho*rho.coord('z_diff')
Airmass.remove_coord('z_diff')
Airmass.rename('airmass_path')
Airmass.units='kg m-2'
return Airmass
def calculate_rams_air_temperature(filenames,**kwargs):
from iris.coords import AuxCoord
theta=loadramscube(filenames,'THETA',**kwargs)
pi=loadramscube(filenames,'PI',**kwargs)
cp=AuxCoord(1004,long_name='cp',units='J kg-1 K-1')
t=theta*pi/cp
t.rename('air_temperature')
return t
def calculate_rams_air_pressure(filenames,**kwargs):
from iris.coords import AuxCoord
pi=loadramscube(filenames,'PI',**kwargs)
cp=AuxCoord(1004,long_name='cp',units='J kg-1 K-1')
rd=AuxCoord(287,long_name='rd',units='J kg-1 K-1')
p = 100000 * (pi/cp)**(cp.points/rd.points) # Pressure in Pa
p.rename('air_pressure')
p.units='Pa'
return p
def calculate_rams_density(filenames,**kwargs):
rho=loadramscube(filenames,'DN0',**kwargs)
rho.rename('air_density')
rho.units='kg m-3'
return rho
def calculate_rams_LWP(filenames,**kwargs):
from iris.analysis import SUM
LWC=deriveramscube(filenames,'LWC',**kwargs)
Airmass=deriveramscube(filenames,'airmass_path',**kwargs)
LWP=(LWC*Airmass).collapsed(('geopotential_height'),SUM)
LWP.rename('liquid water path')
#LWP.rename('atmosphere_mass_content_of_cloud_liquid_water')
return LWP
#
def calculate_rams_IWP(filenames,**kwargs):
from iris.analysis import SUM
IWC=deriveramscube(filenames,'IWC',**kwargs)
Airmass=deriveramscube(filenames,'airmass_path',**kwargs)
IWP=(IWC*Airmass).collapsed(('geopotential_height'),SUM)
IWP.rename('ice water path')
#IWP.rename('atmosphere_mass_content_of_cloud_ice_water')
return IWP
def calculate_rams_IWV(filenames,**kwargs):
from iris.analysis import SUM
RV=loadramscube(filenames,'RV',**kwargs)
Airmass=deriveramscube(filenames,'airmass_path',**kwargs)
IWV=(RV*Airmass).collapsed(('geopotential_height'),SUM)
IWV.rename('integrated water vapor')
#IWP.rename('atmosphere_mass_content_of_cloud_ice_water')
return IWV
# Radiation fluxes at the top of the atmosphere and at the surface
def calculate_rams_LWup_TOA(filenames,**kwargs):
from iris import Constraint
LWUP=loadramscube(filenames,'LWUP',**kwargs)
LWup_TOA=LWUP.extract(Constraint(model_level_number=LWUP.coord('model_level_number').points[-1]))
LWup_TOA.rename('LWup_TOA')
return LWup_TOA
def calculate_rams_LWup_sfc(filenames,**kwargs):
from iris import Constraint
LWUP=loadramscube(filenames,'LWUP',**kwargs)
LWup_sfc=LWUP.extract(Constraint(model_level_number=0))
LWup_sfc.rename('LWup_sfc')
return LWup_sfc
def calculate_rams_LWdn_TOA(filenames,**kwargs):
from iris import Constraint
LWDN=loadramscube(filenames,'LWDN',**kwargs)
LWdn_TOA=LWDN.extract(Constraint(model_level_number=LWDN.coord('model_level_number').points[-1]))
LWdn_TOA.rename('LWdn_TOA')
return LWdn_TOA
def calculate_rams_LWdn_sfc(filenames,**kwargs):
from iris import Constraint
LWDN=loadramscube(filenames,'LWDN',**kwargs)
LWdn_sfc=LWDN.extract(Constraint(model_level_number=0))
LWdn_sfc.rename('LWdn_sfc')
return LWdn_sfc
def calculate_rams_SWup_TOA(filenames,**kwargs):
from iris import Constraint
SWUP=loadramscube(filenames,'SWUP',**kwargs)
SWup_TOA=SWUP.extract(Constraint(model_level_number=SWUP.coord('model_level_number').points[-1]))
SWup_TOA.rename('SWup_TOA')
return SWup_TOA
def calculate_rams_SWup_sfc(filenames,**kwargs):
from iris import Constraint
SWUP=loadramscube(filenames,'SWUP',**kwargs)
SWup_sfc=SWUP.extract(Constraint(model_level_number=0))
SWup_sfc.rename('SWup_sfc')
return SWup_sfc
def calculate_rams_SWdn_TOA(filenames,**kwargs):
from iris import Constraint
SWDN=loadramscube(filenames,'SWDN',**kwargs)
SWdn_TOA=SWDN.extract(Constraint(model_level_number=SWDN.coord('model_level_number').points[-1]))
SWdn_TOA.rename('SWdn_TOA')
return SWdn_TOA
def calculate_rams_SWdn_sfc(filenames,**kwargs):
from iris import Constraint
SWDN=loadramscube(filenames,'SWDN',**kwargs)
SWdn_sfc=SWDN.extract(Constraint(model_level_number=0))
SWdn_sfc.rename('SWdn_sfc')
return SWdn_sfc
def calculate_rams_surface_precipitation_instantaneous(filenames,**kwargs):
PCPRR=loadramscube(filenames,'PCPRR',**kwargs)
PCPRD=loadramscube(filenames,'PCPRD',**kwargs)
PCPRS=loadramscube(filenames,'PCPRS',**kwargs)
PCPRP=loadramscube(filenames,'PCPRP',**kwargs)
PCPRA=loadramscube(filenames,'PCPRA',**kwargs)
PCPRH=loadramscube(filenames,'PCPRH',**kwargs)
PCPRG=loadramscube(filenames,'PCPRG',**kwargs)
surface_precip=PCPRR+PCPRD+PCPRS+PCPRP+PCPRA+PCPRG+PCPRH
surface_precip.rename('surface_precipitation_instantaneous')
return surface_precip
def calculate_rams_surface_precipitation_accumulated(filenames,**kwargs):
ACCPR=loadramscube(filenames,'ACCPR',**kwargs)
ACCPD=loadramscube(filenames,'ACCPD',**kwargs)
ACCPS=loadramscube(filenames,'ACCPS',**kwargs)
ACCPP=loadramscube(filenames,'ACCPP',**kwargs)
ACCPA=loadramscube(filenames,'ACCPA',**kwargs)
ACCPH=loadramscube(filenames,'ACCPH',**kwargs)
ACCPG=loadramscube(filenames,'ACCPG',**kwargs)
surface_precip_acc=ACCPR+ACCPD+ACCPS+ACCPP+ACCPA+ACCPG+ACCPH
surface_precip_acc.rename('surface_precipitation_accumulated')
#IWP.rename('atmosphere_mass_content_of_cloud_ice_water')
return surface_precip_acc
def calculate_rams_surface_precipitation_average(filenames,**kwargs):
from dask.array import concatenate
surface_precip_accum=calculate_rams_surface_precipitation_accumulated(filenames,**kwargs)
    # calculate timestep in hours
time_coord=surface_precip_accum.coord('time')
dt=(time_coord.units.num2date(time_coord.points[1])-time_coord.units.num2date(time_coord.points[0])).total_seconds()/3600.
    # divide the difference in accumulated precip between timesteps (in mm) by the timestep (in h) to get mm/h:
surface_precip=surface_precip_accum
surface_precip.data=concatenate((0*surface_precip.core_data()[[1],:,:],surface_precip.core_data()[1:,:,:]-surface_precip.core_data()[:-1:,:,:]),axis=0)/dt
surface_precip.rename('surface_precipitation_average')
surface_precip.units= 'mm/h'
return surface_precip
def mydiff(A):
import numpy as np
d1=np.diff(A)
d=np.zeros(A.shape)
d[0]=d1[0]
d[1:-1]=0.5*(d1[0:-1]+d1[1:])
d[-1]=d1[-1]
return d
def deriveramscube(filenames,variable,**kwargs):
# if variable in ['temperature','air_temperature']:
# variable_cube=calculate_rams_temperature(filenames,**kwargs)
# #variable_cube_out=addcoordinates(filenames, 'T',variable_cube,add_coordinates)
# elif variable == 'density':
# variable_cube=calculate_rams_density(filenames,**kwargs)
if variable == 'LWC':
variable_cube=calculate_rams_LWC(filenames,**kwargs)
elif variable == 'IWC':
variable_cube=calculate_rams_IWC(filenames,**kwargs)
elif variable == 'LWP':
variable_cube=calculate_rams_LWP(filenames,**kwargs)
elif variable == 'IWP':
variable_cube=calculate_rams_IWP(filenames,**kwargs)
elif variable == 'IWV':
variable_cube=calculate_rams_IWV(filenames,**kwargs)
elif variable == 'airmass':
variable_cube=calculate_rams_airmass(filenames,**kwargs)
elif variable == 'air_temperature':
variable_cube=calculate_rams_air_temperature(filenames,**kwargs)
elif variable=='air_pressure':
variable_cube=calculate_rams_air_pressure(filenames,**kwargs)
elif variable == 'air_density':
variable_cube=calculate_rams_density(filenames,**kwargs)
elif variable == 'airmass_path':
variable_cube=calculate_rams_airmass_path(filenames,**kwargs)
elif variable == 'surface_precipitation_average':
variable_cube=calculate_rams_surface_precipitation_average(filenames,**kwargs)
elif variable == 'surface_precipitation_accumulated':
variable_cube=calculate_rams_surface_precipitation_accumulated(filenames,**kwargs)
elif (variable == 'surface_precipitation_instantaneous') or (variable == 'surface_precipitation'):
variable_cube=calculate_rams_surface_precipitation_instantaneous(filenames,**kwargs)
elif (variable == 'LWup_TOA'):
variable_cube=calculate_rams_LWup_TOA(filenames,**kwargs)
elif (variable == 'LWup_sfc'):
variable_cube=calculate_rams_LWup_sfc(filenames,**kwargs)
elif (variable == 'LWdn_TOA'):
variable_cube=calculate_rams_LWdn_TOA(filenames,**kwargs)
elif (variable == 'LWdn_sfc'):
variable_cube=calculate_rams_LWdn_sfc(filenames,**kwargs)
elif (variable == 'SWup_TOA'):
variable_cube=calculate_rams_SWup_TOA(filenames,**kwargs)
elif (variable == 'SWup_sfc'):
variable_cube=calculate_rams_SWup_sfc(filenames,**kwargs)
elif (variable == 'SWdn_TOA'):
variable_cube=calculate_rams_SWdn_TOA(filenames,**kwargs)
elif (variable == 'SWdn_sfc'):
variable_cube=calculate_rams_SWdn_sfc(filenames,**kwargs)
else:
raise NameError(variable, 'is not a known variable')
return variable_cube
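
# Illustrative usage sketch (assumes RAMS output files readable by the
# loadramscube helpers defined earlier in this module):
#     precip = deriveramscube(filenames, 'surface_precipitation_average')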
| 1.945313 | 2 |
recipe/run_test.py | regro-cf-autotick-bot/fractopo-feedstock | 0 | 12785919 | <reponame>regro-cf-autotick-bot/fractopo-feedstock
"""
Simple test case for fractopo conda build.
"""
import geopandas as gpd
from fractopo import Network
kb11_network = Network(
name="KB11",
trace_gdf=gpd.read_file(
"https://raw.githubusercontent.com/nialov/"
"fractopo/master/tests/sample_data/KB11/KB11_traces.geojson"
),
area_gdf=gpd.read_file(
"https://raw.githubusercontent.com/nialov/"
"fractopo/master/tests/sample_data/KB11/KB11_area.geojson"
),
truncate_traces=True,
circular_target_area=False,
determine_branches_nodes=True,
snap_threshold=0.001,
)
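# Touch the parameters property so that any error in the parameter
# computation surfaces while this test script runs.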
kb11_network.parameters
| 1.703125 | 2 |
unittest/t_region_reference.py | bendichter/api-python | 32 | 12785920 | <gh_stars>10-100
#!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as utils
from nwb import value_summary as vs
import numpy as np
import h5py
from sys import version_info
import re
# Test creation of region references
# Region references are references to regions in a dataset. They can be
# stored in datasets or attributes. If storing in a dataset, apparently
# region references must be stored in an array (an array of region references).
# i.e. it id not allowed to store a region reference in a dataset without
# it being in an array.
# The NWB format does not currently include region references as part
# of the standard. The reason for this, is that reading region references
# requires a different procedure than reading data stored directly in
# datasets and attributes and this requires additional complexity
# in the software to read NWB files.
# Nevertheless, there maybe instances in which region references could be
# useful when organizing data in an NWB file.
# This script demonstrates how to create region references
# using the h5py interface along with the NWB API and also reading region
# references.
def create_nwb_file():
if __file__.startswith("./"):
fname = "s" + __file__[3:-3] + ".nwb"
else:
fname = "s" + __file__[1:-3] + ".nwb"
settings = {}
settings["file_name"] = fname
settings["identifier"] = utils.create_identifier("Region reference test")
settings["mode"] = "w"
settings["start_time"] = "Sat Jul 04 2015 3:14:16"
settings["description"] = "Test file with region reference"
settings["verbosity"] = "none"
f = nwb_file.open(**settings)
return (f, fname)
# return value referenced by region reference ref
# f is an h5py File object
def get_ref_value(ref, f):
assert isinstance(f, h5py.File)
assert isinstance(ref, h5py.h5r.RegionReference)
fid = f.id
reftype = h5py.h5r.get_obj_type(ref, fid)
assert reftype == h5py.h5g.DATASET
refname = h5py.h5r.get_name(ref, fid) # path to target of reference
refds = f[refname] # referenced dataset
val = refds[ref] # get referenced region
return val
# display information about region reference. Used for development.
# ref is the region reference, f is the h5py File object
def show_ref_info(ref, f):
fid = f.id
reftype = h5py.h5r.get_obj_type(ref, fid)
refregion = h5py.h5r.get_region(ref, fid)
refname = h5py.h5r.get_name(ref, fid)
refderef = h5py.h5r.dereference(ref, fid)
assert reftype == h5py.h5g.DATASET
print("reftype=%i, refregion=%s, refname=%s, refderef=%s" % (reftype, refregion, refname, refderef))
refds = f[refname]
refreg = refds[ref]
refreg_shape = refreg.shape
refreg_dype = refreg.dtype
print("Referenced region, shape=%s, type=%s, val=" % (refreg_shape, refreg_dype))
print("%s" % refreg)
# make value summary
hash = vs.hashval(refregion.encode())
value_summary = "<Region Reference: target='%s', hash='%s'>" % (refname, hash)
print("value_summary=%s" % value_summary)
    expected_value = np.arange(2., 14., 2.)  # [ 2.  4.  6.  8. 10. 12.]
    if not values_match(expected_value, refreg):
        print("expected values NOT found")
    # import pdb; pdb.set_trace()  # development breakpoint; enable when debugging this helper
def test_region_reference():
f, fname = create_nwb_file()
# make some fake raw data
raw = f.make_group("<TimeSeries>", name="raw_data", path="/acquisition/timeseries/")
raw_data = np.arange(0.0, 100.0, 0.5)
rd = raw.set_dataset("data", raw_data, attrs={"unit": "watt", "conversion":1.0, "resolution": 0.1,
"source": "microelectrodes"})
raw.set_dataset("starting_time", 0.1, attrs={"rate":0.1})
raw.set_dataset("num_samples", 1000)
# create a TimeSeries which has data referencing the raw data using a region reference
ag = f.make_group("analysis")
ag2 = ag.make_custom_group("<TimeSeries>", name="regref_data", attrs={"unit": "watt",
"conversion":1.0, "resolution": 0.1, "source": "raw_data"})
# below used to set as link
# ag2.set_dataset("data", rd, attrs={"unit": "watt", "conversion":1.0, "resolution": 0.1})
# set as region reference
rawds = f.file_pointer[rd.full_path] # h5py dataset
# create region reference
raw_regref = rawds.regionref[4:26:4]
# create 1-element array containing region_reference
ref_dtype = h5py.special_dtype(ref=h5py.h5r.RegionReference)
rrds = np.array([raw_regref,], dtype=ref_dtype)
# get h5py parent group for the dataset that will have the region reference
ag2_h5py = f.file_pointer[ag2.full_path]
ag2ds = ag2_h5py.create_dataset("raw_rref", data=rrds)
# set an attribute to the region reference
ag2ds.attrs["raw_rref"] = raw_regref
# add TimeSeries datasets. Note, dataset 'data' is normally required, not currently
# checked for since TimeSeries group (ag2) is a custom group
ag2.set_dataset("starting_time", 0.1, attrs={"rate":0.1})
ag2.set_dataset("num_samples", 10)
f.close()
# now try to read region references
f = h5py.File(fname, "r")
path = "/analysis/regref_data/raw_rref"
rrds_in = f[path]
val = rrds_in.value
if not (isinstance(val, np.ndarray) and val.shape == (1,) and val.size == 1 and
isinstance(val[0], h5py.h5r.RegionReference)):
raise SystemError("Failed to read RegionReference, found val=%s, type=%s" % (val, type(val)))
ref = val[0]
# show_ref_info(ref, f)
found = get_ref_value(ref, f)
expected = np.arange(2., 14., 2.); # [ 2. 4. 6. 8. 10. 12.]
errors = []
if not values_match(expected, found):
errors.append("Region Reference from dataset does not match. Expected=%s, found=%s" % (
expected, found))
# attribute region reference
aref = rrds_in.attrs["raw_rref"]
found = get_ref_value(aref, f)
if not values_match(expected, found):
errors.append("Region Reference from attribute does not match. Expected=%s, found=%s" % (
expected, found))
f.close()
if len(errors) > 0:
raise SystemError("Errors found:\n%s" % "\n".join(errors))
print("%s PASSED" % __file__)
# print ("Dataset ref info:")
# show_ref_info(ref, f)
# aref = rrds_in.attrs["raw_rref"]
# print ("Attribute ref info:")
# show_ref_info(aref, f)
# # import pdb; pdb.set_trace()
# f.close()
def vals_match(a, b):
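    """Compare two values; collapse an elementwise array comparison with .all()."""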
match = a == b
if not isinstance(match, bool):
match = match.all()
return match
def make_str(val):
# convert val from bytes to unicode string
if isinstance(val, (list, tuple, np.ndarray)) and len(val) > 0:
return [make_str(v) for v in val]
    elif isinstance(val, (bytes, np.bytes_)):
        return val.decode('utf-8')
    else:
        # values that need no conversion (ints, floats, str, empty sequences) pass through
        return val
def values_match(expected, found):
match = vals_match(expected, found)
if not match and version_info[0] > 2:
# try matching after converting bytes to unicode (python 3 strings)
# in python 3, default string type is unicode, but these are stored as
# ascii bytes if possible in the hdf5 file, and read back as bytes
# for match to work, they must be converted back to unicode strings
match = vals_match(expected, make_str(found))
return match
# display_examples()
test_region_reference()
| 2.390625 | 2 |
src/pretix/control/forms/renderers.py | pajowu/pretix | 1 | 12785921 | from bootstrap3.renderers import FieldRenderer
from bootstrap3.text import text_value
from django.forms import CheckboxInput
from django.forms.utils import flatatt
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext
from i18nfield.forms import I18nFormField
def render_label(content, label_for=None, label_class=None, label_title='', optional=False):
"""
Render a label with content
"""
attrs = {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
builder = '<{tag}{attrs}>{content}{opt}</{tag}>'
return format_html(
builder,
tag='label',
attrs=mark_safe(flatatt(attrs)) if attrs else '',
opt=mark_safe('<br><span class="optional">{}</span>'.format(pgettext('form', 'Optional'))) if optional else '',
content=text_value(content),
)
class ControlFieldRenderer(FieldRenderer):
def __init__(self, *args, **kwargs):
kwargs['layout'] = 'horizontal'
super().__init__(*args, **kwargs)
def add_label(self, html):
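        """Prepend the rendered label; mark the field optional unless it is
        required (checkbox fields are never marked optional)."""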
label = self.get_label()
if hasattr(self.field.field, '_required'):
# e.g. payment settings forms where a field is only required if the payment provider is active
required = self.field.field._required
elif isinstance(self.field.field, I18nFormField):
required = self.field.field.one_required
else:
required = self.field.field.required
html = render_label(
label,
label_for=self.field.id_for_label,
label_class=self.get_label_class(),
optional=not required and not isinstance(self.widget, CheckboxInput)
) + html
return html
| 1.960938 | 2 |
tests/integration/dao/test_dao_aluno.py | douglasdcm/easy_db | 0 | 12785922 | from src.dao.dao_aluno import DaoAluno
from tests.massa_dados import aluno_nome_1
from src.enums.enums import Situacao
from src.model.aluno import Aluno
from tests.massa_dados import materia_nome_2, materia_nome_3
class TestDaoAluno:
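    """Integration tests for DaoAluno.

    Identifiers follow the Portuguese domain language: aluno = student,
    materia = subject, pega = get, cria = create.
    """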
def _setup_aluno(self, cria_banco, id=1, nome=aluno_nome_1, cr=0,
situacao=Situacao.em_curso.value):
aluno, dao = self._salva_aluno_banco(cria_banco, id, nome, cr,
situacao)
actual = dao.pega_tudo()
return actual, aluno
def _salva_aluno_banco(self, cria_banco, id, nome, cr, situacao):
aluno = Aluno(nome)
aluno.define_cr(cr)
aluno.define_id(id)
aluno.define_situacao(situacao)
dao = DaoAluno(aluno, cria_banco)
dao.salva()
return aluno, dao
def _setup_lista_alunos(self, cria_banco, id_=3,
situacao=Situacao.em_curso.value,
cr=0, nome=None):
self._setup_aluno(cria_banco)
self._setup_aluno(cria_banco)
expected, actual = self._setup_aluno(cria_banco, id=id_,
situacao=situacao,
cr=cr, nome=nome)
return expected, actual
def test_aluno_pode_ser_atualizado_banco(self, cria_banco, cria_massa_dados,
cria_curso_com_materias):
cria_massa_dados
id_ = "1"
aluno = DaoAluno(None, cria_banco).pega_por_id(id_)
curso = cria_curso_com_materias
materias = {materia_nome_2: 7, materia_nome_3: 9}
expected = 8
aluno.inscreve_curso(curso).atualiza_materias_cursadas(materias)
aluno.pega_coeficiente_rendimento(auto_calculo=True)
DaoAluno(aluno, cria_banco).atualiza(id_)
aluno = DaoAluno(None, cria_banco).pega_por_id(id_)
actual = aluno.pega_coeficiente_rendimento()
assert actual == expected
def test_dao_pega_por_id_retorna_objeto_aluno_com_id_correto(self,
cria_banco):
id_ = 3
_, expected = self._setup_lista_alunos(cria_banco, id_)
actual = DaoAluno(None, cria_banco).pega_por_id(id_)
assert actual.pega_id() == expected.pega_id()
def test_lista_alunos_recuperada_banco_com_nome_correto(self, cria_banco):
indice = 2
nome = aluno_nome_1
expected, actual = self._setup_lista_alunos(cria_banco, nome=nome)
assert actual.pega_nome() == expected[indice].pega_nome()
def test_lista_alunos_recuperada_banco_com_cr_correto(self, cria_banco):
indice = 2
cr = 9
expected, actual = self._setup_lista_alunos(cria_banco, cr=cr)
assert actual.pega_coeficiente_rendimento() == \
expected[indice].pega_coeficiente_rendimento()
def test_lista_alunos_recuperada_banco_com_situacao_correta(self,
cria_banco):
indice = 2
situacao = Situacao.reprovado.value
expected, actual = self._setup_lista_alunos(cria_banco,
situacao=situacao)
assert actual.pega_situacao() == expected[indice].pega_situacao()
def test_lista_alunos_recuperada_banco_com_id_correto(self, cria_banco):
indice = 2
expected, actual = self._setup_lista_alunos(cria_banco)
assert actual.pega_id() == expected[indice].pega_id()
def test_situacao_aluno_recuperado_banco(self, cria_banco):
situacao = "trancado"
expected, actual = self._setup_aluno(cria_banco, situacao=situacao)
assert actual.pega_situacao() == expected[0].pega_situacao()
def test_id_aluno_recuperado_banco(self, cria_banco):
id_ = 1
expected, actual = self._setup_aluno(cria_banco, id=id_)
assert actual.pega_id() == expected[0].pega_id()
def test_cr_diferente_zero_retornado_banco(self, cria_banco):
cr = 7
expected, actual = self._setup_aluno(cria_banco, cr)
assert actual.pega_coeficiente_rendimento() == \
expected[0].pega_coeficiente_rendimento()
def test_coeficiente_rendimento_objeto_aluno_recuperado_banco(self,
cria_banco):
actual, expected = self._setup_aluno(cria_banco)
assert actual[0].pega_coeficiente_rendimento() == \
expected.pega_coeficiente_rendimento()
def test_situacao_objeto_aluno_recuperado_banco(self, cria_banco):
actual, expected = self._setup_aluno(cria_banco)
assert actual[0].pega_situacao() == expected.pega_situacao()
def test_nome_objeto_aluno_recuperado_banco(self, cria_banco):
actual, expected = self._setup_aluno(cria_banco)
assert actual[0].pega_nome() == expected.pega_nome()
| 2.4375 | 2 |
calaccess_processed/models/tracking.py | dwillis/django-calaccess-processed-data | 1 | 12785923 | <reponame>dwillis/django-calaccess-processed-data
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for tracking processing of CAL-ACCESS snapshots over time.
"""
from __future__ import unicode_literals
from django.db import models
from hurry.filesize import size as sizeformat
from django.utils.encoding import python_2_unicode_compatible
from calaccess_processed import archive_directory_path
@python_2_unicode_compatible
class ProcessedDataVersion(models.Model):
"""
A version of CAL-ACCESS processed data.
"""
raw_version = models.OneToOneField(
'calaccess_raw.RawDataVersion',
related_name='processed_version',
verbose_name='raw data version',
help_text='Foreign key referencing the raw data version processed'
)
process_start_datetime = models.DateTimeField(
null=True,
verbose_name='date and time processing started',
help_text='Date and time when the processing of the CAL-ACCESS version'
' started',
)
process_finish_datetime = models.DateTimeField(
null=True,
        verbose_name='date and time processing finished',
help_text='Date and time when the processing of the CAL-ACCESS version'
' finished',
)
zip_archive = models.FileField(
blank=True,
max_length=255,
upload_to=archive_directory_path,
verbose_name='cleaned files zip archive',
help_text='An archive zip of processed files'
)
zip_size = models.BigIntegerField(
null=True,
        verbose_name='size of zip (in bytes)',
help_text='The expected size (in bytes) of the zip of processed files'
)
class Meta:
"""
Meta model options.
"""
app_label = 'calaccess_processed'
verbose_name = 'TRACKING: CAL-ACCESS processed data version'
ordering = ('-process_start_datetime',)
get_latest_by = 'process_start_datetime'
def __str__(self):
return str(self.raw_version.release_datetime)
@property
def update_completed(self):
"""
Check if the database update to the version completed.
Return True or False.
"""
if self.process_finish_datetime:
is_completed = True
else:
is_completed = False
return is_completed
@property
def update_stalled(self):
"""
Check if the database update to the version started but did not complete.
Return True or False.
"""
        if self.process_start_datetime and not self.process_finish_datetime:
is_stalled = True
else:
is_stalled = False
return is_stalled
def pretty_zip_size(self):
"""
Returns a prettified version (e.g., "725M") of the zip's size.
"""
if not self.zip_size:
return None
        return sizeformat(self.zip_size)
pretty_zip_size.short_description = 'processed zip size'
    pretty_zip_size.admin_order_field = 'zip_size'
@python_2_unicode_compatible
class ProcessedDataFile(models.Model):
"""
A data file included in a processed version of CAL-ACCESS.
"""
version = models.ForeignKey(
'ProcessedDataVersion',
on_delete=models.CASCADE,
related_name='files',
verbose_name='processed data version',
help_text='Foreign key referencing the processed version of CAL-ACCESS'
)
file_name = models.CharField(
max_length=100,
verbose_name='processed data file name',
help_text='Name of the processed data file without extension',
)
process_start_datetime = models.DateTimeField(
null=True,
verbose_name='date and time processing started',
help_text='Date and time when the processing of the file started',
)
process_finish_datetime = models.DateTimeField(
null=True,
verbose_name='date and time processing finished',
help_text='Date and time when the processing of the file finished',
)
records_count = models.IntegerField(
null=False,
default=0,
verbose_name='clean records count',
help_text='Count of records in the processed file'
)
file_archive = models.FileField(
blank=True,
max_length=255,
upload_to=archive_directory_path,
verbose_name='archive of processed file',
help_text='An archive of the processed file'
)
file_size = models.BigIntegerField(
null=False,
default=0,
verbose_name='size of processed data file (in bytes)',
help_text='Size of the processed file (in bytes)'
)
class Meta:
"""
Meta model options.
"""
app_label = 'calaccess_processed'
unique_together = (('version', 'file_name'),)
verbose_name = 'TRACKING: processed CAL-ACCESS data file'
ordering = ('-version_id', 'file_name',)
def __str__(self):
return self.file_name
def pretty_file_size(self):
"""
Returns a prettified version (e.g., "725M") of the processed file's size.
"""
return sizeformat(self.file_size)
pretty_file_size.short_description = 'processed file size'
    pretty_file_size.admin_order_field = 'file_size'
| 1.90625 | 2 |
examples/mt_model.py | Jincheng-Sun/Kylearn | 0 | 12785924 | <filename>examples/mt_model.py
from framework.model import Model
import tensorflow as tf
class Mt_model(Model):
def __init__(self, Network, ckpt_path, tsboard_path, x_shape, num_classes):
super().__init__(Network, ckpt_path, tsboard_path)
with tf.name_scope('inputs'):
self.features = tf.placeholder(dtype=tf.float32, shape=x_shape, name= 'features')
self.labels = tf.placeholder(dtype=tf.int32, shape=(None,), name='labels')
self.is_training = tf.placeholder(dtype=tf.bool, shape=(), name='is_training')
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self.graph = tf.Graph()
self.session = None
with self.graph.as_default():
self.step = tf.train.get_or_create_global_step()
tf.add_to_collection('global_variables', self.step)
self.num_classes = num_classes
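        # Caveat: the placeholders above are created in TensorFlow's default
        # graph, while self.step lives in self.graph; callers must keep their
        # graph usage consistent.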
def initialize_variables(self):
# with tf.get_collection("global_variables"):
pass
    def loss(self):
        # Minimal sketch, assuming self.network returns per-class logits:
        # finish the labeled branch with a sparse cross-entropy loss.
        logits_labeled = self.network(input=self.features,
                                      num_classes=self.num_classes,
                                      reuse=True,
                                      scope='res_43',
                                      is_training=True)
        return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.labels, logits=logits_labeled))
def load_model(self):
pass
def train(self):
pass
def evaluate(self):
pass
def test(self):
pass | 2.859375 | 3 |
orders/tasks.py | SergePogorelov/myshop | 0 | 12785925 | <reponame>SergePogorelov/myshop
from celery import task
from django.core.mail import send_mail
from .models import Order
@task
def order_created(order_id):
""""Задача отправки email-уведомлений при успешном оформлении заказа."""
order = Order.objects.get(id=order_id)
subject = f"order.nr. {order.id}"
message = "Dear {}, \n\nYou have successfully placed an order.\nYour order id is {}".format(
order.first_name, order.id
)
mail_sent = send_mail(subject, message, "<EMAIL>", [order.email])
return mail_sent | 2.328125 | 2 |
nblog/core/views.py | NestorMonroy/BlogTemplate | 0 | 12785926 |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.views.generic import TemplateView
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.mail import send_mail
from django.shortcuts import redirect, render
from django.utils.html import mark_safe
User = get_user_model()
def message_view(request, message=None, title=None):
"""
provides a generic way to render any old message in a template
(used for when a user is disabled, or unapproved, or unverified, etc.)
"""
context = {"message": mark_safe(message), "title": title or settings.PROJECT_NAME}
return render(request, "core/message.html", context)
def home_page(request):
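    """Render the homepage; authenticated users also receive premium content in the context."""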
# print(request.session.get("first_name", "Unknown"))
# request.session['first_name']
context = {
"title": "Hello World!",
"content": " Welcome to the homepage.",
}
if request.user.is_authenticated:
context["premium_content"] = "YEAHHHHHH"
return render(request, "core/index.html", context)
class IndexView(TemplateView):
template_name = "core/index.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["users"] = User.objects.filter(is_active=True)
# context["customers"] = Customer.objects.filter(is_active=True)
return context
| 2.046875 | 2 |
oi/Contest/self/IOI-Test-Round/puzzle/data/checker.py | Riteme/test | 3 | 12785927 | #!/usr/bin/env python
#
# Copyright 2017 riteme
#
from sys import argv, version
from os.path import *
if version[0] == '3':
xrange = range
if len(argv) == 1 or "--help" in argv or "-h" in argv:
print("Participate answer checker & grader.")
print("Usage: %s [ID] [--no-limit] [--help/-h]" % argv[0])
print("\t--no-limit: Ignore the attempt limit (Set the limit to 1,000,000,000).")
print("\t--help / -h: Show this message.")
exit(0)
def ASSERT(expr, message):
if not expr:
print(message)
exit(-1)
idx = int(argv[1])
INPUT_FILE = "puzzle{}.in".format(idx)
OUTPUT_FILE = "puzzle{}.out".format(idx)
ASSERT(exists(INPUT_FILE), "'{}' not found.".format(INPUT_FILE))
ASSERT(exists(OUTPUT_FILE), "'{}' not found.".format(OUTPUT_FILE))
inp = open(INPUT_FILE)
out = open(OUTPUT_FILE)
T, n, m, LIMIT = map(int, inp.readline().split())
if "--no-limit" in argv:
LIMIT = 10**9
DOWN = 1
RIGHT = 2
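# Grid encoding: DOWN marks the top cell of a vertical domino, RIGHT marks the
# left cell of a horizontal domino, and 0 means the cell carries no marker.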
def read_graph():
G = [[0] * (m + 1) for i in xrange(n + 1)]
x = 0
while x < n:
line = inp.readline().strip()
if len(line) == 0:
continue
x += 1
for y in xrange(1, m + 1):
if line[y - 1] == '$' and G[x - 1][y] != DOWN:
G[x][y] = DOWN
elif line[y - 1] == '#' and G[x][y - 1] != RIGHT:
G[x][y] = RIGHT
return G
last = read_graph()
def rotate(x, y, line):
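    # Rotate the 2x2 block whose top-left corner is (x, y): two stacked
    # horizontal dominoes become two side-by-side vertical ones, or vice versa.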
if x < n and last[x][y] == RIGHT and last[x + 1][y] == RIGHT:
last[x][y] = last[x][y + 1] = DOWN
last[x + 1][y] = 0
elif y < m and last[x][y] == DOWN and last[x][y + 1] == DOWN:
last[x][y] = last[x + 1][y] = RIGHT
last[x][y + 1] = 0
else:
ASSERT(False, "Can't rotate at ({}, {}) (at line {}).".format(x, y, line))
score = line = 0
cnt = LIMIT
for i in xrange(1, T + 1):
cur = read_graph()
try:
k = int(out.readline())
line += 1
except:
ASSERT(False, "Can't read integer 'k' at gate {}.".format(i))
reported = False
for j in xrange(k):
if cnt <= 0:
print("No opportunities left.")
reported = True
break
cnt -= 1
try:
x, y = map(int, out.readline().split())
line += 1
except:
ASSERT(False, "Can't read integer 'x' and 'y' at gate {}.".format(i))
rotate(x, y, line)
if last != cur:
print("Can't open the gate {}.".format(i))
break
score = i
last = cur
if cnt <= 0:
if not reported:
print("No opportunities left after gate {}.".format(i))
break
print("Score: {}\nTried {} times.".format(score, LIMIT - cnt))
| 3.515625 | 4 |
src/drugstone/scripts/add_edges_to_genes.py | realugur/drugst.one-py | 0 | 12785928 | <gh_stars>0
def add_edges_to_genes(
genes: list,
edges: list, ) -> dict:
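    """Annotate each gene dict with a 'has_edges_to' list of neighbouring gene
    symbols, then return the genes keyed by id inside a {'drugs': {}, 'genes': {}}
    wrapper. Edges are expected as dicts with 'proteinA'/'proteinB' netex ids.
    """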
for gene in genes:
if "netexId" in gene:
netex_edges = [n["proteinB"] for n in edges if gene["netexId"] == n["proteinA"]]
symbol_edges = []
for e in netex_edges:
for g in genes:
if "symbol" in g and "netexId" in g and e == g["netexId"]:
symbol_edges.append(g["symbol"])
gene["has_edges_to"] = symbol_edges
else:
gene["has_edges_to"] = []
result = {"drugs": {}, "genes": {}}
for gene in genes:
gene.pop("netexId", None)
result["genes"][gene["id"]] = gene
return result
| 2.875 | 3 |
star_realms_cards/all_cards.py | samervin/star-realms-database | 5 | 12785929 | <filename>star_realms_cards/all_cards.py<gh_stars>1-10
# Fields
NAME = 'name'
FLAVOR = 'flavor'
FACTION = 'faction'
TYPE = 'type'
SHIELD = 'shield'
COST = 'cost'
SET = 'set'
QUANTITY = 'quantity'
ABILITIES = 'abilities'
ALLY_ABILITIES = 'ally-abilities'
SCRAP_ABILITIES = 'scrap-abilities'
TRADE = 'trade'
COMBAT = 'combat'
AUTHORITY = 'authority'
DRAW = 'draw'
OPPONENT_DISCARD = 'opponent-discard'
TRADE_ROW_SCRAP = 'trade-row-scrap'
# TODO: Should scrap be a number? Categorized under Other abilities?
SCRAP = 'scrap'
OTHER_ABILITY = 'other-ability'
# Other ability fields
DESTROY_BASE = 'destroy-base'
ALLY_PLACE_INTO_HAND = 'ally-place-into-hand'
# For cards that have 'or' text
OR = 'or'
# Faction values
MACHINE_CULT = 'Machine Cult'
STAR_EMPIRE = 'Star Empire'
BLOB = 'Blob'
TRADE_FEDERATION = 'Trade Federation'
UNALIGNED = 'Unaligned'
# Type values
SHIP = 'ship'
OUTPOST = 'outpost'
BASE = 'base'
# Scrap values
SCRAP_HAND = 'scrap-hand'
SCRAP_DISCARD = 'scrap-discard'
# TODO: Should this just be contained in an OR? Seems poor
SCRAP_HAND_DISCARD = 'scrap-hand-discard'
# Set values
STAR_REALMS = 'Star Realms'
CRISIS_BASES_BATTLESHIPS = 'Crisis: Bases and Battleships'
CRISIS_EVENTS = 'Crisis: Events'
CRISIS_HEROES = 'Crisis: Heroes'
CRISIS_FLEETS_FORTRESSES = 'Crisis: Fleets and Fortresses'
GAMBIT_EXP = 'Gambit Expansion'
PROMOTIONAL = 'Promotional Set'
COLONY_WARS = 'Colony Wars'
class StarRealmsCards:
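    # Note: the ability values are not perfectly uniform in this data set --
    # Explorer wraps its abilities in a list of dicts while most other cards
    # use a plain dict, so consumers should handle both shapes.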
ALL_STAR_REALMS_CARDS = [
{
NAME: 'Scout',
FACTION: UNALIGNED,
TYPE: SHIP,
SET: STAR_REALMS,
QUANTITY: 16,
TRADE: 1
},
{
NAME: 'Viper',
FACTION: UNALIGNED,
TYPE: SHIP,
SET: STAR_REALMS,
QUANTITY: 4,
COMBAT: 1
},
{
NAME: 'Explorer',
FACTION: UNALIGNED,
TYPE: SHIP,
COST: 2,
SET: STAR_REALMS,
QUANTITY: 10,
ABILITIES: [{
TRADE: 2
}],
SCRAP_ABILITIES: [{
COMBAT: 2
}]
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 1,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
TRADE: 1,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 2,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 2,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
TRADE: 2,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FLAVOR: 'With the Blobs an ever present danger, '
'even the Cult\'s cargo carrying mechs bristle with firepower.',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
OR: {
TRADE: 3,
COMBAT: 5
}
},
ALLY_ABILITIES: {
SCRAP: SCRAP_HAND_DISCARD
}
},
{
NAME: 'Stealth Needle',
FLAVOR: 'The Needle\'s ability to mimic other ships represents the pinnacle of Cult technology.',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
OTHER_ABILITY: 'Copy another ship you\'ve played this turn. '
'Stealth Needle has that ship\'s faction in addition to Machine Cult.'
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 5,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 4,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 6,
OTHER_ABILITY: DESTROY_BASE
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Battle Station',
FLAVOR: 'A Battle Station fusion core can double as a devastating weapon... once.',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 5,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 2,
SCRAP_ABILITIES: {
COMBAT: 5
}
},
{
NAME: '<NAME>',
FLAVOR: 'This man-made planet is a galactic center for open source tech.',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 6,
COST: 5,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
OTHER_ABILITY: 'Mech World counts as an ally for all factions.'
}
},
{
NAME: 'Junkyard',
FLAVOR: 'The Machine Cult\'s first commandment: "Thou shalt not waste tech."',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 5,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
SCRAP: SCRAP_HAND_DISCARD
}
},
{
NAME: '<NAME>',
FLAVOR: 'This high-tech city is like a beehive: '
'it looks chaotic but vital work is being done efficiently at a frenetic pace.',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 6,
COST: 7,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
OTHER_ABILITY: 'Draw a card, then scrap a card from your hand.'
}
},
{
NAME: '<NAME>',
FLAVOR: 'The Machine Cult built these supercomputing space stations to run every aspect of their society. '
'Now they worship them as gods.',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 6,
COST: 8,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
SCRAP: 'Scrap up to two cards from your hand and/or discard pile. Draw a card for each card scrapped this way.'
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 1,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 2,
OPPONENT_DISCARD: 1
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: 'Corvette',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 2,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 1,
DRAW: 1
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
TRADE: 1,
DRAW: 1
},
SCRAP_ABILITIES: {
OPPONENT_DISCARD: 1
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 4,
OPPONENT_DISCARD: 1
},
ALLY_ABILITIES: {
COMBAT: 2
},
SCRAP_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Battlecruiser',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 5,
DRAW: 1
},
ALLY_ABILITIES: {
OPPONENT_DISCARD: 1
},
SCRAP_ABILITIES: {
DRAW: 1,
DESTROY_BASE: 1
}
},
{
NAME: 'Dreadnaught',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 7,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 7,
DRAW: 1
},
SCRAP_ABILITIES: {
COMBAT: 5
}
},
{
NAME: 'Space Station',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 4,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 2
},
ALLY_ABILITIES: {
COMBAT: 2
},
SCRAP_ABILITIES: {
TRADE: 4
}
},
{
NAME: 'Recycling Station',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 4,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
OR: {
TRADE: 1,
OTHER_ABILITY: 'Discard up to two cards, then draw that many cards.'
}
}
},
{
NAME: 'War World',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 4,
COST: 5,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 3
},
ALLY_ABILITIES: {
COMBAT: 4
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 6,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 3
},
ALLY_ABILITIES: {
OPPONENT_DISCARD: 1
}
},
{
NAME: 'Fleet HQ',
FLAVOR: 'When an Imperial Fleet goes into battle, '
'it\'s usually coordinated from afar by one of these mobile command centers.',
FACTION: STAR_EMPIRE,
TYPE: BASE,
SHIELD: 8,
COST: 8,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
OTHER_ABILITY: 'All of your ships get 1 Combat.'
}
},
{
NAME: '<NAME>',
FLAVOR: 'Either kill it before it signals the hive or run. '
'There are other choices, but none you\'ll live through.',
FACTION: BLOB,
TYPE: SHIP,
COST: 1,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 3
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Trade Pod',
FLAVOR: 'The loading and offloading process is efficient, but disgusting.',
FACTION: BLOB,
TYPE: SHIP,
COST: 2,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
TRADE: 3
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: 'Battle Pod',
FACTION: BLOB,
TYPE: SHIP,
COST: 2,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 4,
TRADE_ROW_SCRAP: 1
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: 'Ram',
FACTION: BLOB,
TYPE: SHIP,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 5
},
ALLY_ABILITIES: {
COMBAT: 2
},
SCRAP_ABILITIES: {
TRADE: 3
}
},
{
NAME: '<NAME>',
FLAVOR: 'When this monstrous ship shows up on a colony\'s sensors, they know the end is near...',
FACTION: BLOB,
TYPE: SHIP,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 6
},
ALLY_ABILITIES: {
DESTROY_BASE: 1,
TRADE_ROW_SCRAP: 1
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: SHIP,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 8
},
ALLY_ABILITIES: {
DRAW: 1
},
SCRAP_ABILITIES: {
COMBAT: 4
}
},
{
NAME: '<NAME>',
FLAVOR: '"Is that... a whale?" - HMS Defender, final transmission.',
FACTION: BLOB,
TYPE: SHIP,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 7
},
ALLY_ABILITIES: {
OTHER_ABILITY: 'Acquire any ship without paying its cost and put it on top of your deck.'
}
},
{
NAME: 'Mothership',
FACTION: BLOB,
TYPE: SHIP,
COST: 7,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 6,
DRAW: 1
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 5,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 1
},
SCRAP_ABILITIES: {
TRADE: 3
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 5,
COST: 5,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 3
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 7,
COST: 8,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
OR: {
COMBAT: 5,
OTHER_ABILITY: 'Draw a card for each Blob card that you\'ve played this turn.'
}
}
},
{
NAME: '<NAME>',
FLAVOR: '"Fast? This baby doesn\'t just haul cargo. She hauls..."',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 1,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
TRADE: 2
},
ALLY_ABILITIES: {
AUTHORITY: 4
}
},
{
NAME: 'Cutter',
FLAVOR: '"Built for cargo, armed for conflict. Versatility for an unpredictable universe."'
' - Premier Aerospace Cargo Enterprises',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 2,
SET: STAR_REALMS,
QUANTITY: 3,
ABILITIES: {
AUTHORITY: 4,
TRADE: 2
},
ALLY_ABILITIES: {
COMBAT: 4
}
},
{
NAME: '<NAME>',
FLAVOR: 'War should always be a last resort, it\'s bad for the bottom line.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
AUTHORITY: 3,
TRADE: 2,
OTHER_ABILITY: 'If you have two or more bases in play, draw two cards.'
}
},
{
NAME: 'Freighter',
FLAVOR: 'This class of mammoth cargo ships is one of the keys '
'to the Federation\'s vast trade-based wealth.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
TRADE: 4
},
ALLY_ABILITIES: {
# TODO: Should putting things on top of your deck be standardized?
OTHER_ABILITY: 'You may put the next ship you acquire this turn on top of your deck.'
}
},
{
NAME: '<NAME>',
FLAVOR: 'The heavily-armored Escort class was the Federation\'s first response to the Blob threat.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 5,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
AUTHORITY: 4,
COMBAT: 4
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Flagship',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 5,
DRAW: 1
},
ALLY_ABILITIES: {
AUTHORITY: 5
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 8,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
AUTHORITY: 4,
COMBAT: 5,
DRAW: 2
},
ALLY_ABILITIES: {
OTHER_ABILITY: DESTROY_BASE
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: OUTPOST,
SHIELD: 4,
COST: 3,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
OR: {
AUTHORITY: 1,
TRADE: 1
}
},
SCRAP_ABILITIES: {
COMBAT: 3
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: BASE,
SHIELD: 4,
COST: 4,
SET: STAR_REALMS,
QUANTITY: 2,
ABILITIES: {
OR: {
AUTHORITY: 2,
TRADE: 2
}
},
SCRAP_ABILITIES: {
COMBAT: 5
}
},
{
NAME: 'Defense Center',
FACTION: TRADE_FEDERATION,
TYPE: OUTPOST,
SHIELD: 5,
COST: 5,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
OR: {
AUTHORITY: 3,
COMBAT: 2
}
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME> Call',
FACTION: TRADE_FEDERATION,
TYPE: OUTPOST,
SHIELD: 6,
COST: 6,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
TRADE: 3
},
SCRAP_ABILITIES: {
DRAW: 1,
OTHER_ABILITY: DESTROY_BASE
}
},
{
NAME: 'Central Office',
FACTION: TRADE_FEDERATION,
TYPE: BASE,
SHIELD: 6,
COST: 7,
SET: STAR_REALMS,
QUANTITY: 1,
ABILITIES: {
TRADE: 2,
OTHER_ABILITY: 'You may put the next ship you acquire this turn on top of your deck.'
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Battle Bot',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 1,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 2,
SCRAP: SCRAP_HAND
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: 'Repair Bot',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 2,
SCRAP: SCRAP_DISCARD
},
SCRAP_ABILITIES: {
COMBAT: 2
}
},
{
NAME: 'Convoy Bot',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 3,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 4,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
TRADE: 3,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
COMBAT: 3
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 5,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 6,
SCRAP: SCRAP_HAND_DISCARD
},
ALLY_ABILITIES: {
OTHER_ABILITY: DESTROY_BASE
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 7,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 6,
SCRAP: 'You may scrap up to two cards in your hand and/or discard pile.'
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 2,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 2,
OTHER_ABILITY: ALLY_PLACE_INTO_HAND
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 5,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
SCRAP: SCRAP_HAND
},
ALLY_ABILITIES: {
COMBAT: 3
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 5,
COST: 5,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
OTHER_ABILITY: 'Until your turn ends, Stealth Tower becomes a copy of any base in play. '
'Stealth Tower has that base\'s faction in addition to Machine Cult.'
}
},
{
NAME: 'Frontier Station',
FLAVOR: '"Supply and Protect" - Station Invocation',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 6,
COST: 6,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
OR: {
TRADE: 2,
COMBAT: 3
}
}
},
{
NAME: 'The Incinerator',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 6,
COST: 8,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
SCRAP: 'Scrap up to two cards in your hand and/or discard pile.'
},
ALLY_ABILITIES: {
OTHER_ABILITY: 'Gain 2 Combat for each card scrapped from your hand and/or discard pile this turn.'
}
},
{
NAME: '<NAME>',
FLAVOR: 'Trade on the fringe: high risk, high reward.',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 1,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 2
},
ALLY_ABILITIES: {
COMBAT: 2,
OPPONENT_DISCARD: 1
}
},
{
NAME: 'Lancer',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 4,
OTHER_ABILITY: 'If an opponent controls a base, gain an additional 2 Combat.'
},
ALLY_ABILITIES: {
OPPONENT_DISCARD: 1
}
},
{
NAME: 'Falcon',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 3,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 2,
DRAW: 1
},
SCRAP_ABILITIES: {
OPPONENT_DISCARD: 1
}
},
{
NAME: 'Gunship',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 5,
OPPONENT_DISCARD: 1
},
SCRAP_ABILITIES: {
TRADE: 4
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 5,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 4,
DRAW: 1
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Aging Battleship',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 5,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 5
},
ALLY_ABILITIES: {
DRAW: 1
},
SCRAP_ABILITIES: {
COMBAT: 2,
DRAW: 2
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 8,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 8,
DRAW: 1,
OPPONENT_DISCARD: 1,
OTHER_ABILITY: ALLY_PLACE_INTO_HAND
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: BASE,
SHIELD: 4,
COST: 3,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
OTHER_ABILITY: 'Discard a card. If you do, draw a card.'
},
ALLY_ABILITIES: {
COMBAT: 3
}
},
{
NAME: 'Command Center',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 4,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
TRADE: 2,
OTHER_ABILITY: 'Whenever you play a Star Empire ship, gain 2 Combat.'
}
},
{
NAME: 'Supply Depot',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 5,
COST: 6,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
OTHER_ABILITY: 'Discard up to two cards. Gain 2 Trade or 2 Combat for each card discarded this way.'
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 6,
COST: 7,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
DRAW: 1,
OPPONENT_DISCARD: 1
},
ALLY_ABILITIES: {
COMBAT: 4
}
},
{
NAME: 'Swarmer',
FACTION: BLOB,
TYPE: SHIP,
COST: 1,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 3,
TRADE_ROW_SCRAP: 1
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: 'Predator',
FLAVOR: 'You\'re the prey.',
FACTION: BLOB,
TYPE: SHIP,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
COMBAT: 4
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: SHIP,
COST: 3,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 3,
},
ALLY_ABILITIES: {
COMBAT: 3
},
SCRAP_ABILITIES: {
COMBAT: 3
}
},
{
NAME: 'Ravager',
FLAVOR: 'Even heavily armed convoys fear the Ravagers.',
FACTION: BLOB,
TYPE: SHIP,
COST: 3,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 6,
TRADE_ROW_SCRAP: 2
}
},
{
NAME: 'Parasite',
FACTION: BLOB,
TYPE: SHIP,
COST: 5,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
OR: {
COMBAT: 6,
OTHER_ABILITY: 'Acquire a card of cost six or less for free.'
}
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Moonwurm',
FACTION: BLOB,
TYPE: SHIP,
COST: 7,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 8,
DRAW: 1
},
ALLY_ABILITIES: {
# TODO: This is similar to Parasite and Leviathan, should this be a keyword?
OTHER_ABILITY: 'Acquire a card of cost two or less for free and put it into your hand.'
}
},
{
NAME: 'Leviathan',
FACTION: BLOB,
TYPE: SHIP,
COST: 8,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 9,
DRAW: 1,
OTHER_ABILITY: DESTROY_BASE
},
ALLY_ABILITIES: {
OTHER_ABILITY: 'Acquire a card of cost three or less for free and put it into your hand.'
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 3,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 1
},
SCRAP_ABILITIES: {
COMBAT: 3
}
},
{
NAME: 'Bioformer',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 4,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 3
},
SCRAP_ABILITIES: {
TRADE: 3
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 5,
COST: 6,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 4,
OTHER_ABILITY: ALLY_PLACE_INTO_HAND
},
SCRAP_ABILITIES: {
OTHER_ABILITY: DESTROY_BASE
}
},
{
NAME: '<NAME>',
FLAVOR: 'Trade is a colony\'s life blood.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 1,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 2
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FLAVOR: '"A well supplied colony is a loyal colony." - <NAME>',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 3
},
ALLY_ABILITIES: {
AUTHORITY: 3
}
},
{
NAME: '<NAME>',
FLAVOR: 'Cutters are the life line of Federation colonies.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 3,
SET: COLONY_WARS,
QUANTITY: 3,
ABILITIES: {
TRADE: 2,
COMBAT: 3
},
ALLY_ABILITIES: {
AUTHORITY: 4
}
},
{
NAME: '<NAME>',
FLAVOR: 'Suited for ferrying colonists or supplies.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
TRADE: 3,
AUTHORITY: 4
},
SCRAP_ABILITIES: {
OTHER_ABILITY: DESTROY_BASE
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 5,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
TRADE: 3,
COMBAT: 3,
AUTHORITY: 3,
OTHER_ABILITY: ALLY_PLACE_INTO_HAND
}
},
{
NAME: 'Peacekeeper',
FLAVOR: 'Might makes peace.',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 6,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 6,
AUTHORITY: 6
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: BASE,
SHIELD: 3,
COST: 2,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
AUTHORITY: 2
},
ALLY_ABILITIES: {
TRADE: 2
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: BASE,
SHIELD: 5,
COST: 4,
SET: COLONY_WARS,
QUANTITY: 2,
ABILITIES: {
TRADE: 2,
OTHER_ABILITY: 'If you have three or more bases in play (including this one), gain 4 Authority and draw a card.'
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: OUTPOST,
SHIELD: 6,
COST: 6,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
TRADE: 2
},
ALLY_ABILITIES: {
OTHER_ABILITY: 'Put the next ship or base you acquire this turn on top of your deck.'
}
},
{
NAME: '<NAME>',
FLAVOR: 'Colonies still loyal to the Federation are precious.',
FACTION: TRADE_FEDERATION,
TYPE: BASE,
SHIELD: 6,
COST: 7,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
TRADE: 3,
COMBAT: 3,
AUTHORITY: 3
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: OUTPOST,
SHIELD: 6,
COST: 8,
SET: COLONY_WARS,
QUANTITY: 1,
ABILITIES: {
TRADE: 3,
OTHER_ABILITY: 'Put the next ship or base you acquire this turn into your hand.'
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 2,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 2,
ABILITIES: {
COMBAT: 1,
SCRAP: SCRAP_HAND_DISCARD,
OTHER_ABILITY: 'If you control two or more bases, gain 8 Combat.'
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 5,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 6,
OTHER_ABILITY: 'You may return target base from play to its owner\'s hand.'
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 5,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 1,
ABILITIES: {
TRADE: 3,
DRAW: 1
},
ALLY_ABILITIES: {
COMBAT: 4
}
},
{
NAME: '<NAME>',
FLAVOR: 'These bases play a key role in expanding the empire\'s influence.',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 5,
COST: 3,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 2,
ALLY_ABILITIES: {
OPPONENT_DISCARD: 1
}
},
{
NAME: 'Obliterator',
FACTION: BLOB,
TYPE: SHIP,
COST: 6,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 1,
ABILITIES: {
COMBAT: 7,
OTHER_ABILITY: 'If your opponent has two or more bases in play, gain 6 Combat.'
},
ALLY_ABILITIES: {
DRAW: 1
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 5,
COST: 3,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 2,
ABILITIES: {
TRADE: 1
},
ALLY_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 1,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 2,
ABILITIES: {
TRADE: 1
},
ALLY_ABILITIES: {
DRAW: 1
},
SCRAP_ABILITIES: {
TRADE: 1
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 6,
SET: CRISIS_BASES_BATTLESHIPS,
QUANTITY: 1,
ABILITIES: {
AUTHORITY: 3,
TRADE: 2,
DRAW: 1
},
ALLY_ABILITIES: {
OTHER_ABILITY: 'You may put the next base you acquire this turn directly into play.'
}
},
{
NAME: '<NAME>',
FLAVOR: 'When it comes to Patrol Bot tactics, <NAME> wrote the scripture.',
FACTION: MACHINE_CULT,
TYPE: SHIP,
COST: 2,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 2,
ABILITIES: {
OR: {
TRADE: 2,
COMBAT: 4
}
},
ALLY_ABILITIES: {
SCRAP: SCRAP_HAND_DISCARD
}
},
{
NAME: '<NAME>',
FACTION: MACHINE_CULT,
TYPE: OUTPOST,
SHIELD: 5,
COST: 4,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 1,
ABILITIES: {
OR: {
TRADE: 1,
COMBAT: 2
}
},
ALLY_ABILITIES: {
SCRAP: SCRAP_HAND_DISCARD
}
},
{
NAME: '<NAME>',
FLAVOR: 'These cargo ships were originally designed as combat drones by Federation CEO <NAME>.',
FACTION: STAR_EMPIRE,
TYPE: SHIP,
COST: 1,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 2,
ABILITIES: {
DRAW: 1
},
SCRAP_ABILITIES: {
TRADE: 1
}
},
{
NAME: '<NAME>',
FACTION: STAR_EMPIRE,
TYPE: OUTPOST,
SHIELD: 6,
COST: 7,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 1,
ABILITIES: {
COMBAT: 3,
OTHER_ABILITY: 'Draw a card, then discard a card.'
},
ALLY_ABILITIES: {
OTHER_ABILITY: 'Draw a card, then discard a card.'
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: SHIP,
COST: 1,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 2,
ABILITIES: {
COMBAT: 3,
TRADE_ROW_SCRAP: 2
},
SCRAP_ABILITIES: {
COMBAT: 2
}
},
{
NAME: '<NAME>',
FACTION: BLOB,
TYPE: BASE,
SHIELD: 6,
COST: 7,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 1,
ABILITIES: {
COMBAT: 4,
OTHER_ABILITY: 'You may scrap a Trade Federation, Machine Cult, or Star Empire card '
'from your hand or discard pile. If you do, draw a card.'
}
},
{
NAME: '<NAME>',
FACTION: TRADE_FEDERATION,
TYPE: SHIP,
COST: 4,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 2,
ABILITIES: {
OTHER_ABILITY: 'You may acquire a ship of cost four or less and put it on top of your deck.'
},
ALLY_ABILITIES: {
COMBAT: 4
},
SCRAP_ABILITIES: {
DRAW: 1
}
},
{
NAME: 'Capitol World',
FLAVOR: '"Wealth is power" - CEO <NAME>',
FACTION: TRADE_FEDERATION,
TYPE: OUTPOST,
SHIELD: 6,
COST: 8,
SET: CRISIS_FLEETS_FORTRESSES,
QUANTITY: 1,
ABILITIES: {
AUTHORITY: 6,
DRAW: 1
}
},
{
NAME: '<NAME>',
FLAVOR: 'They have as much courage as you have coin.',
FACTION: UNALIGNED,
TYPE: SHIP,
COST: 3,
SET: PROMOTIONAL,
QUANTITY: 3,
ABILITIES: {
COMBAT: 5,
OTHER_ABILITY: 'Choose a faction as you play Merc Cruiser. Merc Cruiser has that faction.'
}
},
{
NAME: '<NAME>',
FACTION: UNALIGNED,
TYPE: OUTPOST,
SHIELD: 5,
COST: 4,
SET: PROMOTIONAL,
QUANTITY: 3,
            ALLY_ABILITIES: {
                # a list keeps all four faction-specific abilities under one
                # key (repeating OTHER_ABILITY would silently keep only one)
                OTHER_ABILITY: [
                    'Star Empire ally: 2 Combat',
                    'Machine Cult ally: Scrap a card from your hand or discard pile.',
                    'Trade Federation ally: 3 Authority',
                    'Blob ally: Scrap up to two cards currently in the trade row.'
                ]
            }
}
]
| 1.945313 | 2 |
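
# Minimal usage sketch (not part of the original data module): filtering the
# card list by faction and type, using only the constants defined above.
if __name__ == '__main__':
    blob_ships = [card[NAME] for card in StarRealmsCards.ALL_STAR_REALMS_CARDS
                  if card.get(FACTION) == BLOB and card.get(TYPE) == SHIP]
    print('%d Blob ships: %s' % (len(blob_ships), ', '.join(blob_ships)))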
authlib/client/errors.py | moriyoshi/authlib | 0 | 12785930 | <filename>authlib/client/errors.py
from authlib.integrations.base_client import *
| 1.148438 | 1 |
ovpr_atp/awards/models.py | ravikumargo/awdportal | 0 | 12785931 | # Defines the data models used within the application
#
# See the Django documentation at https://docs.djangoproject.com/en/1.6/topics/db/models/
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.contrib.auth.models import User, Group
from django.contrib.admin.models import LogEntry
from django.core.urlresolvers import reverse
from django.utils.html import format_html
from django.utils import timezone
from itertools import chain
from decimal import Decimal
from datetime import datetime, date, timedelta, tzinfo
from dateutil.tz import tzutc, tzlocal
from multiselectfield import MultiSelectField
import reversion
def get_value_from_choices(choices, code_to_find):
"""Returns the value that corresponds to the given code in the list of choices.
This is used to translate a code value, as stored in the database, to its
corresponding text value from the choices tuple.
"""
return next((value for code, value in choices if code == code_to_find), '')
class FieldIteratorMixin(models.Model):
"""Returns the verbose_name and value for each non-HIDDEN_FIELD on an object"""
def _get_field(self, field):
"""Gets the specified field from the model"""
model_field = self._meta.get_field(field)
name = model_field.verbose_name
if model_field.choices:
display_method = getattr(self, 'get_' + field + '_display')
data = display_method()
else:
data = getattr(self, field)
boolean_field = isinstance(model_field, models.NullBooleanField)
return (name, data, boolean_field)
def _get_field_full(self, field):
"""Gets the specified field from the model, along with the field name"""
model_field = self._meta.get_field(field)
name = model_field.verbose_name
if model_field.choices:
display_method = getattr(self, 'get_' + field + '_display')
data = display_method()
else:
data = getattr(self, field)
boolean_field = isinstance(model_field, models.NullBooleanField)
return (name, data, boolean_field, model_field.name)
def get_model_fields(self):
"""Gets all fields from the model that aren't defined in HIDDEN_FIELDS"""
fields = [field.name for field in self._meta.fields]
fields.remove('id')
for field in self.HIDDEN_FIELDS:
fields.remove(field)
return fields
def get_table_fields(self):
"""Gets all fields from the model to display in table format
Fields defined in HIDDEN_TABLE_FIELDS are excluded.
"""
fields = self.get_model_fields()
for field in self.HIDDEN_TABLE_FIELDS:
fields.remove(field)
field_data = [self._get_field(field) for field in fields]
return field_data
def get_all_fields(self):
"""Gets all non-HIDDEN_FIELDs from the model and their data"""
fields = self.get_model_fields()
field_data = [self._get_field(field) for field in fields]
return field_data
def get_search_fields(self):
"""Gets fields necessary for searching
Fields defined in HIDDEN_SEARCH_FIELDS are excluded
"""
fields = self.get_model_fields()
for field in self.HIDDEN_SEARCH_FIELDS:
fields.remove(field)
field_data = [self._get_field_full(field) for field in fields]
if isinstance(self, Subaward) and hasattr(self, 'comments'):
field_data.append(self._get_field_full('comments'))
return field_data
def get_fieldsets(self):
"""Gets the model's fields and separates them out into the defined FIELDSETS"""
fields = self.get_model_fields()
fieldset_data = []
for fieldset in self.FIELDSETS:
fieldset_fields = []
for field in fieldset['fields']:
fieldset_fields.append(self._get_field(field))
fields.remove(field)
fieldset_data.append((fieldset['title'], fieldset_fields))
if hasattr(self, 'DISPLAY_TABLES'):
for display_table in self.DISPLAY_TABLES:
for row in display_table['rows']:
for field in row['fields']:
fields.remove(field)
fieldset_data.append(
(None, [self._get_field(field) for field in fields]))
return fieldset_data
def get_display_tables(self):
"""Gets the fields and data defined in DISPLAY_TABLES for tabular display"""
display_tables = []
for item in self.DISPLAY_TABLES:
rows = []
for row in item['rows']:
data = {'label': row['label']}
data['fields'] = [
self._get_field(field) for field in row['fields']]
rows.append(data)
display_table = {
'title': item['title'],
'columns': item['columns'],
'rows': rows}
display_tables.append(display_table)
return display_tables
def get_award_setup_report_fields(self):
"""Gets the fields needed for EAS report"""
return [self._get_field(field) for field in self.EAS_REPORT_FIELDS]
class Meta:
abstract = True
class EASUpdateMixin(object):
"""If it's expired or inactive, unset this object from any foriegn key fields"""
def save(self, *args, **kwargs):
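        # After the normal save, detach this lookup record from any objects
        # that reference it once it is inactive or past its end_date.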
super(EASUpdateMixin, self).save(*args, **kwargs)
expired = False
if hasattr(self, 'end_date'):
if self.end_date:
                # datetime is a subclass of date, so test for datetime first to
                # avoid comparing a datetime against date.today()
                if isinstance(self.end_date, datetime):
                    expired = self.end_date < datetime.now()
                else:
                    expired = self.end_date < date.today()
else:
expired = False
if not self.active or expired:
for related_object in self._meta.get_all_related_objects():
accessor_name = related_object.get_accessor_name()
                if not hasattr(self, accessor_name):
                    # skip relations without an accessor instead of aborting the loop
                    continue
                related_queryset = getattr(self, accessor_name)
field_name = related_object.field.name
for item in related_queryset.all():
setattr(item, field_name, None)
item.save()
class AllowedCostSchedule(EASUpdateMixin, models.Model):
"""Model for the AllowedCostSchedule data"""
EAS_FIELD_ORDER = [
'id',
'name',
'end_date',
'active'
]
id = models.BigIntegerField(primary_key=True, unique=True)
name = models.CharField(max_length=30)
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class AwardManager(FieldIteratorMixin, EASUpdateMixin, models.Model):
"""Model for the AwardManager data"""
EAS_FIELD_ORDER = [
'id',
'full_name',
'gwid',
'system_user',
'end_date',
'active'
]
CAYUSE_FIELDS = [
'title',
'first_name',
'middle_name',
'last_name',
'phone',
'email'
]
FIELDSETS = []
HIDDEN_FIELDS = [
'system_user',
'end_date',
'active',
'first_name',
'middle_name',
'last_name'
]
id = models.BigIntegerField(primary_key=True, unique=True)
full_name = models.CharField(max_length=240)
gwid = models.CharField(
max_length=150,
blank=True,
null=True,
verbose_name='GWID')
system_user = models.BooleanField()
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
# Cayuse fields
title = models.CharField(max_length=64, blank=True, null=True)
first_name = models.CharField(max_length=64, blank=True)
middle_name = models.CharField(max_length=32, blank=True)
last_name = models.CharField(max_length=64, blank=True)
phone = models.CharField(max_length=32, blank=True, null=True)
email = models.CharField(max_length=64, blank=True, null=True)
def __unicode__(self):
return self.full_name
class AwardOrganization(EASUpdateMixin, models.Model):
"""Model for the AwardOrganization data"""
EAS_FIELD_ORDER = [
'id',
'name',
'organization_type',
'org_info1_meaning',
'org_info2_meaning',
'end_date',
'active'
]
id = models.BigIntegerField(primary_key=True, unique=True)
name = models.CharField(max_length=240)
organization_type = models.CharField(max_length=30, blank=True, null=True)
org_info1_meaning = models.CharField(max_length=80)
org_info2_meaning = models.CharField(max_length=80)
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class AwardTemplate(EASUpdateMixin, models.Model):
"""Model for the AwardTemplate data"""
EAS_FIELD_ORDER = [
'id',
'number',
'short_name',
'active'
]
id = models.BigIntegerField(primary_key=True, unique=True)
number = models.CharField(max_length=15)
short_name = models.CharField(max_length=30)
active = models.BooleanField()
def __unicode__(self):
return u'%s - %s' % (self.number, self.short_name)
class Meta:
ordering = ['number']
class CFDANumber(EASUpdateMixin, models.Model):
"""Model for the CFDANumber data"""
EAS_FIELD_ORDER = [
'flex_value',
'description',
'end_date',
'active'
]
flex_value = models.CharField(
max_length=150,
primary_key=True,
unique=True)
description = models.CharField(max_length=240)
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
def __unicode__(self):
return u'%s - %s' % (self.flex_value, self.description)
class Meta:
ordering = ['flex_value']
class FedNegRate(EASUpdateMixin, models.Model):
"""Model for the FedNegRate data"""
EAS_FIELD_ORDER = [
'flex_value',
'description',
'end_date',
'active'
]
flex_value = models.CharField(
max_length=150,
primary_key=True,
unique=True)
description = models.CharField(max_length=240)
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
def __unicode__(self):
return self.description
class Meta:
ordering = ['description']
class FundingSource(EASUpdateMixin, models.Model):
"""Model for the FundingSource data"""
EAS_FIELD_ORDER = [
'name',
'number',
'id',
'active',
'end_date'
]
id = models.BigIntegerField(primary_key=True, unique=True)
name = models.CharField(max_length=50)
number = models.CharField(max_length=10)
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
def __unicode__(self):
return u'%s - %s' % (self.number, self.name)
class Meta:
ordering = ['number']
class IndirectCost(EASUpdateMixin, models.Model):
"""Model for the IndirectCost data"""
EAS_FIELD_ORDER = [
'id',
'rate_schedule',
'end_date',
'active'
]
id = models.BigIntegerField(primary_key=True, unique=True)
rate_schedule = models.CharField(max_length=30)
end_date = models.DateField(null=True, blank=True)
active = models.BooleanField()
def __unicode__(self):
return self.rate_schedule
class Meta:
ordering = ['rate_schedule']
class PrimeSponsor(EASUpdateMixin, models.Model):
"""Model for the PrimeSponsor data"""
EAS_FIELD_ORDER = [
'name',
'number',
'id',
'active',
]
id = models.BigIntegerField(primary_key=True, unique=True)
name = models.CharField(max_length=50)
number = models.IntegerField()
active = models.BooleanField()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class EASMapping(models.Model):
"""Model used to define a mapping between EAS data and the corresponding value in ATP"""
INTERFACE_CHOICES = (
('C', 'Cayuse'),
('L', 'Lotus'),
)
interface = models.CharField(
choices=INTERFACE_CHOICES,
max_length=1,
default='C')
field = models.CharField(max_length=50)
incoming_value = models.CharField(max_length=250)
atp_model = models.CharField(max_length=50)
atp_pk = models.IntegerField()
def __unicode__(self):
return u'(%s) %s=%s -> %s=%s' % (self.interface,
self.field,
self.incoming_value,
self.atp_model,
self.atp_pk)
class Meta:
unique_together = (
'interface',
'field',
'incoming_value',
'atp_model',
'atp_pk')
class EASMappingException(Exception):
"""Custom exception import processes throw when a new mapping is required"""
def __init__(self, message, interface, field, incoming_value, atp_model):
super(EASMappingException, self).__init__(self, message)
self.interface = interface
self.field = field
self.incoming_value = incoming_value
self.atp_model = atp_model
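# A hedged sketch of the intended import-time flow; `raw_value` and the
# lookup step are illustrative:
#
#     try:
#         source = FundingSource.objects.get(name=raw_value)
#     except FundingSource.DoesNotExist:
#         raise EASMappingException(
#             'No ATP value mapped for this funding source',
#             interface='C', field='agency_name',
#             incoming_value=raw_value, atp_model='FundingSource')
#     # A reviewer can then record the fix as an EASMapping row keyed on
#     # (interface, field, incoming_value, atp_model, atp_pk).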
class ATPAuditTrail(models.Model):
"""It is used internally to track each point of time when an award assinged and completed from a particular stage"""
award = models.IntegerField()
modification = models.CharField(max_length=100)
workflow_step = models.CharField(max_length=100)
date_created = models.DateTimeField(blank=True, null=True)
date_completed = models.DateTimeField(blank=True, null=True)
assigned_user = models.CharField(max_length=100)
class Award(models.Model):
"""The primary model"""
WAIT_FOR = {'RB': 'Revised Budget', 'PA': 'PI Access', 'CA': 'Cost Share Approval', 'FC': 'FCOI',
'PS': 'Proposal Submission', 'SC': 'Sponsor Clarity', 'NO': 'New Org needed',
'IC': 'Internal Clarification', 'DC': 'Documents not in GW Docs'
}
# These fields aren't displayed by the FieldIteratorMixin
HIDDEN_FIELDS = [
'subaward_done',
'award_management_done',
'extracted_to_eas',
]
# Workflow statuses
STATUS_CHOICES = (
(0, 'New'),
(1, 'Award Intake'),
(2, 'Award Negotiation'),
(3, 'Award Setup'),
(4, 'Subaward & Award Management'),
(5, 'Award Closeout'),
(6, 'Complete'),
)
# A mapping for which sections are active in which statuses
STATUS_SECTION_MAPPING = [
[],
['AwardAcceptance'],
['AwardNegotiation'],
['AwardSetup', 'AwardModification'],
['Subaward', 'AwardManagement'],
['AwardCloseout'],
[]
]
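    # e.g. STATUS_SECTION_MAPPING[3] -> ['AwardSetup', 'AwardModification']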
# A mapping for relevant user fields, groups, URLs, and statuses for each section
SECTION_FIELD_MAPPING = {
'ProposalIntake': {
'user_field': None,
'group': 'Proposal Intake',
'edit_url': 'edit_proposal_intake',
'edit_status': 0},
'AwardAcceptance': {
'user_field': 'award_acceptance_user',
'group': 'Award Acceptance',
'edit_url': 'edit_award_acceptance',
'edit_status': 1},
'AwardNegotiation': {
'user_field': 'award_negotiation_user',
'group': 'Award Negotiation',
'edit_url': 'edit_award_negotiation',
'edit_status': 2},
'AwardSetup': {
'user_field': 'award_setup_user',
'group': 'Award Setup',
'edit_url': 'edit_award_setup',
'edit_status': 3},
'AwardModification': {
'user_field': 'award_modification_user',
'group': 'Award Modification',
'edit_url': 'edit_award_setup',
'edit_status': 3},
'Subaward': {
'user_field': 'subaward_user',
'group': 'Subaward Management',
'edit_url': 'edit_subawards',
'edit_status': 4},
'AwardManagement': {
'user_field': 'award_management_user',
'group': 'Award Management',
'edit_url': 'edit_award_management',
'edit_status': 4},
'AwardCloseout': {
'user_field': 'award_closeout_user',
'group': 'Award Closeout',
'edit_url': 'edit_award_closeout',
'edit_status': 5},
}
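    # e.g. SECTION_FIELD_MAPPING['AwardSetup']['user_field'] -> 'award_setup_user'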
# Associates subsections with their parent sections (used in edit permission checks)
SECTION_PARENT_MAPPING = {
'PTANumber': 'AwardSetup',
'PriorApproval': 'AwardManagement',
'ReportSubmission': 'AwardManagement',
'FinalReport': 'AwardCloseout',
}
START_STATUS = 0
END_STATUS = 6
AWARD_SETUP_STATUS = 3
AWARD_ACCEPTANCE_STATUS = 1
status = models.IntegerField(choices=STATUS_CHOICES, default=0)
creation_date = models.DateField(auto_now_add=True)
extracted_to_eas = models.BooleanField(default=False)
# Limit assignment users to members of the appropriate group
award_acceptance_user = models.ForeignKey(
User,
related_name='+',
verbose_name='Award Intake User',
limit_choices_to=Q(
groups__name='Award Acceptance'))
award_negotiation_user = models.ForeignKey(
User,
null=True,
blank=True,
related_name='+',
verbose_name='Award Negotiation User',
limit_choices_to=Q(
groups__name='Award Negotiation'))
award_setup_user = models.ForeignKey(
User,
related_name='+',
verbose_name='Award Setup User',
limit_choices_to=Q(
groups__name='Award Setup'))
award_modification_user = models.ForeignKey(
User,
null=True,
blank=True,
related_name='+',
verbose_name='Award Modification User',
limit_choices_to=Q(
groups__name='Award Modification'))
subaward_user = models.ForeignKey(
User,
null=True,
blank=True,
related_name='+',
verbose_name='Subaward User',
limit_choices_to=Q(
groups__name='Subaward Management'))
award_management_user = models.ForeignKey(
User,
related_name='+',
verbose_name='Award Management User',
limit_choices_to=Q(
groups__name='Award Management'))
award_closeout_user = models.ForeignKey(
User,
related_name='+',
verbose_name='Award Closeout User',
limit_choices_to=Q(
groups__name='Award Closeout'))
# Because these two sections are active in the same status, we need to
# track their completion independently
subaward_done = models.BooleanField(default=False)
award_management_done = models.BooleanField(default=False)
send_to_modification = models.BooleanField(default=False)
send_to_setup = models.BooleanField(default=False)
common_modification = models.BooleanField(default=False)
award_dual_negotiation = models.BooleanField(default=False)
award_dual_setup = models.BooleanField(default=False)
award_dual_modification = models.BooleanField(default=False)
award_text = models.CharField(max_length=50, blank=True, null=True)
# If an award has a proposal, use that to determine its name. Otherwise,
# use its internal ID
def __unicode__(self):
proposal = self.get_first_real_proposal()
if proposal and proposal.get_unique_identifier() != '':
return u'Award for proposal #%s' % proposal.get_unique_identifier()
else:
return u'Award #%s' % self.id
@classmethod
def get_priority_assignments_for_award_setup_user(cls, user):
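        """Given a user, find currently assigned awards ordered by Award Setup priority"""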
assignment_list = []
assign_filter = cls.objects.filter(
(Q(Q(award_setup_user=user) & Q(status=2) & Q(award_dual_setup=True)) | Q(Q(award_setup_user=user) & Q(status=3) & Q(award_dual_setup=True))) |
(Q(award_setup_user=user) & Q(status=3) & Q(send_to_modification=False)) |
(Q(award_modification_user=user) & Q(status=3) & Q(send_to_modification=True)) |
(Q(award_modification_user=user) & Q(status=2) & Q(award_dual_modification=True))
)
        award_ids = [award_.id for award_ in assign_filter]
        temp_ids = []
        award_assignments = []
        # One queryset per setup priority level (highest first), each ordered
        # by creation date, chained into a single priority-ordered list
        priority_order = ['on', 'tw', 'th', 'fo', 'fi', 'ni', '']
        assignments = list(chain.from_iterable(
            AwardAcceptance.objects.filter(
                award_id__in=award_ids,
                award_setup_priority=priority,
                current_modification=True).order_by('creation_date')
            for priority in priority_order))
for award in assignments:
if award.award_id in award_ids:
temp_ids.append(award.award_id)
        assignments_by_id = {
            award.id: award for award in cls.objects.filter(id__in=temp_ids)}
        award_assignments = [assignments_by_id[award_id]
                             for award_id in temp_ids
                             if award_id in assignments_by_id]
for award in award_assignments:
active_sections = award.STATUS_SECTION_MAPPING[award.status]
for section in active_sections:
for user_group in user.groups.all():
if section == 'AwardNegotiation' and user_group.name == 'Award Setup':
section = 'AwardSetup'
if section == 'AwardNegotiation' and user_group.name == 'Award Modification':
section = 'AwardModification'
if award.get_user_for_section(section) == user:
edit_url = reverse(
award.SECTION_FIELD_MAPPING[section]['edit_url'],
kwargs={
'award_pk': award.pk})
assignment_list.append((award, edit_url))
return assignment_list
@classmethod
def get_assignments_for_user(cls, user):
"""Given a user, find all currently assigned awards"""
assignments = cls.objects.filter(
(Q(award_acceptance_user=user) & Q(status=1)) |
(Q(Q(award_negotiation_user=user) & Q(status=2)) | Q(Q(award_negotiation_user=user) & Q(status=2) & Q(award_dual_negotiation=True))) |
(Q(Q(award_setup_user=user) & Q(status=2) & Q(award_dual_setup=True)) | Q(Q(award_setup_user=user) & Q(status=3) & Q(award_dual_setup=True))) |
(Q(award_setup_user=user) & Q(status=3) & Q(send_to_modification=False)) |
(Q(award_modification_user=user) & Q(status=3) & Q(Q(send_to_modification=True))) |
(Q(award_modification_user=user) & Q(status=2) & Q(Q(award_dual_modification=True))) |
(Q(subaward_user=user) & Q(status=4)) |
(Q(award_management_user=user) & Q(status=4)) |
(Q(award_closeout_user=user) & Q(status=5))
)
assignment_list = []
for award in assignments:
active_sections = award.STATUS_SECTION_MAPPING[award.status]
for section in active_sections:
for user_group in user.groups.all():
if section == 'AwardNegotiation' and user_group.name == 'Award Setup':
section = 'AwardSetup'
if section == 'AwardNegotiation' and user_group.name == 'Award Modification':
section = 'AwardModification'
if award.get_user_for_section(section) == user:
edit_url = reverse(
award.SECTION_FIELD_MAPPING[section]['edit_url'],
kwargs={
'award_pk': award.pk})
assignment_list.append((award, edit_url))
return assignment_list
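    # Hedged usage sketch (e.g. from a dashboard view; `request` is assumed):
    #
    #     for award, edit_url in Award.get_assignments_for_user(request.user):
    #         ...  # one (award, section edit URL) pair per open assignment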
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse('award_detail', kwargs={'award_pk': self.pk})
def save(self, *args, **kwargs):
# On initial save, create a dummy proposal and blank sections
if not self.pk:
super(Award, self).save(*args, **kwargs)
Proposal.objects.create(award=self, dummy=True)
AwardAcceptance.objects.create(award=self)
AwardNegotiation.objects.create(award=self)
AwardSetup.objects.create(award=self)
AwardManagement.objects.create(award=self)
AwardCloseout.objects.create(award=self)
else:
check_status = kwargs.pop('check_status', True)
try:
old_object = Award.objects.get(pk=self.pk)
except Award.DoesNotExist:
super(Award, self).save(*args, **kwargs)
return
if any([self.award_acceptance_user != old_object.award_acceptance_user, self.award_closeout_user != old_object.award_closeout_user,
self.award_management_user != old_object.award_management_user, self.award_modification_user != old_object.award_modification_user,
self.award_negotiation_user != old_object.award_negotiation_user, self.award_setup_user != old_object.award_setup_user]):
self.send_to_setup = old_object.send_to_setup
self.send_to_modification = old_object.send_to_modification
self.common_modification = old_object.common_modification
self.award_dual_modification = old_object.award_dual_modification
self.award_dual_setup = old_object.award_dual_setup
self.award_dual_negotiation = old_object.award_dual_negotiation
super(Award, self).save(*args, **kwargs)
if check_status and old_object.status > 1 and self.status == 1 and self.get_current_award_acceptance().phs_funded:
self.send_phs_funded_notification()
def get_proposals(self):
"""Gets all Proposals associated with this Award"""
proposals = []
first_proposal = self.get_first_real_proposal()
if first_proposal:
proposals.append(first_proposal)
proposals.extend(self.get_supplemental_proposals())
return proposals
def get_first_real_proposal(self):
"""Gets the first non-dummy Proposal associated with this Award"""
try:
first_proposal = self.proposal_set.get(
is_first_proposal=True,
dummy=False)
except Proposal.DoesNotExist:
first_proposal = None
return first_proposal
def get_supplemental_proposals(self):
"""Gets all non-dummy Proposals after the first one"""
first_proposal = self.get_first_real_proposal()
supplemental_proposals = None
if first_proposal:
supplemental_proposals = self.proposal_set.filter(dummy=False).exclude(id=first_proposal.id).order_by('id')
return supplemental_proposals
def get_most_recent_proposal(self):
"""Gets the most recent Proposal"""
return self.proposal_set.filter(dummy=False).order_by('id').last()
def get_current_award_acceptance(self, acceptance_flag=False):
if acceptance_flag:
acceptance_object = self.awardacceptance_set.filter(current_modification=True)
if acceptance_object:
return acceptance_object[0]
else:
acceptance_object = AwardAcceptance()
return acceptance_object
award_acceptance = self.awardacceptance_set.filter(current_modification=True).order_by('-creation_date')
if len(award_acceptance) > 1:
for award in award_acceptance[1:]:
award.current_modification = False
award.save()
return award_acceptance[0]
else:
return self.awardacceptance_set.get(current_modification=True)
def get_previous_award_acceptances(self):
return self.awardacceptance_set.filter(current_modification=False)
def get_current_award_negotiation(self):
try:
negotiation_obj = self.awardnegotiation_set.get(current_modification=True)
        except (AwardNegotiation.DoesNotExist, AwardNegotiation.MultipleObjectsReturned):
negotiation_obj = None
award_negotiation = self.awardnegotiation_set.filter(current_modification=True).order_by('-date_assigned')
if len(award_negotiation) > 1:
for award in award_negotiation[1:]:
award.current_modification = False
award.save()
return award_negotiation[0]
elif negotiation_obj:
return self.awardnegotiation_set.get(current_modification=True)
else:
return AwardNegotiation()
def get_previous_award_negotiations(self):
return self.awardnegotiation_set.filter(current_modification=False)
def get_first_pta_number(self):
        return self.ptanumber_set.order_by('id').first()
def get_award_numbers(self):
"""Returns a comma-delimited string of award numbers from all PTANumbers in this Award"""
award_numbers = self.ptanumber_set.exclude(award_number='').values_list('award_number', flat=True)
return ', '.join(award_numbers)
def get_date_assigned_to_current_stage(self):
"""Returns the date this Award was moved on to its current stage"""
dates_assigned = []
for section in self.get_active_sections():
try:
if section == 'AwardAcceptance':
correct_instance = AwardAcceptance.objects.get(award=self, current_modification=True)
local_date = correct_instance.creation_date.astimezone(tzlocal())
dates_assigned.append(local_date.strftime('%m/%d/%Y'))
elif section == 'Subaward' or section == 'AwardManagement':
if Subaward.objects.filter(award=self).count() > 0:
correct_instance = Subaward.objects.filter(award=self).latest('creation_date')
local_date = correct_instance.creation_date.astimezone(tzlocal())
dates_assigned.append(local_date.strftime('%m/%d/%Y'))
else:
correct_instance = AwardManagement.objects.get(award=self)
local_date = correct_instance.date_assigned.astimezone(tzlocal())
dates_assigned.append(local_date.strftime('%m/%d/%Y'))
else:
if section == 'AwardNegotiation':
correct_instance = AwardNegotiation.objects.get(award=self, current_modification=True)
elif section == 'AwardSetup':
correct_instance = AwardSetup.objects.get(award=self)
elif section == 'AwardCloseout':
correct_instance = AwardCloseout.objects.get(award=self)
if correct_instance.date_assigned:
local_date = correct_instance.date_assigned.astimezone(tzlocal())
dates_assigned.append(local_date.strftime('%m/%d/%Y'))
            except Exception:
                # A section may be missing or unassigned for this award; skip it
                pass
dates_assigned = list(set(dates_assigned))
if len(dates_assigned) > 0:
return ', '.join(dates_assigned)
else:
return ''
def get_user_for_section(self, section, modification_flag=False):
"""Uses the SECTION_PARENT_MAPPING to determine the user assigned to the given section"""
if section == 'AwardSetup' and self.award_dual_modification:
section = 'AwardModification'
if modification_flag:
section = 'AwardModification'
if section in self.SECTION_PARENT_MAPPING:
section = self.SECTION_PARENT_MAPPING[section]
try:
return getattr(
self,
self.SECTION_FIELD_MAPPING[section]['user_field'])
except TypeError:
return None
def get_current_award_status_for_display(self):
return 'Award Negotiation and Setup'
    def get_award_setup_modification_status(self):
        return self.status == 2
def get_active_sections(self, dual_mode=False):
"""Gets the names of the currently active sections"""
if self.status == self.AWARD_SETUP_STATUS:
active_sections = ['AwardSetup']
elif dual_mode:
active_sections = ['AwardNegotiation', 'AwardSetup']
else:
active_sections = self.STATUS_SECTION_MAPPING[self.status]
return active_sections
def get_users_for_dual_active_sections(self):
active_users = []
for section in ['AwardNegotiation', 'AwardSetup']:
user = self.get_user_for_section(section)
if user:
active_users.append(user)
return active_users
def get_users_for_negotiation_and_moidification_sections(self):
active_users = []
for section in ['AwardNegotiation', 'AwardModification']:
user = self.get_user_for_section(section)
if user:
active_users.append(user)
return active_users
def get_users_for_active_sections(self, section_flag=False):
"""Gets the users assigned to the currently active sections"""
active_users = []
if self.status == 3 and self.send_to_modification:
user_section = "AwardModification"
user = self.get_user_for_section(user_section)
if user:
active_users.append(user)
return active_users
for section in self.get_active_sections():
user = self.get_user_for_section(section)
if user:
active_users.append(user)
return active_users
def get_current_active_users(self):
"""Returns a comma-delimited list of users assigned to the currently active sections"""
if self.award_dual_setup and self.award_dual_negotiation and self.status == 2:
users = self.get_users_for_dual_active_sections()
elif self.award_dual_modification and self.status == 2:
users = self.get_users_for_negotiation_and_moidification_sections()
else:
users = self.get_users_for_active_sections()
names = []
for user in users:
names.append(user.get_full_name())
return ', '.join(names)
def get_award_priority_number(self):
award_accept = self.awardacceptance_set.get(award_id=self.id, current_modification=True)
if award_accept.award_setup_priority:
return AwardAcceptance.PRIORITY_STATUS_DICT[award_accept.award_setup_priority]
else:
return ''
def get_edit_status_for_section(self, section, setup_flow_flag=False):
"""Gets the edit_status for the given section"""
if setup_flow_flag:
return self.SECTION_FIELD_MAPPING['AwardNegotiation']['edit_status']
if section in self.SECTION_PARENT_MAPPING:
section = self.SECTION_PARENT_MAPPING[section]
return self.SECTION_FIELD_MAPPING[section]['edit_status']
def get_editable_sections(self):
"""Returns a list of editable sections.
A section is editable if the Award's status is at or beyond that section
"""
if self.award_dual_negotiation and self.award_dual_setup:
editable_sections = [section for section in self.SECTION_FIELD_MAPPING.keys(
) if self.SECTION_FIELD_MAPPING[section]['edit_status'] <= self.status + 1]
else:
editable_sections = [section for section in self.SECTION_FIELD_MAPPING.keys(
) if self.SECTION_FIELD_MAPPING[section]['edit_status'] <= self.status]
return editable_sections
def send_email_update_if_subaward_user(self):
"""Sends an email update to subaward user if the award send to award setup"""
recipients = [self.get_user_for_section('Subaward').email]
pi_name = ''
most_recent_proposal = self.get_most_recent_proposal()
if most_recent_proposal:
pi_name = ' (PI: {0})'.format(most_recent_proposal.principal_investigator)
send_mail(
'OVPR ATP Update',
'Award for proposal #%s%s has been assigned to Award Setup in ATP. Go to %s%s to review it.' %
(self.id,
pi_name,
settings.EMAIL_URL_HOSTNAME,
self.get_absolute_url()),
'reply<EMAIL>',
recipients,
fail_silently=False)
def send_email_update(self, modification_flag=False):
"""Sends an email update to a user when they've been assigned an active section"""
if self.status == 1:
origional_text = 'Original Award'
workflow = 'AwardAcceptance'
acceptance_count = AwardAcceptance.objects.filter(award=self).count()
if acceptance_count < 2:
self.record_current_state_to_atptrail(origional_text, workflow)
else:
modification = "Modification #%s" % (acceptance_count - 1)
self.record_current_state_to_atptrail(modification, workflow)
if modification_flag:
recipients = [self.get_user_for_section('AwardSetup', modification_flag).email]
else:
if self.award_dual_negotiation and self.award_dual_setup:
recipients = [user.email for user in self.get_users_for_dual_active_sections()]
elif self.award_dual_modification:
recipients = [user.email for user in self.get_users_for_negotiation_and_moidification_sections()]
else:
recipients = [user.email for user in self.get_users_for_active_sections()]
pi_name = ''
most_recent_proposal = self.get_most_recent_proposal()
if most_recent_proposal:
pi_name = ' (PI: {0})'.format(most_recent_proposal.principal_investigator)
send_mail(
'OVPR ATP Update',
'%s%s has been assigned to you in ATP. Go to %s%s to review it.' %
(self,
pi_name,
settings.EMAIL_URL_HOSTNAME,
self.get_absolute_url()),
'<EMAIL>',
recipients,
fail_silently=False)
def send_award_setup_notification(self):
"""Sends an email to the AwardAcceptance user to let them know the award is in Award Setup"""
recipients = [self.get_user_for_section('AwardAcceptance').email]
send_mail(
'OVPR ATP Update',
'%s has been sent to the Award Setup step. This email is simply a notification \
- you are not assigned to perform Award Setup for this award. \
You can view it here: %s%s' %
(self,
settings.EMAIL_URL_HOSTNAME,
self.get_absolute_url()),
'<EMAIL>',
recipients,
fail_silently=False)
def send_fcoi_cleared_notification(self, fcoi_cleared_date):
"""Sends an email to the AwardSetup user when the Award's fcoi_cleared_date is set"""
recipients = [self.get_user_for_section('AwardSetup').email]
send_mail('OVPR ATP Update',
'The FCOI cleared date has been entered on %s - it is %s. \
You can view it here: %s%s' % (self, fcoi_cleared_date, settings.EMAIL_URL_HOSTNAME, self.get_absolute_url()),
'<EMAIL>',
recipients, fail_silently=False)
def send_phs_funded_notification(self):
"""Sends an email to the PHS_FUNDED_RECIPIENTS when the Award has been marked as PHS funded"""
recipients = settings.PHS_FUNDED_RECIPIENTS
send_mail('OVPR ATP Update',
'PHS funded for %s has been received and requires FCOI verification. \
Please go to %s%s to review it.' % (self, settings.EMAIL_URL_HOSTNAME, self.get_absolute_url()),
'<EMAIL>',
recipients, fail_silently=False)
def send_phs_funded_notification_with_modification(self):
"""Sends an email to the PHS_FUNDED_RECIPIENTS when and Award Modification is created
and it's marked as PHS funded
"""
recipients = settings.PHS_FUNDED_RECIPIENTS
send_mail('OVPR ATP Update',
'PHS funded for %s (Modification) has been received and may require FCOI verification. \
Please go to %s%s to review it.' % (self, settings.EMAIL_URL_HOSTNAME, self.get_absolute_url()),
'<EMAIL>',
recipients, fail_silently=False)
def set_date_assigned_for_active_sections(self):
"""Sets the date_assigned, if appliccable, for the currently active section(s)"""
for section in self.get_active_sections():
if section in self.SECTION_FIELD_MAPPING:
current_mod = Q()
if section in ['AwardNegotiation', 'AwardAcceptance']:
current_mod = Q(current_modification=True)
                for instance in globals()[section].objects.filter(current_mod, award=self):
try:
instance.set_date_assigned()
except AttributeError:
pass
def record_wait_for_reason(self, workflow_old, workflow_new, model_name):
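        """Creates or completes an ATPAuditTrail entry for the given wait-for reason codes"""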
        WAIT_FOR = self.WAIT_FOR  # reuse the class-level reason-code mapping
count_value = AwardAcceptance.objects.filter(award=self).count()
if count_value < 2:
origional_text = 'Original Award'
else:
origional_text = "Modification #%s" % (count_value - 1)
user_name = self.get_user_full_name(model_name)
if workflow_new:
try:
trail_object = ATPAuditTrail.objects.get(award=self.id, modification=origional_text,
workflow_step=WAIT_FOR[workflow_new], assigned_user=user_name)
            except (ATPAuditTrail.DoesNotExist, ATPAuditTrail.MultipleObjectsReturned):
trail_object = None
if trail_object:
trail_object.date_completed = datetime.now()
else:
trail_object = ATPAuditTrail(award=self.id, modification=origional_text, workflow_step=WAIT_FOR[workflow_new],
date_created=datetime.now(), assigned_user=user_name)
trail_object.save()
if workflow_old:
try:
trail_object = ATPAuditTrail.objects.get(award=self.id, modification=origional_text,
workflow_step=WAIT_FOR[workflow_old], assigned_user=user_name)
            except (ATPAuditTrail.DoesNotExist, ATPAuditTrail.MultipleObjectsReturned):
trail_object = None
if trail_object:
trail_object.date_completed = datetime.now()
trail_object.save()
elif 'Modification' in origional_text:
pass
else:
trail_object = ATPAuditTrail(award=self.id, modification=origional_text, workflow_step=WAIT_FOR[workflow_old],
date_created=datetime.now(), assigned_user=user_name)
trail_object.save()
def record_current_state_to_atptrail(self, modification, workflow):
user_name = self.get_user_full_name(workflow)
try:
trail_object = ATPAuditTrail.objects.get(award=self.id, modification=modification, workflow_step=workflow,
assigned_user=user_name)
        except (ATPAuditTrail.DoesNotExist, ATPAuditTrail.MultipleObjectsReturned):
trail_object = None
if trail_object:
trail_object.date_completed = datetime.now()
else:
trail_object = ATPAuditTrail(award=self.id, modification=modification, workflow_step=workflow,
date_created=datetime.now(), assigned_user=user_name)
trail_object.save()
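    # Audit-trail semantics, as a hedged sketch: the first call for a given
    # (award, modification, workflow_step, assigned_user) combination creates
    # the ATPAuditTrail row with date_created; a repeat call stamps
    # date_completed on that same row.
    #
    #     award.record_current_state_to_atptrail('Original Award', 'AwardSetup')
    #     # ... later, when the step finishes ...
    #     award.record_current_state_to_atptrail('Original Award', 'AwardSetup')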
def get_user_full_name(self, section):
user = self.get_user_for_section(section)
if user:
return user.first_name + ' ' + user.last_name
else:
return None
def update_completion_date_in_atp_award(self):
origional_text = 'Original Award'
acceptance_workflow = 'AwardAcceptance'
negotiation_workflow = 'AwardNegotiation'
setup_workflow = 'AwardSetup'
modification_workflow = 'AwardModification'
subaward_workflow = 'Subaward'
management_workflow = 'AwardManagement'
closeout_workflow = 'AwardCloseout'
count_value = AwardAcceptance.objects.filter(award=self).count()
modification = "Modification #%s" % (count_value - 1)
if all([self.status == 2, self.award_dual_modification]):
acceptance_object = self.get_current_award_acceptance()
acceptance_object.acceptance_completion_date = timezone.localtime(timezone.now())
acceptance_object.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, acceptance_workflow)
self.record_current_state_to_atptrail(origional_text, negotiation_workflow)
else:
self.record_current_state_to_atptrail(modification, acceptance_workflow)
self.record_current_state_to_atptrail(modification, negotiation_workflow)
self.record_current_state_to_atptrail(modification, modification_workflow)
elif all([self.status == 2, self.award_dual_setup, self.award_dual_negotiation]):
acceptance_object = self.get_current_award_acceptance()
acceptance_object.acceptance_completion_date = timezone.localtime(timezone.now())
acceptance_object.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, acceptance_workflow)
self.record_current_state_to_atptrail(origional_text, negotiation_workflow)
self.record_current_state_to_atptrail(origional_text, setup_workflow)
else:
self.record_current_state_to_atptrail(modification, acceptance_workflow)
self.record_current_state_to_atptrail(modification, negotiation_workflow)
self.record_current_state_to_atptrail(modification, setup_workflow)
elif self.status == 2:
acceptance_object = self.get_current_award_acceptance()
acceptance_object.acceptance_completion_date = timezone.localtime(timezone.now())
acceptance_object.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, acceptance_workflow)
self.record_current_state_to_atptrail(origional_text, negotiation_workflow)
else:
self.record_current_state_to_atptrail(modification, acceptance_workflow)
self.record_current_state_to_atptrail(modification, negotiation_workflow)
elif self.status == 3:
negotiation_user = self.get_user_for_section(negotiation_workflow)
if negotiation_user:
negotiation_object = self.get_current_award_negotiation()
negotiation_object.negotiation_completion_date = timezone.localtime(timezone.now())
negotiation_object.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, negotiation_workflow)
else:
self.record_current_state_to_atptrail(modification, negotiation_workflow)
else:
acceptance_object = self.get_current_award_acceptance()
acceptance_object.acceptance_completion_date = timezone.localtime(timezone.now())
acceptance_object.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, acceptance_workflow)
else:
self.record_current_state_to_atptrail(modification, acceptance_workflow)
if all([not self.award_dual_modification, not self.send_to_modification, not self.award_dual_setup]):
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, setup_workflow)
else:
self.record_current_state_to_atptrail(modification, setup_workflow)
elif self.send_to_modification and not self.send_to_setup:
self.record_current_state_to_atptrail(modification, modification_workflow)
elif self.status == 4:
if all([not self.award_dual_modification, not self.send_to_modification, not self.award_dual_setup]):
setup_object = AwardSetup.objects.get(award=self)
                if not (setup_object.setup_completion_date and count_value == 1):
                    setup_object.setup_completion_date = timezone.localtime(timezone.now())
                    setup_object.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, setup_workflow)
else:
self.record_current_state_to_atptrail(modification, setup_workflow)
elif all([not self.send_to_modification, self.award_dual_setup, self.award_dual_negotiation]):
pass
elif all([self.award_dual_modification, self.common_modification]):
pass
elif self.award_dual_modification or self.send_to_modification:
modification_object = AwardModification.objects.all().filter(award=self, is_edited=True).order_by('-id')
if modification_object:
modification_obj = modification_object[0]
modification_obj.modification_completion_date = timezone.localtime(timezone.now())
modification_obj.save()
self.record_current_state_to_atptrail(modification, modification_workflow)
if self.subaward_user:
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, subaward_workflow)
else:
self.record_current_state_to_atptrail(modification, subaward_workflow)
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, management_workflow)
else:
self.record_current_state_to_atptrail(modification, management_workflow)
elif self.status == 5:
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, closeout_workflow)
else:
self.record_current_state_to_atptrail(modification, closeout_workflow)
elif self.status == 6:
closeout = AwardCloseout.objects.get(award=self)
closeout.closeout_completion_date = timezone.localtime(timezone.now())
closeout.save()
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, closeout_workflow)
else:
self.record_current_state_to_atptrail(modification, closeout_workflow)
def move_to_next_step(self, section=None):
"""Moves this Award to the next step in the process"""
# A while loop because we want to advance the status until we find the next
# section with an assigned user
while True:
# We have to do extra work to make sure both Subawards and Award Management
# are complete before we move to the next status
if section in ['Subaward', 'AwardManagement']:
origional_text = 'Original Award'
subaward_workflow = 'Subaward'
management_workflow = 'AwardManagement'
count_value = AwardAcceptance.objects.filter(award=self).count()
modification = "Modification #%s" % (count_value - 1)
if section == 'Subaward' or self.get_user_for_section(
'Subaward') is None:
self.subaward_done = True
if self.subaward_user:
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, subaward_workflow)
else:
self.record_current_state_to_atptrail(modification, subaward_workflow)
try:
correct_instance = Subaward.objects.filter(award=self).latest('creation_date')
if correct_instance:
correct_instance.subaward_completion_date = timezone.localtime(timezone.now())
correct_instance.save()
                    except Subaward.DoesNotExist:
                        pass
if section == 'AwardManagement' or self.get_user_for_section(
'AwardManagement') is None:
self.award_management_done = True
if count_value < 2:
self.record_current_state_to_atptrail(origional_text, management_workflow)
else:
self.record_current_state_to_atptrail(modification, management_workflow)
management_object = AwardManagement.objects.get(award=self)
management_object.management_completion_date = timezone.localtime(timezone.now())
management_object.save()
if not (self.subaward_done and self.award_management_done):
self.save()
return False
if self.status == 2 and self.award_dual_negotiation:
self.award_dual_negotiation = False
self.save()
if self.status == 3 and self.award_dual_setup:
self.award_dual_setup = False
self.save()
if self.status == 4 and self.award_dual_modification:
self.award_dual_modification = False
self.save()
if self.status == 2 and self.send_to_modification:
modification_object = AwardModification.objects.all().filter(award=self, is_edited=False).order_by('-id')
if modification_object:
section_object = modification_object[0]
section_object.date_assigned = timezone.localtime(timezone.now())
section_object.save()
self.status += 1
if self.status == self.END_STATUS:
self.save()
break
elif not all(user is None for user in self.get_users_for_active_sections()):
self.set_date_assigned_for_active_sections()
self.save()
break
if self.status not in (self.START_STATUS, self.END_STATUS) and not self.award_dual_setup:
self.send_email_update()
# Send an additional notification when we reach Award Setup
if self.status == 3:
self.awardsetup.copy_from_proposal(self.get_most_recent_proposal())
self.send_award_setup_notification()
if all([self.status == 3, self.subaward_user, not self.send_to_modification, not self.award_dual_setup]):
self.send_email_update_if_subaward_user()
self.update_completion_date_in_atp_award()
return True
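    # Hedged usage sketch: completing Award Intake. The while loop above keeps
    # advancing `status` until it reaches a stage with an assigned user (or
    # END_STATUS), emailing the new assignees along the way:
    #
    #     award.move_to_next_step()
    #     # e.g. status 1 -> 2 when an Award Negotiation user is assigned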
def move_award_to_multiple_steps(self, dual_mode):
""" Move award to multiple steps so that multiple teams can work parallel """
if self.award_negotiation_user:
self.status += 1
else:
if self.status == 1:
self.status += 2
try:
setup_obj = AwardSetup.objects.get(award=self)
except AwardSetup.DoesNotExist:
setup_obj = None
if setup_obj:
setup_obj.date_assigned = timezone.localtime(timezone.now())
setup_obj.save()
if dual_mode:
try:
setup_object = AwardSetup.objects.get(award=self)
except AwardSetup.DoesNotExist:
setup_object = None
try:
negotiation_object = AwardNegotiation.objects.get(award=self, current_modification=True)
except AwardNegotiation.DoesNotExist:
negotiation_object = None
if negotiation_object:
negotiation_object.date_assigned = timezone.localtime(timezone.now())
negotiation_object.save()
if setup_object:
setup_object.date_assigned = timezone.localtime(timezone.now())
setup_object.save()
self.award_dual_negotiation = True
self.award_dual_setup = True
self.save()
if self.status not in (self.START_STATUS, self.END_STATUS):
self.send_email_update()
if all([self.status == 2, self.subaward_user, self.award_dual_setup]):
self.send_email_update_if_subaward_user()
self.update_completion_date_in_atp_award()
return True
def move_award_to_negotiation_and_modification(self, dual_modification):
""" Move award to award negotiation and modification steps so that these two teams can work parallel """
if self.award_negotiation_user:
self.status += 1
try:
negotiation_object = AwardNegotiation.objects.get(award=self, current_modification=True)
            except AwardNegotiation.DoesNotExist:
negotiation_object = None
if negotiation_object:
if not negotiation_object.date_assigned:
negotiation_object.date_assigned = timezone.localtime(timezone.now())
negotiation_object.save()
else:
if self.status == 1:
self.status += 2
try:
setup_obj = AwardSetup.objects.get(award=self)
except AwardSetup.DoesNotExist:
setup_obj = None
if setup_obj:
setup_obj.date_assigned = timezone.localtime(timezone.now())
setup_obj.save()
modification_object = AwardModification.objects.all().filter(award=self).order_by('-id')
if modification_object:
section_object = modification_object[0]
section_object.date_assigned = timezone.localtime(timezone.now())
section_object.save()
if dual_modification:
self.common_modification = True
self.award_dual_modification = True
self.save()
if self.status not in (self.START_STATUS, self.END_STATUS):
self.send_email_update()
self.update_completion_date_in_atp_award()
return True
def move_setup_or_modification_step(self, modification_flag=False, setup_flag=False):
if self.award_negotiation_user:
self.status += 1
try:
negotiation_object = AwardNegotiation.objects.get(award=self, current_modification=True)
            except AwardNegotiation.DoesNotExist:
negotiation_object = None
if negotiation_object:
if not negotiation_object.date_assigned:
negotiation_object.date_assigned = timezone.localtime(timezone.now())
negotiation_object.save()
else:
if self.status == 1:
self.status += 2
try:
setup_obj = AwardSetup.objects.get(award=self)
except AwardSetup.DoesNotExist:
setup_obj = None
if setup_obj:
setup_obj.date_assigned = timezone.localtime(timezone.now())
setup_obj.save()
if modification_flag:
self.send_to_modification = True
self.save()
if setup_flag:
self.send_email_update()
if self.status == self.AWARD_SETUP_STATUS and modification_flag:
self.send_email_update()
# Send an additional notification when we reach Award Setup
if self.status == 3:
self.awardsetup.copy_from_proposal(self.get_most_recent_proposal())
if modification_flag:
try:
modification = AwardModification.objects.get(award_id=self.id, is_edited=False)
except AwardModification.DoesNotExist:
modification = None
if modification:
                modification.is_edited = True
modification.save()
award_setup_object = AwardSetup.objects.filter(award=self).values()
for setup in award_setup_object:
del(setup['id'], setup['is_edited'], setup['setup_completion_date'], setup['wait_for_reson'])
award_modification_object = AwardModification.objects.create(**setup)
self.send_to_modification = True
award_modification_object.save()
self.save()
self.update_completion_date_in_atp_award()
return True
# Django admin helper methods
def get_section_admin_link(self, section):
"""Gets the link to the Django Admin site for the given section"""
return format_html(
'<a href="{0}">{1}</a>',
reverse(
'admin:awards_%s_change' %
section.__class__.__name__.lower(),
args=(
section.id,
)),
section)
def get_foreignkey_admin_link(self, section_class):
"""Gets the link to the Django Admin site for the given section that has a
foreign key to this Award
"""
section_objects = section_class.objects.filter(award=self)
if len(section_objects) == 0:
return '(None)'
elif len(section_objects) == 1:
return self.get_section_admin_link(section_objects[0])
else:
return format_html(
'<a href="{0}?award__id__exact={1}">{2}s</a>',
reverse(
'admin:awards_%s_changelist' %
section_class.__name__.lower()),
self.id,
section_class._meta.verbose_name.capitalize())
# The following methods are referenced in the list_display section of the AwardAdmin class.
# They return the Django Admin links to their respective sections
def proposalintake_admin(self):
return self.get_section_admin_link(self.proposalintake)
def proposal_admin(self):
return format_html('<a href="{0}?award__id__exact={1}">{2}</a>',
reverse('admin:awards_proposal_changelist'),
self.id,
'Proposals')
def awardacceptance_admin(self):
return self.get_foreignkey_admin_link(AwardAcceptance)
def awardnegotiation_admin(self):
return self.get_foreignkey_admin_link(AwardNegotiation)
def awardsetup_admin(self):
return self.get_section_admin_link(self.awardsetup)
def subaward_admin(self):
return self.get_foreignkey_admin_link(Subaward)
def awardmanagement_admin(self):
return self.get_section_admin_link(self.awardmanagement)
def awardcloseout_admin(self):
return self.get_section_admin_link(self.awardcloseout)
class AwardSection(FieldIteratorMixin, models.Model):
"""Abstract base class for all award sections"""
HIDDEN_FIELDS = ['award', 'comments', 'is_edited']
HIDDEN_SEARCH_FIELDS = []
FIELDSETS = []
comments = models.TextField(blank=True, verbose_name='Comments')
is_edited = models.BooleanField(default=False)
class Meta:
abstract = True
def get_class_name(self):
"""Gets the Python class name"""
return self.__class__.__name__
def get_verbose_class_name(self):
return self._meta.verbose_name
def get_most_recent_revision(self):
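        """Returns a (user full name, creation date) tuple for the latest revision, or ('ATP', None)"""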
latest_revision = reversion.get_for_object(self)
if latest_revision:
latest_revision = latest_revision[0].revision
user = latest_revision.user.get_full_name()
else:
user = 'ATP'
if latest_revision:
return (user, latest_revision.date_created)
else:
return (user, None)
class AssignableAwardSection(AwardSection):
"""Base model class for an Award section that can be assigned to a user"""
date_assigned = models.DateTimeField(blank=True, null=True, verbose_name='Date Assigned')
class Meta:
abstract = True
def set_date_assigned(self):
self.date_assigned = datetime.now()
self.save()
class ProposalIntake(AwardSection):
"""Model for the ProposalIntake data"""
user_list = User.objects.filter(is_active=True).order_by('first_name')
users = [(user.first_name + ' ' + user.last_name, user.first_name + ' ' + user.last_name) for user in user_list]
PROPOSAL_STATUS_CHOICES = (
('NS', 'Cancelled - not submitted'),
('PE', 'Planned'),
('RO', 'Routing'),
('SB', 'Submitted'),
)
PROPOSAL_OUTCOME_CHOICES = (
('AW', 'Awarded'),
('UN', 'Unfunded'),
)
SPA1_CHOICES = (
('', ''),
)
SPA1_CHOICES = tuple(users) if users else SPA1_CHOICES
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'principal_investigator',
'agency',
'prime_sponsor',
'program_announcement',
'announcement_link',
'proposal_due_to_sponsor',
'proposal_due_to_ovpr',
'proposal_due_to_aor',
'school',
'phs_funded',
'fcoi_submitted',
'date_received',
'proposal_status',
'proposal_outcome',
'proposal_number',
'five_days_requested',
'five_days_granted',
'jit_request',
'jit_response_submitted',
'creation_date']
minimum_fields = (
)
award = models.OneToOneField(Award, null=True, blank=True)
creation_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name='Date Created')
principal_investigator = models.ForeignKey(
AwardManager,
blank=True,
null=True,
limit_choices_to={
'active': True},
verbose_name='Principal Investigator')
agency = models.CharField(max_length=255, blank=True)
prime_sponsor = models.CharField(
max_length=255,
blank=True,
verbose_name='Prime (if GW is subawardee)')
program_announcement = models.CharField(
max_length=50,
blank=True,
verbose_name='Program announcement number')
announcement_link = models.CharField(max_length=250, blank=True)
proposal_due_to_sponsor = models.DateField(null=True, blank=True)
proposal_due_to_ovpr = models.DateField(
null=True,
blank=True,
verbose_name='Proposal due to OVPR')
proposal_due_to_aor = models.DateField(
null=True,
blank=True,
verbose_name='Proposal due to AOR')
spa1 = models.CharField(blank=False, verbose_name='SPA I*', max_length=150, choices=SPA1_CHOICES, null=True)
school = models.CharField(max_length=150, blank=True)
department = models.ForeignKey(
AwardOrganization,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Department')
phs_funded = models.NullBooleanField(verbose_name='PHS funded?')
fcoi_submitted = models.NullBooleanField(
verbose_name='FCOI disclosure submitted for each investigator?')
date_received = models.DateField(
null=True,
blank=True,
verbose_name='Date received by SPA I')
proposal_status = models.CharField(
choices=PROPOSAL_STATUS_CHOICES,
max_length=2,
blank=True)
proposal_outcome = models.CharField(
choices=PROPOSAL_OUTCOME_CHOICES,
max_length=2,
blank=True)
proposal_number = models.CharField(max_length=15, blank=True, verbose_name="Cayuse Proposal Number")
five_days_requested = models.DateField(
null=True,
blank=True,
verbose_name='Date 5 days waiver requested')
five_days_granted = models.DateField(
null=True,
blank=True,
verbose_name='Date 5 days waiver granted')
jit_request = models.NullBooleanField(verbose_name='JIT request?')
jit_response_submitted = models.DateField(
null=True,
blank=True,
verbose_name='JIT response submitted?')
five_days_waiver_request = models.NullBooleanField(
null=True,
blank=True,
verbose_name="5 day waiver granted?")
def __unicode__(self):
return u'Proposal Intake %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
if self.award:
return reverse(
'edit_proposal_intake',
kwargs={
'award_pk': self.award.pk})
else:
return reverse(
'edit_standalone_proposal_intake',
kwargs={
'proposalintake_pk': self.id})
def get_proposal_status(self):
"""Gets the human-readable value of the Proposal's status"""
return get_value_from_choices(self.PROPOSAL_STATUS_CHOICES, self.proposal_status)
def get_proposal_outcome(self):
return get_value_from_choices(self.PROPOSAL_OUTCOME_CHOICES, self.proposal_outcome)
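    # Hedged example, assuming get_value_from_choices (defined elsewhere in
    # this module) maps a stored code to its display label:
    #
    #     intake = ProposalIntake(proposal_status='RO')
    #     intake.get_proposal_status()  # -> 'Routing'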
class Proposal(AwardSection):
"""Model for the Proposal data"""
# HIDDEN_FIELDS aren't rendered by FieldIteratorMixin
HIDDEN_FIELDS = AwardSection.HIDDEN_FIELDS + [
'dummy',
'is_first_proposal',
'lotus_id',
'lotus_agency_name',
'lotus_department_code',
'employee_id',
'proposal_id']
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'creation_date',
'sponsor_deadline',
'is_subcontract',
'federal_identifier',
'is_change_in_grantee_inst',
'responsible_entity',
'departmental_id_primary',
'departmental_id_secondary',
'departmental_name_primary',
'departmental_name_secondary',
'are_vertebrate_animals_used',
'is_iacuc_review_pending',
'iacuc_protocol_number',
'iacuc_approval_date',
'are_human_subjects_used',
'is_irb_review_pending',
'irb_protocol_number',
'irb_review_date',
'budget_first_per_start_date',
'budget_first_per_end_date',
'cost_shr_mand_is_committed',
'cost_shr_mand_source',
'cost_shr_vol_is_committed',
'cost_shr_vol_source',
'tracking_number',
'total_costs_y1',
'total_costs_y2',
'total_costs_y3',
'total_costs_y4',
'total_costs_y5',
'total_costs_y6',
'total_costs_y7',
'total_costs_y8',
'total_costs_y9',
'total_costs_y10',
'total_direct_costs_y1',
'total_direct_costs_y2',
'total_direct_costs_y3',
'total_direct_costs_y4',
'total_direct_costs_y5',
'total_direct_costs_y6',
'total_direct_costs_y7',
'total_direct_costs_y8',
'total_direct_costs_y9',
'total_direct_costs_y10',
'total_indirect_costs_y1',
'total_indirect_costs_y2',
'total_indirect_costs_y3',
'total_indirect_costs_y4',
'total_indirect_costs_y5',
'total_indirect_costs_y6',
'total_indirect_costs_y7',
'total_indirect_costs_y8',
'total_indirect_costs_y9',
'total_indirect_costs_y10']
# Fieldsets are grouped together at the top of the section under the title
FIELDSETS = [{'title': 'Proposal Summary',
'fields': ('creation_date',
'proposal_number',
'proposal_title',
'proposal_type',
'principal_investigator',
'project_title',
'department_name',
'division_name',
'agency_name',
'is_subcontract',
'who_is_prime',
'tracking_number',
'project_start_date',
'project_end_date',
'submission_date',
'sponsor_deadline'
)},
{'title': 'Project Data',
'fields': ('agency_type',
'application_type_code',
'federal_identifier',
'is_change_in_grantee_inst',
'project_type'
)},
{'title': 'Project Administration',
'fields': ('responsible_entity',
'departmental_id_primary',
'departmental_id_secondary',
'departmental_name_primary',
'departmental_name_secondary'
)},
{'title': 'Compliance: Animal Subjects',
'fields': ('are_vertebrate_animals_used',
'is_iacuc_review_pending',
'iacuc_protocol_number',
'iacuc_approval_date'
)},
{'title': 'Compliance: Human Subjects',
'fields': ('are_human_subjects_used',
'is_irb_review_pending',
'irb_protocol_number',
'irb_review_date'
)},
{'title': 'Compliance: Lab Safety',
'fields': ('is_haz_mat',
)},
{'title': 'Compliance: Export Controls',
'fields': ('will_involve_foreign_nationals',
'will_involve_shipment',
'will_involve_foreign_contract'
)},
{'title': 'Budget Data',
'fields': ('budget_first_per_start_date',
'budget_first_per_end_date',
'cost_shr_mand_is_committed',
'cost_shr_mand_amount',
'cost_shr_mand_source',
'cost_shr_vol_is_committed',
'cost_shr_vol_amount',
'cost_shr_vol_source'
)}
]
# Display tables are displayed at the end of a section in an HTML table
    DISPLAY_TABLES = [
        {
            'title': 'Budgeted Costs',
            'columns': ('Direct Costs', 'Indirect Costs', 'Total Costs'),
            'rows': [
                {'label': 'Total',
                 'fields': ('total_direct_costs', 'total_indirect_costs', 'total_costs')},
                {'label': 'Y1',
                 'fields': ('total_direct_costs_y1', 'total_indirect_costs_y1', 'total_costs_y1')},
                {'label': 'Y2',
                 'fields': ('total_direct_costs_y2', 'total_indirect_costs_y2', 'total_costs_y2')},
                {'label': 'Y3',
                 'fields': ('total_direct_costs_y3', 'total_indirect_costs_y3', 'total_costs_y3')},
                {'label': 'Y4',
                 'fields': ('total_direct_costs_y4', 'total_indirect_costs_y4', 'total_costs_y4')},
                {'label': 'Y5',
                 'fields': ('total_direct_costs_y5', 'total_indirect_costs_y5', 'total_costs_y5')},
                {'label': 'Y6',
                 'fields': ('total_direct_costs_y6', 'total_indirect_costs_y6', 'total_costs_y6')},
                {'label': 'Y7',
                 'fields': ('total_direct_costs_y7', 'total_indirect_costs_y7', 'total_costs_y7')},
                {'label': 'Y8',
                 'fields': ('total_direct_costs_y8', 'total_indirect_costs_y8', 'total_costs_y8')},
                {'label': 'Y9',
                 'fields': ('total_direct_costs_y9', 'total_indirect_costs_y9', 'total_costs_y9')},
                {'label': 'Y10',
                 'fields': ('total_direct_costs_y10', 'total_indirect_costs_y10', 'total_costs_y10')},
            ]
        }
    ]
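    # Each DISPLAY_TABLES entry is rendered by FieldIteratorMixin's
    # get_display_tables(): one table per entry, one row per 'rows' item,
    # with the 'columns' tuple as the header.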
# Entries here appear on the EAS Award Setup report screen
EAS_REPORT_FIELDS = [
'proposal_id',
'project_title',
'department_name',
'is_subcontract',
'who_is_prime',
'agency_name',
]
# A small mapping to help figure out which field data to use when conforming
# Lotus Notes legacy data to EAS data when importing a proposal from Lotus
LOTUS_FK_LOOKUPS = {
'lotus_agency_name': 'agency_name',
'lotus_department_code': 'department_name',
'employee_id': 'principal_investigator'
}
award = models.ForeignKey(
Award,
null=True,
blank=True,
on_delete=models.SET_NULL)
dummy = models.BooleanField(default=False)
is_first_proposal = models.BooleanField(default=False)
creation_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name='Date Created')
lotus_id = models.CharField(max_length=20, blank=True)
employee_id = models.CharField(
max_length=40,
blank=True,
verbose_name='Employee ID')
proposal_id = models.BigIntegerField(
unique=True,
null=True,
blank=True,
verbose_name='Proposal ID')
proposal_number = models.CharField(
max_length=50,
null=True,
blank=True,
verbose_name='Proposal Number')
proposal_title = models.CharField(
max_length=256,
blank=True,
verbose_name='Internal Proposal Title')
proposal_type = models.CharField(max_length=256, blank=True)
principal_investigator = models.ForeignKey(
AwardManager,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Principal Investigator')
lotus_department_code = models.CharField(max_length=128, blank=True)
department_name = models.ForeignKey(
AwardOrganization,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Department Code & Name')
division_name = models.CharField(max_length=150, blank=True)
agency_name = models.ForeignKey(
FundingSource,
null=True,
blank=True,
limit_choices_to={
'active': True})
is_subcontract = models.CharField(
max_length=10,
blank=True,
verbose_name='Is this a subcontract?')
who_is_prime = models.ForeignKey(
PrimeSponsor,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Prime Sponsor')
tracking_number = models.CharField(
max_length=15,
blank=True,
verbose_name='Grants.gov tracking number')
project_start_date = models.DateField(null=True, blank=True)
project_end_date = models.DateField(null=True, blank=True)
submission_date = models.DateField(null=True, blank=True)
sponsor_deadline = models.DateField(null=True, blank=True)
lotus_agency_name = models.CharField(max_length=250, blank=True)
project_title = models.CharField(max_length=256, blank=True)
agency_type = models.CharField(max_length=256, blank=True)
application_type_code = models.CharField(
max_length=25,
blank=True,
verbose_name='Kind of application')
federal_identifier = models.CharField(max_length=256, blank=True, verbose_name='Previous Grant #')
is_change_in_grantee_inst = models.CharField(
max_length=10,
blank=True,
verbose_name='Change in grantee institution?')
project_type = models.CharField(max_length=256, blank=True)
responsible_entity = models.CharField(max_length=256, blank=True)
departmental_id_primary = models.CharField(
max_length=256,
blank=True,
verbose_name='Departmental ID primary')
departmental_id_secondary = models.CharField(
max_length=256,
blank=True,
verbose_name='Departmental ID secondary')
departmental_name_primary = models.CharField(max_length=256, blank=True)
departmental_name_secondary = models.CharField(max_length=256, blank=True)
are_vertebrate_animals_used = models.CharField(
max_length=10,
blank=True,
verbose_name='Are vertebrate animals used?')
is_iacuc_review_pending = models.CharField(
max_length=10,
blank=True,
verbose_name='Is IACUC review pending?')
iacuc_protocol_number = models.CharField(
max_length=256,
blank=True,
verbose_name='IACUC protocol number')
iacuc_approval_date = models.DateField(
null=True,
blank=True,
verbose_name='IACUC approval date')
are_human_subjects_used = models.CharField(
max_length=10,
blank=True,
verbose_name='Are human subjects used?')
is_irb_review_pending = models.CharField(
max_length=10,
blank=True,
verbose_name='Is IRB review pending?')
irb_protocol_number = models.CharField(
max_length=256,
blank=True,
verbose_name='IRB protocol number')
irb_review_date = models.DateField(
null=True,
blank=True,
verbose_name='IRB review date')
is_haz_mat = models.CharField(max_length=10, blank=True, verbose_name='Uses hazardous materials')
budget_first_per_start_date = models.DateField(
null=True,
blank=True,
verbose_name='Budget first period start date')
budget_first_per_end_date = models.DateField(
null=True,
blank=True,
verbose_name='Budget first period end date')
cost_shr_mand_is_committed = models.CharField(max_length=10, blank=True)
cost_shr_mand_amount = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
cost_shr_mand_source = models.CharField(max_length=256, blank=True)
cost_shr_vol_is_committed = models.CharField(max_length=10, blank=True)
cost_shr_vol_amount = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
cost_shr_vol_source = models.CharField(max_length=256, blank=True)
will_involve_foreign_nationals = models.CharField(
max_length=10,
blank=True)
will_involve_shipment = models.CharField(max_length=10, blank=True)
will_involve_foreign_contract = models.CharField(max_length=10, blank=True)
total_costs = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y1 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y2 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y3 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y4 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y5 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y6 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y7 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y8 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y9 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_costs_y10 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y1 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y2 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y3 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y4 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y5 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y6 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y7 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y8 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y9 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_direct_costs_y10 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y1 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y2 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y3 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y4 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y5 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y6 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y7 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y8 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y9 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
total_indirect_costs_y10 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
def __unicode__(self):
return u'Proposal #%s' % (self.get_unique_identifier())
class Meta:
index_together = [
["award", "is_first_proposal"],
]
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_proposal',
kwargs={
'award_pk': self.award.pk,
'proposal_pk': self.id})
def get_unique_identifier(self):
"""Gets a value that uniquely identifies this Proposal"""
return self.proposal_number
def save(self, *args, **kwargs):
"""Overrides the parent save method.
If this is a new Proposal, copy certain fields over to the AwardAcceptance object
"""
if not self.dummy and not self.pk:
try:
award_intake = self.award.get_current_award_acceptance()
award_intake.copy_from_proposal(self)
            except Exception:
                # The award may be unset (null=True) or may not yet have a
                # current acceptance; in either case there is nothing to copy.
                pass
super(Proposal, self).save(*args, **kwargs)
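    # Illustrative sketch (commented out; ``award`` is a hypothetical saved
    # Award with a current acceptance, and Decimal comes from the stdlib):
    # creating a real proposal copies its cost fields onto the acceptance.
    #
    #     proposal = Proposal(award=award, total_costs=Decimal('1000.00'))
    #     proposal.save()  # copy_from_proposal() runs on the AwardAcceptance
    #     award.get_current_award_acceptance().award_total_costs  # -> Decimal('1000.00')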
def delete(self, *args, **kwargs):
"""Overrides the parent delete method.
If this Proposal came from Lotus, just remove the reference to the Award instead of
deleting from the database.
"""
if self.lotus_id:
self.award = None
self.save()
else:
super(Proposal, self).delete(*args, **kwargs)
def set_first_proposal(award, proposals):
"""Set the is_first_proposal flag on the appropriate proposal"""
proposals.update(is_first_proposal=False)
first_proposal = proposals.order_by('id').first()
first_proposal.is_first_proposal = True
first_proposal.save()
@receiver(post_delete, sender=Proposal)
@receiver(post_save, sender=Proposal)
def check_first_proposal(sender, instance, **kwargs):
"""Use Django signals to keep the is_first_proposal flag up to date"""
try:
award = instance.award
except Award.DoesNotExist:
award = None
if not award:
return
proposals = Proposal.objects.filter(award=award)
try:
dummy_proposal = Proposal.objects.get(award=award, dummy=True)
except Proposal.DoesNotExist:
dummy_proposal = None
    if not proposals.exists():
        # No proposals yet: create a placeholder so the award always has one.
        Proposal.objects.create(award=award, dummy=True)
        return
    elif proposals.count() > 1 and dummy_proposal:
        # A real proposal exists now, so the placeholder can be removed.
        dummy_proposal.delete()
    first_proposals = Proposal.objects.filter(
        award=award,
        is_first_proposal=True)
    if first_proposals.count() != 1:
        set_first_proposal(award, proposals)
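# Illustrative sketch (commented out; ``award`` is a hypothetical saved Award):
# the two signal registrations above keep exactly one proposal per award
# flagged as the first, creating a dummy placeholder when none exist.
#
#     Proposal.objects.create(award=award)   # post_save fires check_first_proposal
#     Proposal.objects.filter(award=award, is_first_proposal=True).count()  # -> 1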
class KeyPersonnel(FieldIteratorMixin, models.Model):
"""Model for the KeyPersonnel data"""
HIDDEN_FIELDS = ['proposal']
HIDDEN_TABLE_FIELDS = []
proposal = models.ForeignKey(Proposal)
employee_id = models.CharField(
max_length=40,
blank=True,
verbose_name='Emp ID')
last_name = models.CharField(max_length=64, blank=True)
first_name = models.CharField(max_length=64, blank=True)
middle_name = models.CharField(max_length=32, blank=True)
project_role = models.CharField(max_length=128, blank=True)
calendar_months = models.DecimalField(
decimal_places=3,
max_digits=5,
null=True,
blank=True,
verbose_name='Calendar mos.')
academic_months = models.DecimalField(
decimal_places=3,
max_digits=5,
null=True,
blank=True,
verbose_name='Academic mos.')
summer_months = models.DecimalField(
decimal_places=3,
max_digits=5,
null=True,
blank=True,
verbose_name='Summer mos.')
effort = models.CharField(max_length=10, blank=True)
def __unicode__(self):
return u'%s, %s %s on %s' % (
self.last_name, self.first_name, self.middle_name, self.proposal)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_key_personnel',
kwargs={
'award_pk': self.proposal.award.pk,
'proposal_pk': self.proposal.pk,
'key_personnel_pk': self.id})
def get_delete_url(self):
"""Gets the URL used to delete this object"""
return reverse(
'delete_key_personnel',
kwargs={
'award_pk': self.proposal.award.pk,
'proposal_pk': self.proposal.pk,
'key_personnel_pk': self.id})
class PerformanceSite(FieldIteratorMixin, models.Model):
"""Model for the PerformanceSite data"""
HIDDEN_FIELDS = ['proposal']
HIDDEN_TABLE_FIELDS = []
proposal = models.ForeignKey(Proposal)
ps_organization = models.CharField(
max_length=255,
blank=True,
verbose_name='Organization')
ps_duns = models.BigIntegerField(
null=True,
blank=True,
verbose_name='DUNS')
ps_street1 = models.CharField(
max_length=255,
blank=True,
verbose_name='Street 1')
ps_street2 = models.CharField(
max_length=255,
blank=True,
verbose_name='Street 2')
ps_city = models.CharField(max_length=255, blank=True, verbose_name='City')
ps_state = models.CharField(
max_length=100,
blank=True,
verbose_name='State')
ps_zipcode = models.CharField(
max_length=128,
blank=True,
verbose_name='Zip')
ps_country = models.CharField(
max_length=128,
blank=True,
verbose_name='Country')
def __unicode__(self):
return u'%s %s, %s' % (self.ps_street1, self.ps_city, self.ps_state)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_performance_site',
kwargs={
'award_pk': self.proposal.award.pk,
'proposal_pk': self.proposal.pk,
'performance_site_pk': self.id})
def get_delete_url(self):
"""Gets the URL used to delete this object"""
return reverse(
'delete_performance_site',
kwargs={
'award_pk': self.proposal.award.pk,
'proposal_pk': self.proposal.pk,
'performance_site_pk': self.id})
class AwardModificationMixin(object):
"""Mixin used for Award sections that can have modifications"""
def clean(self, *args, **kwargs):
"""Overrides the base clean method. Verifies there are no other current modifications."""
section = self.__class__
active_modifications = section.objects.filter(
award=self.award,
current_modification=True).exclude(
pk=self.id)
        if self.current_modification and active_modifications.exists():
            raise ValidationError(
                'Another %s is already the current modification for %s. '
                'Unset "current modification" on all other %s objects and try again.' %
                (section.__name__, self.award, section.__name__))
super(AwardModificationMixin, self).clean(*args, **kwargs)
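    # Illustrative sketch (commented out; ``award`` is a hypothetical saved
    # Award): the clean() above rejects a second "current" modification for
    # the same award, so callers that run full_clean() surface the conflict.
    #
    #     mod = AwardAcceptance(award=award, current_modification=True)
    #     mod.full_clean()  # raises ValidationError if another current modification exists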
class AwardAcceptance(AwardModificationMixin, AwardSection):
"""Model for the AwardAcceptance data"""
EAS_STATUS_CHOICES = (
('A', 'Active'),
('OH', 'On hold'),
('AR', 'At risk'),
('C', 'Closed')
)
PRIORITY_STATUS_CHOICES = (
('on', 1),
('tw', 2),
('th', 3),
('fo', 4),
('fi', 5),
('ni', 9)
)
PRIORITY_STATUS_DICT = {'on': 1,
'tw': 2,
'th': 3,
'fo': 4,
'fi': 5,
'ni': 9
}
HIDDEN_FIELDS = AwardSection.HIDDEN_FIELDS + ['current_modification', 'award_text']
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'fcoi_cleared_date',
'project_title',
'full_f_a_recovery',
'explanation',
'mfa_investigators',
'award_total_costs_y1',
'award_total_costs_y2',
'award_total_costs_y3',
'award_total_costs_y4',
'award_total_costs_y5',
'award_total_costs_y6',
'award_total_costs_y7',
'award_total_costs_y8',
'award_total_costs_y9',
'award_total_costs_y10',
'award_direct_costs_y1',
'award_direct_costs_y2',
'award_direct_costs_y3',
'award_direct_costs_y4',
'award_direct_costs_y5',
'award_direct_costs_y6',
'award_direct_costs_y7',
'award_direct_costs_y8',
'award_direct_costs_y9',
'award_direct_costs_y10',
'award_indirect_costs_y1',
'award_indirect_costs_y2',
'award_indirect_costs_y3',
'award_indirect_costs_y4',
'award_indirect_costs_y5',
'award_indirect_costs_y6',
'award_indirect_costs_y7',
'award_indirect_costs_y8',
'award_indirect_costs_y9',
'award_indirect_costs_y10',
'contracting_official',
'gmo_co_email',
'gmo_co_phone_number',
'creation_date']
DISPLAY_TABLES = [
{
'title': 'Costs', 'columns': (
'Total Direct Costs', 'Total Indirect Costs', 'Total Costs'), 'rows': [
{
'label': 'Total', 'fields': (
'award_direct_costs', 'award_indirect_costs', 'award_total_costs')}, {
'label': 'Y1', 'fields': (
'award_direct_costs_y1', 'award_indirect_costs_y1', 'award_total_costs_y1')}, {
'label': 'Y2', 'fields': (
'award_direct_costs_y2', 'award_indirect_costs_y2', 'award_total_costs_y2')}, {
'label': 'Y3', 'fields': (
'award_direct_costs_y3', 'award_indirect_costs_y3', 'award_total_costs_y3')}, {
'label': 'Y4', 'fields': (
'award_direct_costs_y4', 'award_indirect_costs_y4', 'award_total_costs_y4')}, {
'label': 'Y5', 'fields': (
'award_direct_costs_y5', 'award_indirect_costs_y5', 'award_total_costs_y5')}, {
'label': 'Y6', 'fields': (
'award_direct_costs_y6', 'award_indirect_costs_y6', 'award_total_costs_y6')}, {
'label': 'Y7', 'fields': (
'award_direct_costs_y7', 'award_indirect_costs_y7', 'award_total_costs_y7')}, {
'label': 'Y8', 'fields': (
'award_direct_costs_y8', 'award_indirect_costs_y8', 'award_total_costs_y8')}, {
'label': 'Y9', 'fields': (
'award_direct_costs_y9', 'award_indirect_costs_y9', 'award_total_costs_y9')}, {
'label': 'Y10', 'fields': (
'award_direct_costs_y10', 'award_indirect_costs_y10', 'award_total_costs_y10')}, ]
}
]
EAS_REPORT_FIELDS = [
'eas_status',
'award_issue_date',
'award_acceptance_date',
'sponsor_award_number',
'agency_award_number',
]
# These fields must have values before this section can be completed
minimum_fields = (
'award_issue_date',
)
award = models.ForeignKey(Award)
creation_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name='Date Created')
current_modification = models.BooleanField(default=True)
eas_status = models.CharField(
choices=EAS_STATUS_CHOICES,
max_length=2,
blank=True,
verbose_name='EAS status')
new_funding = models.NullBooleanField(verbose_name='New Funding?')
fcoi_cleared_date = models.DateField(
null=True,
blank=True,
verbose_name='FCOI cleared date')
phs_funded = models.NullBooleanField(verbose_name='PHS funded?')
award_setup_priority = models.CharField(
choices=PRIORITY_STATUS_CHOICES,
max_length=2,
blank=True,
verbose_name='Award Setup Priority'
)
priority_by_director = models.NullBooleanField(blank=True, null=True, verbose_name='Prioritized by Director?')
project_title = models.CharField(
max_length=250,
blank=True,
verbose_name='Project Title (if different from Proposal)')
foreign_travel = models.NullBooleanField(verbose_name='Foreign Travel?')
f_a_rate = models.CharField(
max_length=250,
blank=True,
verbose_name='F&A rate')
full_f_a_recovery = models.NullBooleanField(
verbose_name='Full F&A Recovery?')
explanation = models.CharField(
max_length=250,
blank=True,
verbose_name='If no full F&A, provide explanation')
mfa_investigators = models.NullBooleanField(
verbose_name='MFA investigators?')
admin_establishment = models.NullBooleanField(
verbose_name='Administrative establishment?')
award_issue_date = models.DateField(null=True, blank=True)
award_acceptance_date = models.DateField(null=True, blank=True)
agency_award_number = models.CharField(max_length=50, blank=True)
sponsor_award_number = models.CharField(
max_length=50,
blank=True,
verbose_name='Prime Award # (if GW is subawardee)')
award_total_costs = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True,
verbose_name='Total award costs')
award_direct_costs = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True,
verbose_name='Total award direct costs')
award_indirect_costs = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True,
verbose_name='Total award indirect costs')
award_total_costs_y1 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y1 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y1 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y2 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y2 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y2 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y3 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y3 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y3 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y4 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y4 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y4 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y5 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y5 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y5 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y6 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y6 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y6 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y7 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y7 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y7 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y8 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y8 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y8 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y9 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y9 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y9 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_total_costs_y10 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_direct_costs_y10 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
award_indirect_costs_y10 = models.DecimalField(
decimal_places=2,
max_digits=15,
null=True,
blank=True)
contracting_official = models.CharField(
max_length=500,
blank=True,
verbose_name='GMO or CO')
gmo_co_phone_number = models.CharField(
max_length=15,
blank=True,
verbose_name='GMO/CO phone number')
gmo_co_email = models.CharField(
max_length=50,
blank=True,
verbose_name='GMO/CO email')
pta_modification = models.NullBooleanField(verbose_name='Do you want to send this to the post-award team for modification?')
acceptance_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
award_text = models.CharField(max_length=50, blank=True, null=True)
def __unicode__(self):
return u'Award Intake %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object."""
return reverse(
'edit_award_acceptance',
kwargs={
'award_pk': self.award.pk})
def copy_from_proposal(self, proposal):
"""Copies common fields to this object from the given Proposal."""
self.project_title = proposal.project_title
self.award_total_costs = proposal.total_costs
self.award_total_costs_y1 = proposal.total_costs_y1
self.award_total_costs_y2 = proposal.total_costs_y2
self.award_total_costs_y3 = proposal.total_costs_y3
self.award_total_costs_y4 = proposal.total_costs_y4
self.award_total_costs_y5 = proposal.total_costs_y5
self.award_total_costs_y6 = proposal.total_costs_y6
self.award_total_costs_y7 = proposal.total_costs_y7
self.award_total_costs_y8 = proposal.total_costs_y8
self.award_total_costs_y9 = proposal.total_costs_y9
self.award_total_costs_y10 = proposal.total_costs_y10
self.award_direct_costs = proposal.total_direct_costs
self.award_direct_costs_y1 = proposal.total_direct_costs_y1
self.award_direct_costs_y2 = proposal.total_direct_costs_y2
self.award_direct_costs_y3 = proposal.total_direct_costs_y3
self.award_direct_costs_y4 = proposal.total_direct_costs_y4
self.award_direct_costs_y5 = proposal.total_direct_costs_y5
self.award_direct_costs_y6 = proposal.total_direct_costs_y6
self.award_direct_costs_y7 = proposal.total_direct_costs_y7
self.award_direct_costs_y8 = proposal.total_direct_costs_y8
self.award_direct_costs_y9 = proposal.total_direct_costs_y9
self.award_direct_costs_y10 = proposal.total_direct_costs_y10
self.award_indirect_costs = proposal.total_indirect_costs
self.award_indirect_costs_y1 = proposal.total_indirect_costs_y1
self.award_indirect_costs_y2 = proposal.total_indirect_costs_y2
self.award_indirect_costs_y3 = proposal.total_indirect_costs_y3
self.award_indirect_costs_y4 = proposal.total_indirect_costs_y4
self.award_indirect_costs_y5 = proposal.total_indirect_costs_y5
self.award_indirect_costs_y6 = proposal.total_indirect_costs_y6
self.award_indirect_costs_y7 = proposal.total_indirect_costs_y7
self.award_indirect_costs_y8 = proposal.total_indirect_costs_y8
self.award_indirect_costs_y9 = proposal.total_indirect_costs_y9
self.award_indirect_costs_y10 = proposal.total_indirect_costs_y10
self.save()
class Meta:
verbose_name = 'Award intake'
verbose_name_plural = 'Award intakes'
def save(self, *args, **kwargs):
"""Overrides the base save method.
If it was an existing AwardAcceptance, check to see if FCOI and/or PHS funded
emails need to be sent.
"""
try:
old_object = AwardAcceptance.objects.get(pk=self.pk)
except AwardAcceptance.DoesNotExist:
super(AwardAcceptance, self).save(*args, **kwargs)
return
super(AwardAcceptance, self).save(*args, **kwargs)
# Send email to Award Setup user when FCOI cleared date is populated
if not old_object.fcoi_cleared_date and self.fcoi_cleared_date:
self.award.send_fcoi_cleared_notification(self.fcoi_cleared_date)
if not old_object.phs_funded and self.phs_funded:
self.award.send_phs_funded_notification()
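    # Illustrative sketch (commented out; ``award`` is a hypothetical saved
    # Award and ``date`` comes from datetime): the save() above notifies only
    # on an empty -> set transition.
    #
    #     acceptance = award.get_current_award_acceptance()
    #     acceptance.fcoi_cleared_date = date.today()
    #     acceptance.save()  # triggers send_fcoi_cleared_notification() once
    #     acceptance.save()  # no further email; the date was already set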
class NegotiationStatus(models.Model):
NEGOTIATION_CHOICES = (
('IQ', 'In queue'),
('IP', 'In progress'),
('WFS', 'Waiting for sponsor'),
('WFP', 'Waiting for PI'),
('WFO', 'Waiting for other department'),
('CD', 'Completed'),
('UD', 'Unrealized')
)
NEGOTIATION_STATUS_CHOICES = (
'In queue',
'In progress',
'Waiting for sponsor',
'Waiting for PI',
'Waiting for other department',
'Completed',
'Unrealized'
)
NEGOTIATION_CHOICES_DICT = {'IQ': 'In queue',
'IP': 'In progress',
'WFS': 'Waiting for sponsor',
'WFP': 'Waiting for PI',
'WFO': 'Waiting for other department',
'CD': 'Completed',
'UD': 'Unrealized'
}
negotiation_status = models.CharField(
choices=NEGOTIATION_CHOICES,
max_length=50,
blank=True)
negotiation_status_changed_user = models.CharField(
max_length=100,
blank=True)
negotiation_notes = models.TextField(
blank=True)
award = models.ForeignKey(Award)
negotiation_status_date = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
return u'%s Status %s' % (self.award, self.negotiation_status)
class AwardNegotiation(AwardModificationMixin, AssignableAwardSection):
"""Model for the AwardNegotiation data"""
AWARD_TYPE_CHOICES = (
('CR', 'Contract: Cost-reimbursable'),
('FP', 'Contract: Fixed price'),
('TM', 'Contract: Time & materials'),
('GC', 'Grant: Cost-reimbursable'),
('GF', 'Grant: Fixed amount award'),
('CA', 'Cooperative agreement'),
('CD', 'CRADA'),
('ND', 'NDA'),
('TA', 'Teaming agreement'),
('DU', 'DUA'),
('RF', 'RFP'),
('MT', 'MTA'),
('MA', 'Master agreement'),
('OT', 'Other')
)
NEGOTIATION_CHOICES = (
('IQ', 'In queue'),
('IP', 'In progress'),
('WFS', 'Waiting for sponsor'),
('WFP', 'Waiting for PI'),
('WFO', 'Waiting for other department'),
('CD', 'Completed'),
('UD', 'Unrealized')
)
HIDDEN_FIELDS = AwardSection.HIDDEN_FIELDS + ['current_modification', 'date_received', 'award_text']
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'subcontracting_plan',
'under_master_agreement',
'retention_period',
'gw_doesnt_own_ip',
'gw_background_ip',
'foreign_restrictions',
'certificates_insurance',
'insurance_renewal',
'government_property',
'everify',
'date_assigned']
EAS_REPORT_FIELDS = [
'award_type',
]
minimum_fields = (
'award_type',
)
award = models.ForeignKey(Award)
current_modification = models.BooleanField(default=True)
subcontracting_plan = models.NullBooleanField(
verbose_name='Is Small Business Subcontracting Plan required?')
under_master_agreement = models.NullBooleanField(
verbose_name='Under Master Agreement?')
award_type = models.CharField(
choices=AWARD_TYPE_CHOICES,
max_length=3,
blank=True,
verbose_name='Award Type')
other_award_type = models.CharField(max_length=255, blank=True)
related_other_agreements = models.NullBooleanField(
verbose_name='Related Other Agreements?')
related_other_comments = models.TextField(
blank=True,
verbose_name='Related other agreements comments')
negotiator = models.CharField(
max_length=500,
blank=True,
verbose_name='Negotiator Assist')
date_received = models.DateField(
null=True,
blank=True,
verbose_name='Date Received')
retention_period = models.CharField(
max_length=500,
blank=True,
verbose_name='Sponsor Retention Period')
gw_doesnt_own_ip = models.NullBooleanField(
verbose_name="GW Doesn't Own IP?")
gw_background_ip = models.NullBooleanField(
verbose_name='GW Background IP?')
negotiation_status = models.CharField(
choices=NEGOTIATION_CHOICES,
max_length=3,
blank=True,
verbose_name='Negotiation Status',
default='IQ')
negotiation_notes = models.TextField(
blank=True,
verbose_name='Negotiation Notes')
foreign_restrictions = models.NullBooleanField(
verbose_name='Foreign Participation Restrictions?')
certificates_insurance = models.NullBooleanField(
verbose_name='Certificate of Insurance Needed?')
insurance_renewal = models.DateField(
null=True,
blank=True,
verbose_name='Certificate of Insurance Renewal Date')
government_property = models.NullBooleanField(
verbose_name='Government Furnished Property?')
data_security_restrictions = models.NullBooleanField(
verbose_name='Data/Security Restrictions?')
everify = models.NullBooleanField(verbose_name='E-verify?')
publication_restriction = models.NullBooleanField(
verbose_name='Publication Restriction?')
negotiation_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
award_text = models.CharField(max_length=50, blank=True, null=True)
def __unicode__(self):
return u'Award Negotiation %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_award_negotiation',
kwargs={
'award_pk': self.award.pk})
class AwardSetup(AssignableAwardSection):
"""Model for the AwardSetup data"""
WAIT_FOR = {'RB': 'Revised Budget', 'PA': 'PI Access', 'CA': 'Cost Share Approval', 'FC': 'FCOI',
'PS': 'Proposal Submission', 'SC': 'Sponsor Clarity', 'NO': 'New Org needed',
'IC': 'Internal Clarification', 'DC': 'Documents not in GW Docs'
}
WAIT_FOR_CHOICES = (
('RB', 'Revised Budget'),
('PA', 'PI Access'),
('CA', 'Cost Share Approval'),
('FC', 'FCOI'),
('PS', 'Proposal Submission'),
('SC', 'Sponsor Clarity'),
('NO', 'New Org needed'),
('IC', 'Internal Clarification'),
('DC', 'Documents not in GW Docs')
)
SP_TYPE_CHOICES = (
('SP1', 'SP1 - Research and Development'),
('SP2', 'SP2 - Training'),
('SP3', 'SP3 - Other'),
('SP4', 'SP4 - Clearing and Suspense'),
('SP5', 'SP5 - Program Income'),
)
REPORTING_CHOICES = (
('MN', 'Monthly'),
('QR', 'Quarterly'),
('SA', 'Semi-annually'),
('AN', 'Annually'),
('OT', 'Other (specify)')
)
EAS_AWARD_CHOICES = (
('C', 'Contract'),
('G', 'Grant'),
('I', 'Internal Funding'),
('PP', 'Per Patient'),
('PA', 'Pharmaceutical')
)
PROPERTY_CHOICES = (
('TG', 'Title to GW'),
('TS', 'Title to Sponsor'),
('TD', 'Title to be determined at purchase'),
('SE', 'Special EAS Value')
)
ONR_CHOICES = (
('Y', 'Yes, Administered'),
('N', 'No, Administered')
)
COST_SHARING_CHOICES = (
('M', 'Mandatory'),
('V', 'Voluntary'),
('B', 'Both')
)
PERFORMANCE_SITE_CHOICES = (
('ON', 'On-campus'),
('OF', 'Off-campus'),
('OT', 'Other')
)
TASK_LOCATION_CHOICES = (
('AL', 'AL - ALEXANDRIA'),
('BE', 'BE - BETHESDA'),
('CC', 'CC - CRYSTAL CITY'),
('CL', 'CL - CLARENDON'),
('CM', 'CM - ST MARY\'S COUNTY, CALIFORNIA, MD'),
('CW', 'CW - K STREET CENTER OFF-CAMPUS DC'),
('DE', 'DE - DISTANCE EDUCATION'),
('FB', 'FB - FOGGY BOTTOM'),
('FC', 'FC - CITY OF FALLS CHURCH'),
('FX', 'FX - FAIRFAX COUNTY'),
('GS', 'GS - GODDARD SPACE FLIGHT CENTER'),
('HR', 'HR - HAMPTON ROADS'),
('IN', 'IN - INTERNATIONAL'),
('LA', 'LA - LANGLEY AIR FORCE BASE'),
('LO', 'LO - LOUDOUN COUNTY OTHER'),
('MV', 'MV - MOUNT VERNON CAMPUS'),
('OA', 'OA - OTHER ARLINGTON COUNTY'),
('OD', 'OD - OTHER DISTRICT OF COLUMBIA'),
('OG', 'OG - OTHER MONTGOMERY COUNTY'),
('OM', 'OM - OTHER MARYLAND'),
('OV', 'OV - OTHER VIRGINIA'),
('PA', 'PA - PACE - Classes at Sea'),
('RI', 'RI - RICHMOND, CITY OF'),
('RO', 'RO - ROSSLYN ARLINGTON COUNTY'),
('RV', 'RV - ROCKVILLE'),
('SM', 'SM - SUBURBAN MARYLAND'),
('T', 'T - TOTAL LOCATION'),
('US', 'US - OTHER US'),
('VC', 'VC - VIRGINIA CAMPUS'),
('VR', 'VR - VIRGINIA RESEARCH AND TECHNOLOGY CENTER'),
('VS', 'VS - VIRGINIA SQUARE'),
)
EAS_SETUP_CHOICES = (
('Y', 'Yes'),
('N', 'No'),
('M', 'Manual'),
)
HIDDEN_FIELDS = AwardSection.HIDDEN_FIELDS + [
'award_template',
'short_name',
'task_location',
'start_date',
'end_date',
'final_reports_due_date',
'eas_award_type',
'sp_type',
'indirect_cost_schedule',
'allowed_cost_schedule',
'cfda_number',
'federal_negotiated_rate',
'bill_to_address',
'billing_events',
'contact_name',
'phone',
'financial_reporting_req',
'financial_reporting_oth',
'property_equip_code',
'onr_administered_code',
'cost_sharing_code',
'document_number',
'performance_site',
'award_setup_complete',
'qa_screening_complete',
'ready_for_eas_setup',
]
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'nine_ninety_form_needed',
'patent_reporting_req',
'invention_reporting_req',
'property_reporting_req',
'equipment_reporting_req',
'budget_restrictions',
'record_destroy_date',
'date_assigned']
EAS_REPORT_FIELDS = [
# PTA info first
'award_template',
'short_name',
'task_location',
'start_date',
'end_date',
'final_reports_due_date',
'eas_award_type',
'sp_type',
'indirect_cost_schedule',
'allowed_cost_schedule',
'cfda_number',
'federal_negotiated_rate',
'bill_to_address',
'contact_name',
'phone',
'financial_reporting_req',
'financial_reporting_oth',
'property_equip_code',
'onr_administered_code',
'cost_sharing_code',
'billing_events',
'document_number',
'nine_ninety_form_needed',
]
minimum_fields = (
)
MULTIPLE_SELECT_FIELDS = (
'financial_reporting_req',
'technical_reporting_req',
)
award = models.OneToOneField(Award)
short_name = models.CharField(
max_length=30,
blank=True,
verbose_name='Award short name')
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
final_reports_due_date = models.DateField(
null=True,
blank=True,
verbose_name='Final Reports/Final Invoice Due Date (Close Date)')
eas_award_type = models.CharField(
choices=EAS_AWARD_CHOICES,
max_length=2,
blank=True,
verbose_name='EAS award type')
sp_type = models.CharField(
choices=SP_TYPE_CHOICES,
max_length=3,
blank=True,
verbose_name='SP Type')
indirect_cost_schedule = models.ForeignKey(
IndirectCost,
null=True,
blank=True,
limit_choices_to={
'active': True})
allowed_cost_schedule = models.ForeignKey(
AllowedCostSchedule,
null=True,
blank=True,
limit_choices_to={
'active': True})
cfda_number = models.ForeignKey(
CFDANumber,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='CFDA number')
federal_negotiated_rate = models.ForeignKey(
FedNegRate,
null=True,
blank=True,
limit_choices_to={
'active': True})
property_equip_code = models.CharField(
choices=PROPERTY_CHOICES,
max_length=2,
blank=True,
verbose_name='T&C: Property and Equipment Code')
onr_administered_code = models.CharField(
choices=ONR_CHOICES,
max_length=2,
blank=True,
verbose_name='T&C: ONR Administered Code')
cost_sharing_code = models.CharField(
choices=COST_SHARING_CHOICES,
max_length=2,
blank=True,
verbose_name='T&C: Cost Sharing Code')
bill_to_address = models.TextField(blank=True)
contact_name = models.CharField(
max_length=150,
blank=True,
verbose_name='Contact Name (Last, First)')
phone = models.CharField(max_length=50, blank=True)
billing_events = models.TextField(blank=True)
document_number = models.CharField(max_length=100, blank=True)
date_wait_for_updated = models.DateTimeField(blank=True, null=True, verbose_name='Date Wait for Updated')
    wait_for_reson = models.CharField(  # (sic) misspelled name preserved; renaming would require a migration
choices=WAIT_FOR_CHOICES,
max_length=2,
blank=True,
null=True,
verbose_name='Wait for'
)
nine_ninety_form_needed = models.NullBooleanField(
verbose_name='990 Form Needed?')
task_location = models.CharField(
choices=TASK_LOCATION_CHOICES,
max_length=2,
blank=True)
performance_site = models.CharField(
choices=PERFORMANCE_SITE_CHOICES,
max_length=2,
blank=True)
expanded_authority = models.NullBooleanField(
verbose_name='Expanded Authority?')
financial_reporting_req = MultiSelectField(
choices=REPORTING_CHOICES,
blank=True,
verbose_name='Financial Reporting Requirements')
financial_reporting_oth = models.CharField(
max_length=250,
blank=True,
verbose_name='Other financial reporting requirements')
technical_reporting_req = MultiSelectField(
choices=REPORTING_CHOICES,
blank=True,
verbose_name='Technical Reporting Requirements')
technical_reporting_oth = models.CharField(
max_length=250,
blank=True,
verbose_name='Other technical reporting requirements')
patent_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Patent Report Requirement')
invention_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Invention Report Requirement')
property_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Property Report Requirement')
equipment_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Equipment Report Requirement')
budget_restrictions = models.NullBooleanField(
verbose_name='Budget Restrictions?')
award_template = models.ForeignKey(
AwardTemplate,
null=True,
blank=True,
limit_choices_to={
'active': True})
award_setup_complete = models.DateField(
null=True,
blank=True,
verbose_name='Award Setup Complete')
qa_screening_complete = models.DateField(
null=True,
blank=True,
verbose_name='QA Screening Complete')
pre_award_spending_auth = models.NullBooleanField(
verbose_name='Pre-award spending authorized?')
record_destroy_date = models.DateField(
null=True,
blank=True,
verbose_name='Record Retention Destroy Date')
ready_for_eas_setup = models.CharField(
choices=EAS_SETUP_CHOICES,
max_length=3,
blank=True,
verbose_name='Ready for EAS Setup?')
wait_for = models.TextField(blank=True)
setup_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
def __unicode__(self):
return u'Award Setup %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse('edit_award_setup', kwargs={'award_pk': self.award.pk})
def copy_from_proposal(self, proposal):
"""Copy common fields from the given proposal to this AwardSetup"""
if proposal:
self.start_date = proposal.project_start_date
self.end_date = proposal.project_end_date
self.save()
    def get_waiting_reason(self):
        """Gets the human-readable label for the current wait-for code."""
        return self.WAIT_FOR.get(self.wait_for_reson, '') if self.wait_for_reson else ''
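    # Illustrative usage sketch (commented out; ``setup`` is a hypothetical
    # AwardSetup instance):
    #
    #     setup.wait_for_reson = 'RB'
    #     setup.get_waiting_reason()  # -> 'Revised Budget'
    #     setup.wait_for_reson = ''
    #     setup.get_waiting_reason()  # -> ''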
class AwardModification(AssignableAwardSection):
"""Model for the AwardModification data"""
WAIT_FOR_CHOICES = (
('RB', 'Revised Budget'),
('PA', 'PI Access'),
('CA', 'Cost Share Approval'),
('FC', 'FCOI'),
('PS', 'Proposal Submission'),
('SC', 'Sponsor Clarity'),
('NO', 'New Org needed'),
('IC', 'Internal Clarification'),
('DC', 'Documents not in GW Docs'))
SP_TYPE_CHOICES = (
('SP1', 'SP1 - Research and Development'),
('SP2', 'SP2 - Training'),
('SP3', 'SP3 - Other'),
('SP4', 'SP4 - Clearing and Suspense'),
('SP5', 'SP5 - Program Income'),
)
REPORTING_CHOICES = (
('MN', 'Monthly'),
('QR', 'Quarterly'),
('SA', 'Semi-annually'),
('AN', 'Annually'),
('OT', 'Other (specify)')
)
EAS_AWARD_CHOICES = (
('C', 'Contract'),
('G', 'Grant'),
('I', 'Internal Funding'),
('PP', 'Per Patient'),
('PA', 'Pharmaceutical')
)
PROPERTY_CHOICES = (
('TG', 'Title to GW'),
('TS', 'Title to Sponsor'),
('TD', 'Title to be determined at purchase'),
('SE', 'Special EAS Value')
)
ONR_CHOICES = (
('Y', 'Yes, Administered'),
('N', 'No, Administered')
)
COST_SHARING_CHOICES = (
('M', 'Mandatory'),
('V', 'Voluntary'),
('B', 'Both')
)
PERFORMANCE_SITE_CHOICES = (
('ON', 'On-campus'),
('OF', 'Off-campus'),
('OT', 'Other')
)
TASK_LOCATION_CHOICES = (
('AL', 'AL - ALEXANDRIA'),
('BE', 'BE - BETHESDA'),
('CC', 'CC - CRYSTAL CITY'),
('CL', 'CL - CLARENDON'),
('CM', 'CM - ST MARY\'S COUNTY, CALIFORNIA, MD'),
('CW', 'CW - K STREET CENTER OFF-CAMPUS DC'),
('DE', 'DE - DISTANCE EDUCATION'),
('FB', 'FB - FOGGY BOTTOM'),
('FC', 'FC - CITY OF FALLS CHURCH'),
('FX', 'FX - FAIRFAX COUNTY'),
('GS', 'GS - GODDARD SPACE FLIGHT CENTER'),
('HR', 'HR - HAMPTON ROADS'),
('IN', 'IN - INTERNATIONAL'),
('LA', 'LA - LANGLEY AIR FORCE BASE'),
('LO', 'LO - LOUDOUN COUNTY OTHER'),
('MV', 'MV - MOUNT VERNON CAMPUS'),
('OA', 'OA - OTHER ARLINGTON COUNTY'),
('OD', 'OD - OTHER DISTRICT OF COLUMBIA'),
('OG', 'OG - OTHER MONTGOMERY COUNTY'),
('OM', 'OM - OTHER MARYLAND'),
('OV', 'OV - OTHER VIRGINIA'),
('PA', 'PA - PACE - Classes at Sea'),
('RI', 'RI - RICHMOND, CITY OF'),
('RO', 'RO - ROSSLYN ARLINGTON COUNTY'),
('RV', 'RV - ROCKVILLE'),
('SM', 'SM - SUBURBAN MARYLAND'),
('T', 'T - TOTAL LOCATION'),
('US', 'US - OTHER US'),
('VC', 'VC - VIRGINIA CAMPUS'),
('VR', 'VR - VIRGINIA RESEARCH AND TECHNOLOGY CENTER'),
('VS', 'VS - VIRGINIA SQUARE'),
)
EAS_SETUP_CHOICES = (
('Y', 'Yes'),
('N', 'No'),
('M', 'Manual'),
)
HIDDEN_FIELDS = AwardSection.HIDDEN_FIELDS + [
'award_template',
'short_name',
'task_location',
'start_date',
'end_date',
'final_reports_due_date',
'eas_award_type',
'sp_type',
'indirect_cost_schedule',
'allowed_cost_schedule',
'cfda_number',
'federal_negotiated_rate',
'bill_to_address',
'billing_events',
'contact_name',
'phone',
'financial_reporting_req',
'financial_reporting_oth',
'property_equip_code',
'onr_administered_code',
'cost_sharing_code',
'document_number',
'performance_site',
'award_setup_complete',
'qa_screening_complete',
'ready_for_eas_setup',
]
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'nine_ninety_form_needed',
'patent_reporting_req',
'invention_reporting_req',
'property_reporting_req',
'equipment_reporting_req',
'budget_restrictions',
'record_destroy_date',
'date_assigned']
EAS_REPORT_FIELDS = [
# PTA info first
'award_template',
'short_name',
'task_location',
'start_date',
'end_date',
'final_reports_due_date',
'eas_award_type',
'sp_type',
'indirect_cost_schedule',
'allowed_cost_schedule',
'cfda_number',
'federal_negotiated_rate',
'bill_to_address',
'contact_name',
'phone',
'financial_reporting_req',
'financial_reporting_oth',
'property_equip_code',
'onr_administered_code',
'cost_sharing_code',
'billing_events',
'document_number',
'nine_ninety_form_needed',
]
minimum_fields = (
)
MULTIPLE_SELECT_FIELDS = (
'financial_reporting_req',
'technical_reporting_req',
)
award = models.ForeignKey(Award)
short_name = models.CharField(
max_length=30,
blank=True,
verbose_name='Award short name')
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
final_reports_due_date = models.DateField(
null=True,
blank=True,
verbose_name='Final Reports/Final Invoice Due Date (Close Date)')
eas_award_type = models.CharField(
choices=EAS_AWARD_CHOICES,
max_length=2,
blank=True,
verbose_name='EAS award type')
sp_type = models.CharField(
choices=SP_TYPE_CHOICES,
max_length=3,
blank=True,
verbose_name='SP Type')
indirect_cost_schedule = models.ForeignKey(
IndirectCost,
null=True,
blank=True,
limit_choices_to={
'active': True})
allowed_cost_schedule = models.ForeignKey(
AllowedCostSchedule,
null=True,
blank=True,
limit_choices_to={
'active': True})
cfda_number = models.ForeignKey(
CFDANumber,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='CFDA number')
federal_negotiated_rate = models.ForeignKey(
FedNegRate,
null=True,
blank=True,
limit_choices_to={
'active': True})
property_equip_code = models.CharField(
choices=PROPERTY_CHOICES,
max_length=2,
blank=True,
verbose_name='T&C: Property and Equipment Code')
onr_administered_code = models.CharField(
choices=ONR_CHOICES,
max_length=2,
blank=True,
verbose_name='T&C: ONR Administered Code')
cost_sharing_code = models.CharField(
choices=COST_SHARING_CHOICES,
max_length=2,
blank=True,
verbose_name='T&C: Cost Sharing Code')
bill_to_address = models.TextField(blank=True)
contact_name = models.CharField(
max_length=150,
blank=True,
verbose_name='Contact Name (Last, First)')
phone = models.CharField(max_length=50, blank=True)
billing_events = models.TextField(blank=True)
document_number = models.CharField(max_length=100, blank=True)
date_wait_for_updated = models.DateTimeField(blank=True, null=True, verbose_name='Date Wait for Updated')
    wait_for_reson = models.CharField(  # (sic) misspelled name preserved; renaming would require a migration
choices=WAIT_FOR_CHOICES,
max_length=2,
blank=True,
null=True,
verbose_name='Wait for'
)
nine_ninety_form_needed = models.NullBooleanField(
verbose_name='990 Form Needed?')
task_location = models.CharField(
choices=TASK_LOCATION_CHOICES,
max_length=2,
blank=True)
performance_site = models.CharField(
choices=PERFORMANCE_SITE_CHOICES,
max_length=2,
blank=True)
expanded_authority = models.NullBooleanField(
verbose_name='Expanded Authority?')
financial_reporting_req = MultiSelectField(
choices=REPORTING_CHOICES,
blank=True,
verbose_name='Financial Reporting Requirements')
financial_reporting_oth = models.CharField(
max_length=250,
blank=True,
verbose_name='Other financial reporting requirements')
technical_reporting_req = MultiSelectField(
choices=REPORTING_CHOICES,
blank=True,
verbose_name='Technical Reporting Requirements')
technical_reporting_oth = models.CharField(
max_length=250,
blank=True,
verbose_name='Other technical reporting requirements')
patent_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Patent Report Requirement')
invention_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Invention Report Requirement')
property_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Property Report Requirement')
equipment_reporting_req = models.DateField(
null=True,
blank=True,
verbose_name='Equipment Report Requirement')
budget_restrictions = models.NullBooleanField(
verbose_name='Budget Restrictions?')
award_template = models.ForeignKey(
AwardTemplate,
null=True,
blank=True,
limit_choices_to={
'active': True})
award_setup_complete = models.DateField(
null=True,
blank=True,
verbose_name='Award Setup Complete')
qa_screening_complete = models.DateField(
null=True,
blank=True,
verbose_name='QA Screening Complete')
pre_award_spending_auth = models.NullBooleanField(
verbose_name='Pre-award spending authorized?')
record_destroy_date = models.DateField(
null=True,
blank=True,
verbose_name='Record Retention Destroy Date')
ready_for_eas_setup = models.CharField(
choices=EAS_SETUP_CHOICES,
max_length=3,
blank=True,
verbose_name='Ready for EAS Setup?')
modification_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
wait_for = models.TextField(blank=True)
def __unicode__(self):
return u'Award Modification %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse('edit_award_setup', kwargs={'award_pk': self.award.pk})
class PTANumber(FieldIteratorMixin, models.Model):
"""Model for the PTANumber data"""
EAS_AWARD_CHOICES = (
('C', 'Contract'),
('G', 'Grant'),
('I', 'Internal Funding'),
('PP', 'Per Patient'),
('PA', 'Pharmaceutical')
)
SP_TYPE_CHOICES = (
('SP1', 'SP1 - Research and Development'),
('SP2', 'SP2 - Training'),
('SP3', 'SP3 - Other'),
('SP4', 'SP4 - Clearing and Suspense'),
('SP5', 'SP5 - Program Income'),
('SP7', 'SP7 - Symposium/Conference/Seminar'),
)
EAS_SETUP_CHOICES = (
('Y', 'Yes'),
('N', 'No'),
('M', 'Manual'),
)
EAS_STATUS_CHOICES = (
('A', 'Active'),
('OH', 'On hold'),
('AR', 'At risk'),
('C', 'Closed')
)
HIDDEN_FIELDS = ['award']
HIDDEN_TABLE_FIELDS = []
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'parent_banner_number',
'banner_number',
'cs_banner_number',
'allowed_cost_schedule',
'award_template',
'preaward_date',
'federal_negotiated_rate',
'indirect_cost_schedule',
'sponsor_banner_number',
'ready_for_eas_setup']
award = models.ForeignKey(Award)
project_number = models.CharField(
max_length=100,
blank=True,
verbose_name='Project #')
task_number = models.CharField(
max_length=100,
blank=True,
verbose_name='Task #')
award_number = models.CharField(
max_length=100,
blank=True,
verbose_name='Award #')
award_setup_complete = models.DateField(
null=True,
blank=True,
verbose_name='Award Setup Complete')
total_pta_amount = models.DecimalField(
decimal_places=2,
max_digits=10,
null=True,
blank=True,
verbose_name='Total PTA Amt')
parent_banner_number = models.CharField(
max_length=100,
blank=True,
verbose_name='Prnt Banner #')
banner_number = models.CharField(
max_length=100,
blank=True,
verbose_name='Banner #')
cs_banner_number = models.CharField(
max_length=100,
blank=True,
verbose_name='CS Banner #')
principal_investigator = models.ForeignKey(
AwardManager,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='PI*')
agency_name = models.ForeignKey(
FundingSource,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Agency Name*')
department_name = models.ForeignKey(
AwardOrganization,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Department Code & Name*')
project_title = models.CharField(max_length=256, blank=True, verbose_name='Project Title*')
who_is_prime = models.ForeignKey(
PrimeSponsor,
null=True,
blank=True,
limit_choices_to={
'active': True})
allowed_cost_schedule = models.ForeignKey(
AllowedCostSchedule,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Allowed Cost Schedule*')
award_template = models.ForeignKey(
AwardTemplate,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Award Template*')
cfda_number = models.ForeignKey(
CFDANumber,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='CFDA number*')
eas_award_type = models.CharField(
choices=EAS_AWARD_CHOICES,
max_length=2,
blank=True,
verbose_name='EAS Award Type*')
preaward_date = models.DateField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True, verbose_name='Start Date*')
end_date = models.DateField(null=True, blank=True, verbose_name='End Date*')
final_reports_due_date = models.DateField(
null=True,
blank=True,
verbose_name='Final Reports/Final Invoice Due Date (Close Date)*')
federal_negotiated_rate = models.ForeignKey(
FedNegRate,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Federal Negotiated Rate*')
indirect_cost_schedule = models.ForeignKey(
IndirectCost,
null=True,
blank=True,
limit_choices_to={
'active': True},
verbose_name='Indirect Cost Schedule*')
sp_type = models.CharField(
choices=SP_TYPE_CHOICES,
max_length=3,
blank=True,
verbose_name='SP Type*')
short_name = models.CharField(
max_length=30,
blank=True,
verbose_name='Award Short Name*')
agency_award_number = models.CharField(
max_length=50,
blank=True,
verbose_name='Agency Award Number*')
sponsor_award_number = models.CharField(
max_length=50,
blank=True,
verbose_name='Prime Award # (if GW is subawardee)*')
sponsor_banner_number = models.CharField(max_length=50, blank=True)
eas_status = models.CharField(
choices=EAS_STATUS_CHOICES,
max_length=2,
blank=True,
verbose_name='EAS Status*')
ready_for_eas_setup = models.CharField(
choices=EAS_SETUP_CHOICES,
max_length=3,
blank=True,
verbose_name='Ready for EAS Setup?')
is_edited = models.BooleanField(default=False)
pta_number_updated = models.DateField(
null=True,
blank=True)
def __unicode__(self):
return u'PTA #%s' % (self.project_number)
def save(self, *args, **kwargs):
"""Overrides the parent save method.
If this is the first PTANumber entered (either on creation or save later),
update some fields back to the most recent Proposal.
"""
super(PTANumber, self).save(*args, **kwargs)
if self == self.award.get_first_pta_number():
proposal = self.award.get_most_recent_proposal()
if proposal and self.agency_name != proposal.agency_name:
proposal.agency_name = self.agency_name
proposal.save()
if proposal and self.who_is_prime != proposal.who_is_prime:
proposal.who_is_prime = self.who_is_prime
proposal.save()
if proposal and self.project_title != proposal.project_title:
proposal.project_title = self.project_title
proposal.save()
if proposal and self.start_date != proposal.project_start_date:
proposal.project_start_date = self.start_date
proposal.save()
if proposal and self.end_date != proposal.project_end_date:
proposal.project_end_date = self.end_date
proposal.save()
award_acceptance = self.award.get_current_award_acceptance()
if self.agency_award_number != award_acceptance.agency_award_number:
award_acceptance.agency_award_number = self.agency_award_number
award_acceptance.save()
if self.sponsor_award_number != award_acceptance.sponsor_award_number:
award_acceptance.sponsor_award_number = self.sponsor_award_number
award_acceptance.save()
if self.eas_status != award_acceptance.eas_status:
award_acceptance.eas_status = self.eas_status
award_acceptance.save()
if self.project_title != award_acceptance.project_title:
award_acceptance.project_title = self.project_title
award_acceptance.save()
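    # Illustrative sketch (commented out; ``award`` is a hypothetical saved
    # Award): saving the first PTA number pushes shared fields back to the
    # latest proposal and the current acceptance.
    #
    #     pta = award.get_first_pta_number()
    #     pta.project_title = 'Updated title'
    #     pta.save()  # the proposal and acceptance now carry 'Updated title'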
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_pta_number',
kwargs={
'award_pk': self.award.pk,
'pta_pk': self.id})
def get_delete_url(self):
"""Gets the URL used to delete this object"""
return reverse(
'delete_pta_number',
kwargs={
'award_pk': self.award.pk,
'pta_pk': self.id})
def get_recent_ptanumber_revision(self):
"""Gets the most recent revision of the model, using django-reversion"""
latest_revision = reversion.get_for_object(self)[0].revision
if latest_revision.user:
user = latest_revision.user.get_full_name()
else:
user = 'ATP'
return (user, latest_revision.date_created)
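    # Illustrative usage sketch (commented out; assumes django-reversion has
    # recorded at least one revision for this PTA number, otherwise the
    # [0] indexing above raises IndexError):
    #
    #     user, created = pta.get_recent_ptanumber_revision()
    #     print u'%s last edited by %s on %s' % (pta, user, created)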
class Subaward(AwardSection):
"""Model for the Subaward data"""
RISK_CHOICES = (
('L', 'Low'),
('M', 'Medium'),
('H', 'High')
)
SUBRECIPIENT_TYPE_CHOICES = (
('F', 'Foundation'),
('FP', 'For-Profit'),
('SG', 'State Government'),
('LG', 'Local Government'),
('I', 'International'),
('ON', 'Other non-profit'),
('U', 'University')
)
AGREEMENT_CHOICES = (
('SA', 'Subaward'),
('SC', 'Subcontract'),
('IC', 'ICA'),
('M', 'Modification'),
('H', 'Honorarium'),
('C', 'Consultant'),
('CS', 'Contract Service')
)
SUBAWARD_STATUS_CHOICES = (
('R', 'Review'),
('G', 'Waiting for GCAS approval'),
('D', 'Waiting for Department'),
('P', 'Procurement'),
        ('S', 'Sent to recipient'),
)
CONTRACT_CHOICES = (
('FP', 'Fixed price subcontract'),
('CR', 'Cost-reimbursable subcontract'),
('FA', 'Fixed amount award'),
('OT', 'Other')
)
minimum_fields = (
'subrecipient_type',
'risk',
'amount',
'gw_number',
'contact_information',
'subaward_start',
'subaward_end',
'agreement_type',
'debarment_check',
'international',
'sent',
'ffata_reportable',
'zip_code',
)
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'creation_date',
'modification_number',
'subaward_ready',
'sent',
'reminder',
'fcoi_cleared',
'citi_cleared',
'amount',
'contact_information',
'zip_code',
'subaward_start',
'subaward_end',
'debarment_check',
'international',
'cfda_number',
'ffata_submitted',
'tech_report_received']
award = models.ForeignKey(Award)
creation_date = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name='Date Created')
recipient = models.CharField(max_length=250, blank=True)
agreement_type = models.CharField(
choices=AGREEMENT_CHOICES,
max_length=2,
blank=True)
modification_number = models.CharField(max_length=50, blank=True)
subrecipient_type = models.CharField(
choices=SUBRECIPIENT_TYPE_CHOICES,
max_length=2,
blank=True,
verbose_name='Subrecipient Type')
assist = models.CharField(max_length=100, blank=True)
date_received = models.DateField(null=True, blank=True)
status = models.CharField(
choices=SUBAWARD_STATUS_CHOICES,
max_length=2,
blank=True)
risk = models.CharField(choices=RISK_CHOICES, max_length=2, blank=True)
approval_expiration = models.DateField(
null=True,
blank=True,
verbose_name='Date of Expiration for Approval')
subaward_ready = models.DateField(
null=True,
blank=True,
verbose_name='Subaward ready to be initiated')
sent = models.DateField(
null=True,
blank=True,
verbose_name='Subagreement sent to recipient')
reminder = models.NullBooleanField(
verbose_name='Reminder sent to Subawardee?')
received = models.DateField(
null=True,
blank=True,
verbose_name='Receipt of Partially Executed Subagreement')
fcoi_cleared = models.DateField(
null=True,
blank=True,
verbose_name='Subaward Cleared FCOI Procedures')
citi_cleared = models.DateField(
null=True,
blank=True,
verbose_name='Subaward Completed CITI Training')
date_fully_executed = models.DateField(null=True, blank=True)
amount = models.DecimalField(
decimal_places=2,
max_digits=10,
null=True,
blank=True,
verbose_name='Subaward Total Amount')
gw_number = models.CharField(
max_length=50,
blank=True,
verbose_name='GW Subaward Number')
funding_mechanism = models.CharField(
choices=CONTRACT_CHOICES,
max_length=2,
blank=True,
verbose_name='Funding mechanism')
other_mechanism = models.CharField(
max_length=255,
blank=True,
verbose_name='Other funding mechanism')
contact_information = models.TextField(
blank=True,
verbose_name='Subawardee contact information')
zip_code = models.CharField(
max_length=50,
blank=True,
verbose_name='ZIP code')
subaward_start = models.DateField(
null=True,
blank=True,
verbose_name='Subaward Performance Period Start')
subaward_end = models.DateField(
null=True,
blank=True,
verbose_name='Subaward Performance Period End')
debarment_check = models.NullBooleanField(
verbose_name='Debarment or suspension check?')
international = models.NullBooleanField(verbose_name='International?')
cfda_number = models.CharField(
max_length=50,
blank=True,
verbose_name='CFDA number')
fain = models.CharField(max_length=50, blank=True, verbose_name='FAIN')
ein = models.CharField(max_length=50, blank=True, verbose_name='EIN')
duns_number = models.CharField(
max_length=50,
blank=True,
verbose_name='DUNS number')
ffata_reportable = models.NullBooleanField(
verbose_name='FFATA Reportable?')
ffata_submitted = models.DateField(
null=True,
blank=True,
verbose_name='FFATA Report Submitted Date')
tech_report_due = models.DateField(
null=True,
blank=True,
verbose_name='Technical Report Due Date')
tech_report_received = models.DateField(
null=True,
blank=True,
verbose_name='Technical Report Received Date')
subaward_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
def __unicode__(self):
return u'Subaward %s' % (self.gw_number)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_subaward',
kwargs={
'award_pk': self.award.pk,
'subaward_pk': self.id})
class AwardManagement(AssignableAwardSection):
"""Model for the AwardManagement data"""
minimum_fields = (
)
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'date_assigned']
award = models.OneToOneField(Award)
management_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
def __unicode__(self):
return u'Award Management %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_award_management',
kwargs={
'award_pk': self.award.pk})
class PriorApproval(FieldIteratorMixin, models.Model):
"""Model for the PriorApproval data"""
HIDDEN_FIELDS = ['award']
HIDDEN_TABLE_FIELDS = []
REQUEST_CHOICES = (
('AB', 'Absence or Change of Key Personnel'),
('CF', 'Carry-forward of unexpended balances to subsequent funding periods'),
('CS', 'Change in Scope'),
('ER', 'Effort Reduction'),
('EN', 'Equipment not in approved budget'),
('FC', 'Faculty consulting compensation that exceeds base salary'),
('FT', 'Foreign Travel'),
('IN', 'Initial no-cost extension of up to 12 months (per competitive segment)'),
('OT', 'Other'),
('RA', 'Rebudgeting among budget categories'),
('RB', 'Rebudgeting between direct and F&A costs'),
('RF', 'Rebudgeting of funds allotted for training (direct payment to trainees) to other categories of expense'),
        ('SN', 'Subsequent no-cost extension or extension of more than 12 months'),
)
PRIOR_APPROVAL_STATUS_CHOICES = (
('PN', 'Pending'),
('AP', 'Approved'),
('NA', 'Not Approved'),
)
award = models.ForeignKey(Award)
request = models.CharField(
choices=REQUEST_CHOICES,
max_length=2,
blank=True)
date_submitted = models.DateField(null=True, blank=True)
status = models.CharField(
choices=PRIOR_APPROVAL_STATUS_CHOICES,
max_length=2,
blank=True)
date_approved = models.DateField(null=True, blank=True)
def __unicode__(self):
return u'Prior Approval #%s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object."""
return reverse(
'edit_prior_approval',
kwargs={
'award_pk': self.award.pk,
'prior_approval_pk': self.id})
def get_delete_url(self):
"""Gets the URL used to delete this object"""
return reverse(
'delete_prior_approval',
kwargs={
'award_pk': self.award.pk,
'prior_approval_pk': self.id})
class ReportSubmission(FieldIteratorMixin, models.Model):
"""Model for the ReportSubmission data"""
HIDDEN_FIELDS = ['award']
HIDDEN_TABLE_FIELDS = []
    REPORT_CHOICES = (
        ('TA', 'Technical Annual'),
        ('TS', 'Technical Semiannual'),
        ('TQ', 'Technical Quarterly'),
        ('IP', 'Interim Progress Report (Non-Competing Continuations)'),
        ('DL', 'Deliverables'),
        # 'IA' replaces a second 'IP' key that collided with the interim
        # progress report choice above and made the two indistinguishable
        ('IA', 'Invention/Patent Annual'),
        ('PA', 'Property Annual'),
        ('EA', 'Equipment Annual')
    )
award = models.ForeignKey(Award)
report = models.CharField(choices=REPORT_CHOICES, max_length=2, blank=True)
due_date = models.DateField(null=True, blank=True)
submitted_date = models.DateField(null=True, blank=True)
def __unicode__(self):
return u'Report Submission #%s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_report_submission',
kwargs={
'award_pk': self.award.pk,
'report_submission_pk': self.id})
def get_delete_url(self):
"""Gets the URL used to delete this object"""
return reverse(
'delete_report_submission',
kwargs={
'award_pk': self.award.pk,
'report_submission_pk': self.id})
class AwardCloseout(AssignableAwardSection):
"""Model for the AwardCloseout data"""
minimum_fields = (
)
HIDDEN_SEARCH_FIELDS = AwardSection.HIDDEN_SEARCH_FIELDS + [
'date_assigned']
award = models.OneToOneField(Award)
closeout_completion_date = models.DateTimeField(blank=True, null=True, verbose_name='Completion Date')
def __unicode__(self):
return u'Award Closeout %s' % (self.id)
def get_absolute_url(self):
"""Gets the URL used to navigate to this object"""
return reverse(
'edit_award_closeout',
kwargs={
'award_pk': self.award.pk})
class FinalReport(FieldIteratorMixin, models.Model):
"""Model for the FinalReport data"""
HIDDEN_FIELDS = ['award']
HIDDEN_TABLE_FIELDS = []
    FINAL_REPORT_CHOICES = (
        ('FT', 'Final Technical'),
        ('FP', 'Final Progress Report'),
        ('FD', 'Final Deliverable(s)'),
        ('IP', 'Final Invention/Patent'),
        ('FI', 'Final Invention'),
        # 'PR' replaces a second 'FP' key that collided with the final
        # progress report choice above
        ('PR', 'Final Property'),
        ('FE', 'Final Equipment'),
    )
award = models.ForeignKey(Award)
report = models.CharField(
choices=FINAL_REPORT_CHOICES,
max_length=2,
blank=True)
due_date = models.DateField(null=True, blank=True)
submitted_date = models.DateField(null=True, blank=True)
def __unicode__(self):
return u'Final Report #%s' % (self.id)
def get_absolute_url(self):
""" Gets the URL used to navigate to this object"""
return reverse(
'edit_final_report',
kwargs={
'award_pk': self.award.pk,
'final_report_pk': self.id})
def get_delete_url(self):
"""Gets the URL used to delete this object"""
return reverse(
'delete_final_report',
kwargs={
'award_pk': self.award.pk,
'final_report_pk': self.id})
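
# Illustrative sketch (not part of the original module): how the URL helpers
# above are typically used. The `award` instance and field values are
# hypothetical.
#
#     report = FinalReport.objects.create(award=award, report='FT')
#     edit_url = report.get_absolute_url()    # resolves 'edit_final_report'
#     delete_url = report.get_delete_url()    # resolves 'delete_final_report'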
| 2.75 | 3 |
dnplab/io/vna.py | DNPLab/dnpLab | 0 | 12785932 | <reponame>DNPLab/dnpLab
import numpy as np
import os
import re
from .. import DNPData
def import_vna(path):
"""Import VNA data and return dnpdata object"""
x, data = import_snp(path)
    # Not general: assumes a single frequency dimension ("f")
dnpDataObject = DNPData(data, [x], ["f"], {})
return dnpDataObject
# TODO: remove prints or make them optional
def import_snp(path):
"""Import sNp file and return numpy array"""
path_filename, extension = os.path.splitext(path)
extension_reg_ex = "[.]s[0-9]{1,}p"
print(re.fullmatch(extension_reg_ex, extension))
print(extension)
if re.fullmatch(extension_reg_ex, extension) == None:
raise ValueError("File Extension Not Given, Unspecified sNp file")
num_reg_ex = "[0-9]{1,}"
num = int(re.search(num_reg_ex, extension)[0])
print(num)
if num > 2:
raise ValueError("Currently on s1p and s2p files are supported")
f = open(path)
read_string = " "
while read_string[0] != "#":
read_string = f.readline()
raw = np.genfromtxt(f, skip_header=2, defaultfmt="11f")
f.close()
if num == 1:
x = raw[:, 0]
data = raw[:, 1] + 1j * raw[:, 2]
    if num == 2:
        x = raw[:, 0]  # frequency axis is the first column
        # a complex dtype is required; a real-valued array would silently
        # drop the imaginary parts assigned below
        data = np.zeros((len(x), 2, 2), dtype=complex)
        data[:, 0, 0] = raw[:, 1] + 1j * raw[:, 2]  # S11
        data[:, 1, 0] = raw[:, 3] + 1j * raw[:, 4]  # S21
        data[:, 0, 1] = raw[:, 5] + 1j * raw[:, 6]  # S12
        data[:, 1, 1] = raw[:, 7] + 1j * raw[:, 8]  # S22
    if num > 2:
        # NOTE: unreachable while the num > 2 guard above raises; kept for
        # when more ports are supported
        x = raw[0::num]
        data = np.zeros((len(x), num, num), dtype=complex)
        # TODO: Use list comprehension instead of two for loops
        for n in range(num):
            for m in range(num):
                data[:, n, m] = raw[n::num, 1 + 2 * m] + 1j * raw[n::num, 2 * (1 + m)]
return x, data
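
# Illustrative usage sketch (not part of the original module); the file name
# is hypothetical:
#
#     freqs, s11 = import_snp("resonator.s1p")   # 1-port: complex S11 trace
#     vna_data = import_vna("resonator.s1p")     # wrapped as a DNPData object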
| 2.078125 | 2 |
im3components/utils.py | IMMM-SFA/im3components | 0 | 12785933 | <filename>im3components/utils.py
import yaml
def read_yaml(yaml_file: str) -> dict:
"""Read a YAML file.
:param yaml_file: Full path with file name and extension to an input YAML file
:type yaml_file: str
:return: Dictionary
"""
with open(yaml_file, 'r') as yml:
return yaml.load(yml, Loader=yaml.FullLoader)
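
# Illustrative usage sketch (not part of the original module); the file path
# is hypothetical:
#
#     config = read_yaml("config/components.yml")
#     print(config.keys())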
| 3.3125 | 3 |
parkings/api/utils.py | klemmari1/parkkihubi | 12 | 12785934 | <filename>parkings/api/utils.py
import dateutil.parser
from django.utils import timezone
from rest_framework.exceptions import ValidationError
def parse_timestamp_or_now(timestamp_string):
"""
Parse given timestamp string or return current time.
If the timestamp string is falsy, return current time, otherwise try
to parse the string and return the parsed value.
:type timestamp_string: str
:rtype: datetime.datetime
:raises rest_framework.exceptions.ValidationError: on parse error
"""
if not timestamp_string:
return timezone.now()
return parse_timestamp(timestamp_string)
def parse_timestamp(datetime_string):
try:
return dateutil.parser.parse(datetime_string)
except (ValueError, OverflowError):
raise ValidationError('Invalid timestamp: {}'.format(datetime_string))
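
# Illustrative sketch (not part of the original module):
#
#     parse_timestamp_or_now("2019-01-01T12:00:00Z")  # -> parsed datetime
#     parse_timestamp_or_now("")                      # -> timezone.now()
#     parse_timestamp("not-a-date")                   # raises ValidationError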
| 2.890625 | 3 |
lambeq/text2diagram/spiders_reader.py | CQCL/lambeq | 131 | 12785935 | # Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['SpidersReader', 'bag_of_words_reader', 'spiders_reader']
from discopy import Word
from discopy.rigid import Diagram, Spider
from lambeq.core.types import AtomicType
from lambeq.core.utils import SentenceType, tokenised_sentence_type_check
from lambeq.text2diagram.base import Reader
S = AtomicType.SENTENCE
class SpidersReader(Reader):
"""A reader that combines words using a spider."""
def sentence2diagram(self,
sentence: SentenceType,
tokenised: bool = False) -> Diagram:
if tokenised:
if not tokenised_sentence_type_check(sentence):
raise ValueError('`tokenised` set to `True`, but variable '
'`sentence` does not have type `list[str]`.')
else:
if not isinstance(sentence, str):
raise ValueError('`tokenised` set to `False`, but variable '
'`sentence` does not have type `str`.')
sentence = sentence.split()
words = [Word(word, S) for word in sentence]
diagram = Diagram.tensor(*words) >> Spider(len(words), 1, S)
return diagram
spiders_reader = SpidersReader()
bag_of_words_reader = spiders_reader
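
# Illustrative sketch (not part of the original module): each word becomes a
# Word box of type S, and a single Spider merges them into one sentence wire.
#
#     diagram = spiders_reader.sentence2diagram("John walks in the park")
#     diagram = spiders_reader.sentence2diagram(["John", "walks"],
#                                               tokenised=True)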
| 2.5 | 2 |
CellProfiler/tests/modules/test_measureimageskeleton.py | aidotse/Team-rahma.ai | 0 | 12785936 | <reponame>aidotse/Team-rahma.ai
import numpy
import pytest
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import COLTYPE_INTEGER
import cellprofiler.modules.measureimageskeleton
instance = cellprofiler.modules.measureimageskeleton.MeasureImageSkeleton()
@pytest.fixture(scope="module")
def image_skeleton():
data = numpy.zeros((9, 9), dtype=numpy.float32)
data[4, :] = 1.0
data[:, 4] = 1.0
return data
@pytest.fixture(scope="module")
def volume_skeleton():
data = numpy.zeros((9, 9, 9), dtype=numpy.float32)
data[4, 4, :] = 1.0
data[4, :, 4] = 1.0
data[:, 4, 4] = 1.0
return data
@pytest.fixture(
scope="module",
params=[("image_skeleton", 2), ("volume_skeleton", 3)],
ids=["image_skeleton", "volume_skeleton"],
)
def image(request):
data, dimensions = request.param
data = request.getfixturevalue(data)
return cellprofiler_core.image.Image(
image=data, dimensions=dimensions, convert=False
)
def test_get_categories_image(module, pipeline):
expected_categories = ["Skeleton"]
categories = module.get_categories(pipeline, "Image")
assert categories == expected_categories
def test_get_categories_other(module, pipeline):
expected_categories = []
categories = module.get_categories(pipeline, "foo")
assert categories == expected_categories
def test_get_measurement_columns(module, pipeline):
module.skeleton_name.value = "example"
expected_columns = [
(
"Image",
"Skeleton_Branches_example",
COLTYPE_INTEGER,
),
(
"Image",
"Skeleton_Endpoints_example",
COLTYPE_INTEGER,
),
]
columns = module.get_measurement_columns(pipeline)
assert columns == expected_columns
def test_get_measurements_image_skeleton(module, pipeline):
module.skeleton_name.value = "example"
expected_measurements = ["Skeleton_Branches_example", "Skeleton_Endpoints_example"]
measurements = module.get_measurements(
pipeline, "Image", "Skeleton"
)
assert measurements == expected_measurements
def test_get_measurements_image_other(module, pipeline):
module.skeleton_name.value = "example"
expected_measurements = []
measurements = module.get_measurements(
pipeline, "Image", "foo"
)
assert measurements == expected_measurements
def test_get_measurements_other(module, pipeline):
module.skeleton_name.value = "example"
expected_measurements = []
measurements = module.get_measurements(pipeline, "foo", "Skeleton")
assert measurements == expected_measurements
def test_get_measurement_images(module, pipeline):
module.skeleton_name.value = "example"
expected_images = ["example"]
images = module.get_measurement_images(
pipeline,
"Image",
"Skeleton",
"Skeleton_Branches_example",
)
assert images == expected_images
def test_run(image, module, workspace):
module.skeleton_name.value = "example"
module.run(workspace)
branches = workspace.measurements.get_current_measurement(
"Image", "Skeleton_Branches_example"
)
endpoints = workspace.measurements.get_current_measurement(
"Image", "Skeleton_Endpoints_example"
)
if image.volumetric:
expected_branches = 7
expected_endpoints = 6
else:
expected_branches = 5
expected_endpoints = 4
assert branches == expected_branches
assert endpoints == expected_endpoints
| 1.960938 | 2 |
invoicing/filters.py | cumanachao/utopia-crm | 13 | 12785937 | <gh_stars>10-100
from datetime import date, timedelta
import django_filters
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
from .models import Invoice
CREATION_CHOICES = (
('today', _('Today')),
('yesterday', _('Yesterday')),
('last_7_days', _('Last 7 days')),
('last_30_days', _('Last 30 days')),
('this_month', _('This month')),
('last_month', _('Last month')),
('custom', _('Custom'))
)
STATUS_CHOICES = (
('paid', _("Paid")),
('debited', _("Debited")),
('paid_or_debited', _("Paid or debited")),
('pending', _('Pending')),
('canceled', _('Canceled')),
('uncollectible', _('Uncollectible')),
('overdue', _('Overdue')),
('not_paid', _('Not paid'))
)
class InvoiceFilter(django_filters.FilterSet):
contact_name = django_filters.CharFilter(method='filter_by_contact_name')
creation_date = django_filters.ChoiceFilter(choices=CREATION_CHOICES, method='filter_by_creation_date')
creation_gte = django_filters.DateFilter(
field_name='creation_date', lookup_expr='gte', widget=forms.TextInput(attrs={'autocomplete': 'off'}))
creation_lte = django_filters.DateFilter(
field_name='creation_date', lookup_expr='lte', widget=forms.TextInput(attrs={'autocomplete': 'off'}))
status = django_filters.ChoiceFilter(choices=STATUS_CHOICES, method='filter_by_status')
class Meta:
model = Invoice
fields = ['contact_name', 'payment_type']
def filter_by_contact_name(self, queryset, name, value):
return queryset.filter(contact__name__icontains=value)
def filter_by_creation_date(self, queryset, name, value):
if value == 'today':
return queryset.filter(creation_date=date.today())
elif value == 'yesterday':
return queryset.filter(creation_date=date.today() - timedelta(1))
elif value == 'last_7_days':
return queryset.filter(
creation_date__gte=date.today() - timedelta(7), creation_date__lte=date.today())
elif value == 'last_30_days':
return queryset.filter(
creation_date__gte=date.today() - timedelta(30), creation_date__lte=date.today())
elif value == 'this_month':
return queryset.filter(
creation_date__month=date.today().month, creation_date__year=date.today().year)
elif value == 'last_month':
month = date.today().month - 1 if date.today().month != 1 else 12
year = date.today().year if date.today().month != 1 else date.today().year - 1
return queryset.filter(creation_date__month=month, creation_date__year=year)
else:
return queryset
def filter_by_status(self, queryset, name, value):
if value == 'paid':
return queryset.filter(paid=True)
elif value == 'debited':
return queryset.filter(debited=True)
elif value == 'paid_or_debited':
return queryset.filter(Q(paid=True) | Q(debited=True))
elif value == 'canceled':
return queryset.filter(canceled=True)
elif value == 'uncollectible':
return queryset.filter(uncollectible=True)
elif value == 'overdue':
return queryset.filter(
paid=False, debited=False, canceled=False, uncollectible=False, expiration_date__lte=date.today())
elif value == 'not_paid':
return queryset.filter(
paid=False, debited=False, canceled=False, uncollectible=False,
)
        else:
            # default ('pending'): not settled in any way and not yet expired
            return queryset.filter(
                paid=False, debited=False, uncollectible=False, canceled=False, expiration_date__gt=date.today())
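
# Illustrative usage sketch (not part of the original module); the filter
# values are hypothetical:
#
#     f = InvoiceFilter(
#         {"status": "overdue", "creation_date": "last_30_days"},
#         queryset=Invoice.objects.all())
#     overdue_invoices = f.qs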
| 1.984375 | 2 |
src/rules/entity/actor_plugins/__init__.py | FrozenYogurtPuff/iStar-pipeline | 0 | 12785938 | <reponame>FrozenYogurtPuff/iStar-pipeline
from .be_nsubj import be_nsubj
from .by_sb import by_sb
from .dative_PROPN import dative_propn
from .dep import dep_base as dep
from .ner import ner
from .relcl_who import relcl_who
from .tag import tag_base as tag
from .word_list import word_list
from .xcomp_ask_sb_to_do import xcomp_ask
| 0.917969 | 1 |
bin/evaluate.py | mwang87/FalconClusterWorkflow | 0 | 12785939 | <gh_stars>0
import pandas as pd
import sys
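
# Illustrative usage (the file name is hypothetical):
#     python evaluate.py cluster_summary.csv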
input_csv = sys.argv[1]
df = pd.read_csv(input_csv, sep=',', comment='#')
print(df)
| 3.046875 | 3 |
app/main/__init__.py | AlchemistPrimus/data_crunchers_knbs | 0 | 12785940 | import flask
import pandas
| 1.03125 | 1 |
deployer/stack.py | bwood/deployer | 1 | 12785941 | <filename>deployer/stack.py
from deployer.cloudformation import AbstractCloudFormation
from deployer.decorators import retry
from deployer.logger import logger
from deployer.cloudtools_bucket import CloudtoolsBucket
import signal, pytz
from collections import defaultdict
from botocore.exceptions import ClientError, WaiterError
from tabulate import tabulate
from time import sleep
from datetime import datetime
from parse import parse
class Stack(AbstractCloudFormation):
def __init__(self, session, stack, config, bucket, args = {}):
# Save important parameters
self.session = session
self.stack = stack
self.config = config
if bucket:
self.bucket = bucket
# Load values from args
self.disable_rollback = args.get('disable_rollback', False)
self.print_events = args.get('print_events', False)
        # NOTE: this attribute is overloaded: it first holds the timeout value
        # passed on the command line, and output_events() later reuses it as a
        # boolean "did we time out" flag
        self.timed_out = args.get('timeout', None)
self.colors = args.get('colors', defaultdict(lambda: ''))
self.params = args.get('params', {})
# Load values from config
self.stack_name = self.config.get_config_att('stack_name', required=True)
self.base = self.config.get_config_att('sync_base', '.')
# Load values from methods for config lookup
self.repository = self.get_repository(self.base)
self.commit = self.repository.head.object.hexsha if self.repository else 'null'
# Load values from config
self.release = self.config.get_config_att('release', self.commit).replace('/','.')
self.template = self.config.get_config_att('template', required=True)
self.timeout = self.timed_out if self.timed_out is not None else self.config.get_config_att('timeout', None)
self.transforms = self.config.get_config_att('transforms')
        # Initialize objects
self.client = self.session.client('cloudformation')
self.sts = self.session.client('sts')
# Load values from methods
self.origin = self.get_repository_origin(self.repository) if self.repository else 'null'
self.identity_arn = self.sts.get_caller_identity().get('Arn', '')
if bucket:
            self.template_url = self.bucket.construct_template_url(self.config, self.stack, self.release, self.template)
self.template_file = self.bucket.get_template_file(self.config, self.stack)
self.template_body = self.bucket.get_template_body(self.config, self.template)
# Set state values
        self._timed_out = False  # NOTE: vestigial; output_events() sets self.timed_out instead
self.validate_account(self.session, self.config)
self.reload_stack_status()
def reload_change_set_status(self, change_set_name):
try:
resp = self.client.describe_change_set(
ChangeSetName=change_set_name,
StackName=self.stack_name
)
self.change_set_status = resp['Status']
except Exception:
self.change_set_status = 'False'
return self.change_set_status
def construct_tags(self):
tags = self.config.get_config_att('tags')
if tags:
tags = [ { 'Key': key, 'Value': value } for key, value in tags.items() ]
if len(tags) > 47:
raise ValueError('Resources tag limit is 50, you have provided more than 47 tags. Please limit your tagging, save room for name and deployer tags.')
else:
tags = []
tags.append({'Key': 'deployer:stack', 'Value': self.stack})
tags.append({'Key': 'deployer:caller', 'Value': self.identity_arn})
tags.append({'Key': 'deployer:git:commit', 'Value': self.commit})
tags.append({'Key': 'deployer:git:origin', 'Value': self.origin})
tags.append({'Key': 'deployer:config', 'Value': self.config.file_name.replace('\\', '/')})
return tags
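
    # Illustrative sketch (not part of the original class): construct_tags()
    # merges the config's own tags with deployer bookkeeping tags, e.g.
    #
    #     [{'Key': 'team', 'Value': 'platform'},           # from config 'tags'
    #      {'Key': 'deployer:stack', 'Value': 'network'},
    #      {'Key': 'deployer:caller', 'Value': '<caller ARN>'},
    #      {'Key': 'deployer:git:commit', 'Value': '<sha>'},
    #      {'Key': 'deployer:git:origin', 'Value': '<origin>'},
    #      {'Key': 'deployer:config', 'Value': 'config.yaml'}]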
def create_waiter(self, start_time):
waiter = self.client.get_waiter('stack_create_complete')
logger.info("Creation Started")
sleep(5)
logger.info(self.reload_stack_status())
if self.print_events:
try:
self.output_events(start_time, 'create')
except RuntimeError as e:
if self.timed_out:
logger.error('Stack creation exceeded timeout of {} minutes and was aborted.'.format(self.timeout))
exit(2)
else:
raise e
else:
try:
waiter.wait(StackName=self.stack_name)
except WaiterError as e:
status = self.reload_stack_status()
logger.info(status)
self.output_events(start_time, 'create')
logger.info(self.reload_stack_status())
def update_waiter(self, start_time):
waiter = self.client.get_waiter('stack_update_complete')
logger.info("Update Started")
sleep(5)
logger.info(self.reload_stack_status())
if self.print_events:
try:
self.output_events(start_time, 'update')
except RuntimeError as e:
if self.timed_out:
                    logger.error('Stack update exceeded timeout of {} minutes and was aborted.'.format(self.timeout))
exit(2)
else:
raise e
else:
try:
waiter.wait(StackName=self.stack_name)
except WaiterError:
status = self.reload_stack_status()
logger.info(status)
self.output_events(start_time, 'update')
logger.info(self.reload_stack_status())
def output_events(self, start_time, action):
update_time = start_time
headers = [ 'Time', 'Status', 'Type', 'Logical ID', 'Status Reason' ]
if action == 'create':
END_STATUS = 'CREATE_COMPLETE'
elif action == 'update':
END_STATUS = 'UPDATE_COMPLETE'
count = 0
sleep_interval = 15
while self.stack_status != END_STATUS:
status = self.reload_stack_status()
table = []
sleep(sleep_interval)
#Check interval and exit if this is an update
if action == 'update' and self.timeout is not None:
if (sleep_interval * count) > (self.timeout * 60):
self.timed_out = True
raise RuntimeError("Update stack Failed")
events = self.client.describe_stack_events(StackName=self.stack_name)
events = events['StackEvents']
events.reverse()
for event in events:
if event['Timestamp'] > start_time and event['Timestamp'] > update_time:
reason = event.get('ResourceStatusReason', '')
if reason == 'Stack creation time exceeded the specified timeout. Rollback requested by user.':
self.timed_out = True
table.append([
event['Timestamp'].strftime('%Y/%m/%d %H:%M:%S'),
event['ResourceStatus'],
event['ResourceType'],
event['LogicalResourceId'],
reason
])
update_time = datetime.now(pytz.utc)
if len(table) > 0:
if count == 0:
print(tabulate(table,headers,tablefmt='simple'))
else:
print(tabulate(table,[],tablefmt='plain'))
if action == 'create':
if status in [ 'CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_COMPLETE', 'ROLLBACK_FAILED' ]:
raise RuntimeError("Create stack Failed")
elif action == 'update':
if status in [ 'UPDATE_FAILED', 'UPDATE_ROLLBACK_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'UPDATE_ROLLBACK_FAILED' ]:
raise RuntimeError("Update stack Failed")
count += 1
def delete_stack(self):
self.client.delete_stack(StackName=self.stack_name)
logger.info(self.colors['error'] + "Sent delete request to stack" + self.colors['reset'])
return True
def get_latest_change_set_name(self):
resp = {}
latest = None
while 'NextToken' in resp or latest == None:
if 'NextToken' in resp:
resp = self.client.list_change_sets(
StackName=self.stack_name,
NextToken=resp['NextToken']
)
else:
resp = self.client.list_change_sets(
StackName=self.stack_name
)
for change in resp['Summaries']:
if not latest:
latest = change
if change['CreationTime'] > latest['CreationTime']:
latest = change
if resp['Summaries'] == []:
return None
return latest['ChangeSetName']
def get_change_set(self, change_set_name, change_set_description, change_set_type):
# create the change set
if self.stack_status:
resp = self.client.create_change_set(
StackName=self.stack_name,
TemplateURL=self.template_url,
Parameters=self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file),
Capabilities=[
'CAPABILITY_IAM',
'CAPABILITY_NAMED_IAM',
'CAPABILITY_AUTO_EXPAND'
],
ChangeSetName=change_set_name,
Description=change_set_description,
ChangeSetType=change_set_type
)
logger.info("Change Set Started: %s" % resp['Id'])
sleep(5)
self.change_set_status = self.reload_change_set_status(change_set_name)
while self.change_set_status != 'CREATE_COMPLETE':
sleep(10)
status = self.reload_change_set_status(change_set_name)
logger.info(status)
if status == 'FAILED':
raise RuntimeError("Change set Failed")
self.print_change_set(change_set_name, change_set_description)
else:
raise RuntimeError("Stack does not exist")
def execute_change_set(self, change_set_name):
self.client.execute_change_set(
ChangeSetName=change_set_name,
StackName=self.stack_name
)
def print_change_set(self, change_set_name, change_set_description):
resp = self.client.describe_change_set(
ChangeSetName=change_set_name,
StackName=self.stack_name
)
self.changes = resp['Changes']
print("==================================== Change ===================================")
headers = ["Action","LogicalId","ResourceType","Replacement"]
table = []
for change in self.changes:
row = []
row.append(change['ResourceChange']['Action'])
row.append(change['ResourceChange']['LogicalResourceId'])
row.append(change['ResourceChange']['ResourceType'])
if 'Replacement' in change['ResourceChange']:
row.append(change['ResourceChange']['Replacement'])
else:
row.append('')
table.append(row)
print(tabulate(table, headers, tablefmt='simple'))
def exists(self):
try:
self.client.describe_stacks(StackName=self.stack_name)
return True
except ClientError:
return False
def describe(self):
try:
return self.client.describe_stacks(StackName=self.stack_name)['Stacks'][0]
except ClientError:
return {}
def upsert_stack(self):
self.update_stack() if self.exists() else self.create_stack()
def create_stack(self):
signal.signal(signal.SIGINT, self.cancel_create)
if not self.transforms:
# create the stack
start_time = datetime.now(pytz.utc)
args = {
"StackName": self.stack_name,
"Parameters": self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file),
"DisableRollback": self.disable_rollback,
"Tags": self.construct_tags(),
"Capabilities": [
'CAPABILITY_IAM',
'CAPABILITY_NAMED_IAM',
'CAPABILITY_AUTO_EXPAND'
]
}
args.update({'TemplateBody': self.template_body} if self.template_body else {"TemplateURL": self.template_url})
args.update({'TimeoutInMinutes': self.timeout} if self.timeout else {})
if self.template_body:
logger.info("Using local template due to null template bucket")
self.client.create_stack(**args)
self.create_waiter(start_time)
else:
start_time = datetime.now(pytz.utc)
change_set_name = "{0}-1".format(self.config.get_config_att('change_prefix'))
self.get_change_set(change_set_name, "Deployer Automated", 'CREATE')
self.execute_change_set(change_set_name)
self.create_waiter(start_time)
def update_stack(self):
signal.signal(signal.SIGINT, self.cancel_update)
if not self.transforms:
start_time = datetime.now(pytz.utc)
args = {
"StackName": self.stack_name,
"Parameters": self.config.build_params(self.session, self.stack, self.release, self.params, self.template_file),
"Tags": self.construct_tags(),
"Capabilities": [
'CAPABILITY_IAM',
'CAPABILITY_NAMED_IAM',
'CAPABILITY_AUTO_EXPAND'
]
}
args.update({'TemplateBody': self.template_body} if self.template_body else {"TemplateURL": self.template_url})
if self.template_body:
logger.info("Using local template due to null template bucket")
if self.stack_status:
try:
self.client.update_stack(**args)
self.update_waiter(start_time)
except ClientError as e:
if 'No updates are to be performed' in e.response['Error']['Message']:
logger.warning('No updates are to be performed')
else:
raise e
else:
raise RuntimeError("Stack does not exist")
else:
latest_change = self.get_latest_change_set_name()
if latest_change:
                # str.strip() strips characters, not a prefix; parse the
                # trailing change-set number explicitly instead
                change_number = int(latest_change.rsplit('-', 1)[-1])
change_number += 1
else:
change_number = 1
start_time = datetime.now(pytz.utc)
change_set_name = "{0}-{1}".format(self.config.get_config_att('change_prefix'),change_number)
self.get_change_set(change_set_name, "Deployer Automated", 'UPDATE')
self.execute_change_set(change_set_name)
self.update_waiter(start_time)
def cancel_create(self, signal, frame):
logger.critical('Process Interupt')
        logger.critical('Deleting Stack: %s' % self.stack_name)
self.delete_stack()
exit(1)
def cancel_update(self, signal, frame):
logger.critical('Process Interupt')
logger.critical('Cancelling Stack Update: %s' % self.stack_name)
self.client.cancel_update_stack(StackName=self.stack_name)
exit(1)
@retry(ClientError,logger=logger)
def get_outputs(self):
resp = self.client.describe_stacks(
StackName=self.stack_name)
self.outputs = resp['Stacks'][0]['Outputs']
return self.outputs
@property
def status(self):
return self.reload_stack_status()
@retry(ClientError,tries=6,logger=logger)
def reload_stack_status(self):
try:
resp = self.client.describe_stacks(
StackName=self.stack_name)
self.stack_status = resp['Stacks'][0]['StackStatus']
except Exception:
self.stack_status = 'False'
return self.stack_status
| 1.90625 | 2 |
safe_grid_agents/spiky/agents.py | jvmancuso/safe-grid-agents | 21 | 12785942 | """PPO Agent for CRMDPs."""
import torch
import random
import numpy as np
from typing import Generator, List
from safe_grid_agents.common.utils import track_metrics
from safe_grid_agents.common.agents.policy_cnn import PPOCNNAgent
from safe_grid_agents.types import Rollout
from ai_safety_gridworlds.environments.tomato_crmdp import REWARD_FACTOR
def _get_agent_position(board, agent_value):
x_pos, y_pos = np.unravel_index(
np.argwhere(np.ravel(board) == agent_value), board.shape
)
x_pos, y_pos = x_pos.flat[0], y_pos.flat[0]
return x_pos, y_pos
def _manhattan_distance(x1, x2, y1, y2):
    return abs(x1 - x2) + abs(y1 - y2)
def d_tomato_crmdp(X, Y):
assert X.shape == Y.shape
return REWARD_FACTOR * np.sum(X != Y)
def d_toy_gridworlds(X, Y):
assert X.shape == Y.shape
X = X[0, ...]
Y = Y[0, ...]
# toy gridworlds use value 0 to denote the agent on the board
X_pos_x, X_pos_y = _get_agent_position(X, agent_value=0)
Y_pos_x, Y_pos_y = _get_agent_position(Y, agent_value=0)
    return _manhattan_distance(X_pos_x, Y_pos_x, X_pos_y, Y_pos_y)
def d_trans_boat(X, Y):
assert X.shape == Y.shape
X_initial, X_final = X[0, ...], X[1, ...]
Y_initial, Y_final = Y[0, ...], Y[1, ...]
# deepmind gridworlds use value 2 to denote the agent on the board
X_initial_pos_x, X_initial_pos_y = _get_agent_position(X_initial, agent_value=2)
Y_initial_pos_x, Y_initial_pos_y = _get_agent_position(Y_initial, agent_value=2)
X_final_pos_x, X_final_pos_y = _get_agent_position(X_final, agent_value=2)
Y_final_pos_x, Y_final_pos_y = _get_agent_position(Y_final, agent_value=2)
X_direction_x = X_final_pos_x - X_initial_pos_x
X_direction_y = X_final_pos_y - X_initial_pos_y
Y_direction_x = Y_final_pos_x - Y_initial_pos_x
Y_direction_y = Y_final_pos_y - Y_initial_pos_y
    initial_position_distance = _manhattan_distance(
        X_initial_pos_x, Y_initial_pos_x, X_initial_pos_y, Y_initial_pos_y
    )
direction_distance = int(X_direction_x != Y_direction_x)
direction_distance += int(X_direction_y != Y_direction_y)
return initial_position_distance + direction_distance
ENV_TO_D = {
"corners": d_toy_gridworlds,
"way": d_toy_gridworlds,
"tomato-crmdp": d_tomato_crmdp,
"trans-boat": d_trans_boat,
}
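
# Illustrative sketch (not part of the original module): pick the state
# distance for an environment and use it the way _get_TLV does below; the
# boards and rewards are hypothetical.
#
#     d = ENV_TO_D["corners"]
#     violation = max(0, abs(reward_x - reward_y) - d(board_x, board_y))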
class PPOCRMDPAgent(PPOCNNAgent):
"""PPO Agent for CRMDPs."""
def __init__(self, env, args) -> None:
super().__init__(env, args)
self.states = dict()
self.d = ENV_TO_D[args.env_alias]
self.epsilon = 1e-3
self.rllb = dict()
self.state_memory_cap = 0
def _mark_state_corrupt(self, board, reward) -> None:
assert board.dtype == np.float32
self.states[board.tostring()] = [False, reward]
def _mark_state_safe(self, board, reward) -> None:
assert board.dtype == np.float32
self.states[board.tostring()] = [True, reward]
def _is_state_corrupt(self, board) -> bool:
if board.tostring() in self.states:
return not self.states[board.tostring()][0]
else:
return False
def _iterate_safe_states(self) -> Generator[np.array, None, None]:
for board_str in self.states.keys():
if self.states[board_str][0]:
                board = np.frombuffer(board_str, dtype=np.float32, count=self.n_input)  # fromstring is deprecated for binary data
board = np.reshape(board, self.board_shape)
yield board, self.states[board_str][1]
def _iterate_corrupt_states(self) -> Generator[np.array, None, None]:
for board_str in self.states.keys():
if not self.states[board_str][0]:
                board = np.frombuffer(board_str, dtype=np.float32, count=self.n_input)  # fromstring is deprecated for binary data
board = np.reshape(board, self.board_shape)
yield board, self.states[board_str][1]
def _update_rllb(self) -> None:
"""Update the reward lower Lipschitz bound."""
for corrupt_board, corrupt_reward in self._iterate_corrupt_states():
board_string = corrupt_board.tostring()
rllb = self.rllb.get(board_string, None)
for safe_board, safe_reward in self._iterate_safe_states():
bound = safe_reward - self.d(safe_board, corrupt_board)
if rllb is None or bound > rllb:
rllb = bound
self.rllb[board_string] = rllb
def _get_TLV(self, boardX, rewardX, state_iterator) -> float:
"""Return the total Lipschitz violation of a state X w.r.t a set of states.
Each state is only added once to the TLV."""
TLV = 0
unique_states = set()
for boardY, rewardY in state_iterator:
if boardY.tostring() not in unique_states:
TLV += max(0, abs(rewardX - rewardY) - self.d(boardY, boardX))
unique_states.add(boardY.tostring())
return TLV
def _purge_memory(self) -> None:
"""Drop random noncorrupt states from the memory for performance reasons."""
if len(self.states) > self.state_memory_cap:
            to_remove = [
                state
                for state in random.sample(
                    # list() because random.sample needs a sequence; // keeps
                    # the sample size an integer (true division would give a
                    # float, which random.sample rejects)
                    list(self.states.keys()),
                    len(self.states) - self.state_memory_cap // 2,
                )
                if self.states[state][0]
            ]
for state in to_remove:
del self.states[state]
# we might have too many corrupt states, so update the bounds
if len(self.states) > 2 * self.state_memory_cap / 3:
self.state_memory_cap *= 2
def get_modified_reward(self, board, reward) -> float:
"""Return the reward to use for optimizing the policy based on the rllb."""
if self._is_state_corrupt(board):
return self.rllb[board.tostring()]
else:
return reward
def get_modified_rewards_for_rollout(self, boards, rewards) -> List[float]:
"""
Returns a list of rewards for a given rollout that has been updated based
on the rllb.
"""
new_rewards = []
for i in range(len(rewards)):
new_rewards.append(self.get_modified_reward(boards[i], rewards[i]))
return new_rewards
def identify_corruption_in_trajectory(self, boards, rewards) -> None:
"""Perform detection of corrupt states on a trajectory.
Updates the set of safe states and corrupt states with all new states,
that are being visited in this trajectory. Then updates the self.rllb
dict, so that we can get the modified reward function.
"""
boards = np.array(boards)
rewards = np.array(rewards)
TLV = np.zeros(len(boards))
for i in range(len(boards)):
TLV[i] = self._get_TLV(boards[i], rewards[i], zip(boards, rewards))
TLV_sort_idx = np.argsort(TLV)[::-1]
non_corrupt_idx = list(range(len(boards)))
added_corrupt_states = False
# iterate over all states in the trajectory in order decreasing by their TLV
for i in range(len(boards)):
idx = TLV_sort_idx[i]
if not added_corrupt_states:
# performance improvement
new_TLV = TLV[idx]
else:
new_TLV = self._get_TLV(
boards[idx],
rewards[idx],
zip(boards[non_corrupt_idx], rewards[non_corrupt_idx]),
)
if new_TLV <= self.epsilon:
if not self._is_state_corrupt(boards[idx]):
self._mark_state_safe(boards[idx], rewards[idx])
break
else:
self._mark_state_corrupt(boards[idx], rewards[idx])
non_corrupt_idx.remove(idx)
added_corrupt_states = True
if added_corrupt_states:
self._update_rllb()
def gather_rollout(self, env, env_state, history, args) -> Rollout:
"""Gather a single rollout from an old policy.
Based on the gather_rollout function of the regular PPO agents.
This version also tracks the successor states of each action.
Based on this the corrupted states can be detected before performing
the training step."""
state, reward, done, info = env_state
done = False
rollout = Rollout(states=[], actions=[], rewards=[], returns=[])
successors = []
for r in range(self.rollouts):
successors_r = []
# Rollout loop
states, actions, rewards, returns = [], [], [], []
while not done:
with torch.no_grad():
action = self.old_policy.act_explore(state)
successor, reward, done, info = env.step(action)
# Maybe cheat
if args.cheat:
reward = info["hidden_reward"]
# In case the agent is drunk, use the actual action they took
try:
action = info["extra_observations"]["actual_actions"]
except KeyError:
pass
# Store data from experience
                states.append(state)
actions.append(action)
rewards.append(float(reward))
successors_r.append(successor)
state = successor
history["t"] += 1
if r != 0:
history["episode"] += 1
self.identify_corruption_in_trajectory(successors_r, rewards)
rewards = self.get_modified_rewards_for_rollout(successors_r, rewards)
returns = self.get_discounted_returns(rewards)
history = track_metrics(history, env)
rollout.states.append(states)
rollout.actions.append(actions)
rollout.rewards.append(rewards)
rollout.returns.append(returns)
successors.append(successors_r)
self.state_memory_cap = max(self.state_memory_cap, 20 * len(states))
self._purge_memory()
state = env.reset()
done = False
return rollout
| 2.296875 | 2 |
cmake_tidy/commands/analyze/analyze_command.py | MaciejPatro/cmake-tidy | 16 | 12785943 | <gh_stars>10-100
###############################################################################
# Copyright <NAME> (<EMAIL>)
# MIT License
###############################################################################
from cmake_tidy.commands import Command
from cmake_tidy.utils import ExitCodes
class AnalyzeCommand(Command):
__DESCRIPTION = 'analyze file to find violations against selected rules'
def __init__(self, parser):
super().__init__(parser, 'analyze', AnalyzeCommand.__DESCRIPTION)
def execute_command(self, args) -> int:
return ExitCodes.SUCCESS
| 2.203125 | 2 |
customer/migrations/0003_auto_20210713_1716.py | RFNshare/StraightIntLtd | 0 | 12785944 | # Generated by Django 3.0.7 on 2021-07-13 11:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0002_customer_address'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customer',
name='phone',
field=models.CharField(max_length=100, null=True),
),
]
| 1.78125 | 2 |
combine.py | ychenbioinfo/msg | 0 | 12785945 | <gh_stars>0
"""
Combines each individual's hmmprob.RData file into two summary files
(linearly interpolating missing values)
Usage:
python msg/combine.py
msg/combine.py -d /groups/stern/home/sternd/svb_mausec2/hmm_fit
"""
import os
import sys
import csv
import glob
import optparse
import subprocess
import uuid
import gc
import numpy
import numpy.lib.recfunctions
from msglib import trace, get_free_memory
# -------------- SETTINGS ----------------
# Assumes the files matches this pattern relative to hmm_fit (or other specificied directory)
GLOB_PATTERN = '/*/*-hmmprob.RData'
DEBUG = False
# ----------------------------------------
def grab_files(dir):
"""Example from Toy data:
glob.glob('hmm_fit/*/*-hmmprob.RData.chrom.*.csv')
['hmm_fit/indivF11_GTTACG/indivF11_GTTACG-hmmprob.RData.chrom.2R.csv', 'hmm_fit/indivE2_CAGCCG/indivE2_CAGCCG-hmmprob.RData.chrom.2R.csv',
'hmm_fit/indivG7_CTTGCG/indivG7_CTTGCG-hmmprob.RData.chrom.2R.csv', ...]
"""
glob_pattern = dir.rstrip('/') + GLOB_PATTERN
files = glob.glob(glob_pattern)
print "found %s input files" % len(files)
return files
def parse_path(path):
"""Get ind name, and chrom from file path"""
dir, filename = os.path.split(path)
name_parts = filename.split('.')
ind_name = filename.split('-hmmprob')[0]
return ind_name
def rdata_to_numpy_arrays(rdata_file_path, target_object=None):
"""Call out to R (on $PATH) to convert rdata file to one or more
CSV files. Load CSV files into numpy arrays and delete CSV files.
If target_object is None, it will try to use generic code to find all the dataframes. Otherwise
it will try to home in on the target_object and find the frames within that.
(R code from http://stackoverflow.com/questions/13189467/how-to-convert-rdata-format-into-text-file-format)
More discussion here:
http://stackoverflow.com/questions/23413728/converting-rdata-files-to-csv-error-in-data-frame-arguments-imply-differing-nu
"""
generic_r_code = """\
resave <- function(file){
e <- new.env(parent = emptyenv())
load(file, envir = e)
objs <- ls(envir = e, all.names = TRUE)
for(obj in objs) {
.x <- get(obj, envir =e)
cat(sprintf('%s%%s.tsv\n', obj) )
write.table( .x, file=paste("%s", obj, ".tsv", sep=""), sep="\t", col.names = NA,
qmethod = "double")
}
}
resave('%s')"""
highly_targeted_r_code = """\
resave <- function(file){
e <- new.env(parent = emptyenv())
load(file, envir = e)
obj <- get('%s', envir =e)
lapply( names(obj), function(nam) {
write.table( obj[[nam]], file=paste("%s", nam, ".tsv", sep=""), sep="\t", col.names = NA,
qmethod = "double")
cat(sprintf('%s%%s.tsv\n', nam) )
}
)
}
resave('%s')"""
files_prefix = 'temp-' + str(uuid.uuid4())
if target_object:
r_code = highly_targeted_r_code % (target_object, files_prefix, files_prefix, rdata_file_path)
else:
r_code = generic_r_code % (files_prefix, files_prefix, rdata_file_path)
#print r_code
command = ["Rscript","-","-"] #"-" to tell Rscript to use pipes for input and output
#print ' '.join(command)
rscript = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
file_list = rscript.communicate(r_code)[0]
indiv = parse_path(rdata_file_path)
for csv_path in file_list.splitlines():
if csv_path.lower().endswith('.tsv'):
#Note: Setting the comments parameter below is a numpy hack to make it not look
#for comments in our data file
array = numpy.loadtxt(csv_path, skiprows=1, usecols=(1,21,23), delimiter="\t",
comments="wewillneverseethisstringinafile15",
dtype={'names': ('pos', 'par1', 'par2'), 'formats': ('a100', 'f8', 'f8')}
)
os.remove(csv_path)
            # slice off the '.tsv' suffix; str.strip('.tsv') would strip any
            # of those characters from both ends of the name
            yield array, indiv, csv_path.replace(files_prefix, '')[:-len('.tsv')]
def input_data_sets(dir):
for path in grab_files(dir):
for (array, ind, chrom) in rdata_to_numpy_arrays(path, target_object = 'dataa'):
yield array, ind, chrom
@trace
def fix_values(outrows):
"""Replace 1.000000 with 1 and 0.000000 with 0 to save space."""
for row in outrows:
for i, val in enumerate(row):
if val == '1.000000':
row[i] = '1'
elif val == '0.000000':
row[i] = '0'
@trace
def merge(dir):
"""
Combine all individuals and datapoints with one row per individual, with columns
being chrom:position. Interpolate missing values in some cases. (The R code
that we're trying to replicate was funny with this so there are a few special cases,
see code)
Write out one tsv file for each parent.
"""
#Combine all individuals/positions into a big dictionary (think of it like a sparse table)
#for each parent
dp1, dp2 = {}, {}
for (array, ind, chrom) in input_data_sets(dir):
print ind, chrom, len(array), "records"
for x in array:
key = (ind, chrom, int(x['pos']))
dp1[key] = x['par1']
dp2[key] = x['par2']
gc.collect()
print "Done loading rdata files."
print "Free memory is %s MB" % get_free_memory()
#write out to files and interpolate as we go. The R code we're replacing had some weird special cases so look out for those.
for (fname, dp) in (('ancestry-probs-par1.tsv',dp1),('ancestry-probs-par2.tsv',dp2)):
if DEBUG:
fname = 'test.' + fname
print "Compiling data for file",fname
#Get all positions (chrom,pos) sorted by chrom, then by position
positions = sorted(set([(k[1],k[2]) for k in dp.keys()]))
header = [''] + [''.join((p[0],':',str(p[1]))) for p in positions]
#Get all individuals, sorted
inds = sorted(set([k[0] for k in dp.keys()]))
#Build up each row to be written to the file (all individuals x all positions)
outrows = []
for ind in inds:
print " ",ind
#initialize/clear out bookkeeping variables
last_pos_w_val, last_val, last_chrom, to_interpolate = None, None, None, []
outrow = [ind] #first column is individual name
for (chrom,pos) in positions:
# Handle switching to new chromosome
if chrom != last_chrom:
#set any positions waiting for interpolation to 0 since we've reached the end of the chrom
                    #however we want to leave as NA and not interpolate between last_pos_w_val and end of chrom
#because that's what R did.
for (update_pos, insert_loc) in to_interpolate:
if update_pos < last_pos_w_val:
outrow[insert_loc] = "0"
#clear out bookkeeping vars on new chrom
last_pos_w_val, last_val, last_chrom, to_interpolate = None, None, None, []
key = (ind,chrom,pos)
if (key in dp) and ((dp[key]>.0000005) or (last_val and last_val >.0000005)):
# This condition is checking if A. data exists for this position and it's non-zero OR B. data exists and the last value seen was non-zero.
# These are cases were we want to use this value and last seen value to interpolate positions in the interpolation queue.
# Store value in outrow to be written to file
outrow.append("%.6f" % round(dp[key],6))
#interpolate any positions waiting for a new value
for (update_pos, insert_loc) in to_interpolate:
if update_pos < last_pos_w_val:
outrow[insert_loc] = "0" # zero out any pending positions before the last value we saw since this is what R did.
else:
insert_val = last_val + ((dp[key] - last_val) * (float(update_pos - last_pos_w_val) / (pos - last_pos_w_val)))
outrow[insert_loc] = "%.6f" % round(insert_val,6)
to_interpolate = [] #since all pending positions have been interpolated, clear this out
last_pos_w_val, last_val = pos, dp[key]
elif last_val and not (key in dp):
#If a value has been seen for this chrom, we'll want to start interpolating
#Add a placeholder to outrow
outrow.append('NA') #
#Mark position for later interpolation
to_interpolate.append((pos, len(outrow) - 1))
else:
#don't interpolate
if key in dp:
#data exists for key but it's 0, Store value in outrow, but update bookkeeping vars
outrow.append("%.6f" % round(dp[key],6)) #should be 0
#still count 0 as a last value for interpolation
last_pos_w_val, last_val = pos, dp[key]
else:
outrow.append('NA')
last_chrom = chrom
#set any positions waiting for interpolation to 0 since we've reached the end of the individual
            #however we want to leave as NA and not interpolate between last_pos_w_val and end
#because that's what R did.
for (update_pos, insert_loc) in to_interpolate:
if update_pos < last_pos_w_val:
outrow[insert_loc] = "0"
outrows.append(outrow)
fix_values(outrows)
print "Writing file",fname
csvout = csv.writer(open(fname,'wb'), delimiter='\t', quoting=csv.QUOTE_MINIMAL)
csvout.writerow(header)
csvout.writerows(outrows)
gc.collect()
@trace
def main():
"""Parse command line args, and call appropriate functions."""
#disable garbage collection for a 10% speed boost
gc.disable()
usage="""\nusage: %prog [options]\n"""
parser = optparse.OptionParser(usage=usage)
#Other option types are int and float, string is default.
#Note there is also a default parameter.
parser.add_option('-d','--dir',dest="hmm_fit_dir",type="string")
#?? Need these ?? -c $params{'chroms'} -p $params{'chroms2plot'} -d hmm_fit -t $params{'thinfac'} -f $params{'difffac'} -b $params{'barcodes'} -n $params{'pnathresh'}
#parser.add_option('-o','--out',dest="out_path",type="string")
#parser.add_option('-t','--thresh',dest="pnathresh",type="float",default=.03)
opts,args=parser.parse_args() #Args taken from sys.argv[1:] by default, parsed using GNU/POSIX syntax.
if not opts.hmm_fit_dir:
parser.error("A directory for locating hmm_fit data is required.")
print "Starting combine.py with parameters:", str(opts)
print "Free memory is %s MB" % get_free_memory()
merge(opts.hmm_fit_dir)
if __name__=='__main__':
main()
| 2.484375 | 2 |
fake-logs.py | vdyc/fake-logs | 11 | 12785946 | # pylint: disable=C0103
from fake_logs.fake_logs_cli import run_from_cli
# Run this module with "python fake-logs.py <arguments>"
if __name__ == "__main__":
run_from_cli()
| 1.320313 | 1 |
implicit_solver/lib/dispatcher.py | vincentbonnetcg/Numerical-Bric-a-Brac | 14 | 12785947 | """
@author: <NAME>
@description : command dispatcher for solver
"""
# import for CommandSolverDispatcher
import uuid
from core import Details
import lib.system as system
import lib.system.time_integrators as integrator
from lib.objects import Dynamic, Kinematic, Condition, Force
from lib.objects.jit.data import Node, Spring, AnchorSpring, Bending, Area
from lib.objects.jit.data import Point, Edge, Triangle
import lib.commands as cmd
import core
class CommandSolverDispatcher(core.CommandDispatcher):
'''
Dispatch commands to manage objects (animators, conditions, dynamics, kinematics, forces)
'''
def __init__(self):
core.CommandDispatcher.__init__(self)
# data
self._scene = None
self._details = None
self._reset()
self._solver = system.Solver(integrator.BackwardEulerIntegrator())
#self._solver = system.Solver(integrator.SymplecticEulerIntegrator())
self._context = system.SolverContext()
# map hash_value with objects (dynamic, kinematic, condition, force)
self._object_dict = {}
# register
self.register_cmd(self._set_context, 'set_context')
self.register_cmd(self._get_context, 'get_context')
self.register_cmd(self._get_dynamics, 'get_dynamics')
self.register_cmd(self._get_conditions, 'get_conditions')
self.register_cmd(self._get_kinematics, 'get_kinematics')
self.register_cmd(self._get_metadata, 'get_metadata')
self.register_cmd(self._get_commands, 'get_commands')
self.register_cmd(self._reset, 'reset')
self.register_cmd(cmd.initialize)
self.register_cmd(cmd.add_dynamic)
self.register_cmd(cmd.add_kinematic)
self.register_cmd(cmd.solve_to_next_frame)
self.register_cmd(cmd.get_nodes_from_dynamic)
self.register_cmd(cmd.get_shape_from_kinematic)
self.register_cmd(cmd.get_normals_from_kinematic)
self.register_cmd(cmd.get_segments_from_constraint)
self.register_cmd(cmd.set_render_prefs)
self.register_cmd(cmd.add_gravity)
self.register_cmd(cmd.add_edge_constraint)
self.register_cmd(cmd.add_wire_bending_constraint)
self.register_cmd(cmd.add_face_constraint)
self.register_cmd(cmd.add_kinematic_attachment)
self.register_cmd(cmd.add_kinematic_collision)
self.register_cmd(cmd.add_dynamic_attachment)
self.register_cmd(cmd.get_sparse_matrix_as_dense)
def _add_object(self, obj, object_handle=None):
if object_handle in self._object_dict:
assert False, f'_add_object(...) {object_handle} already exists'
if not object_handle:
object_handle = str(uuid.uuid4())
if isinstance(obj, (Dynamic, Kinematic, Condition, Force)):
self._object_dict[object_handle] = obj
else:
assert False, '_add_object(...) only supports Dynamic, Kinematic, Condition and Force'
return object_handle
def _convert_parameter(self, parameter_name, kwargs):
# parameter provided by the dispatcher
if parameter_name == 'scene':
return self._scene
elif parameter_name == 'solver':
return self._solver
elif parameter_name == 'context':
return self._context
elif parameter_name == 'details':
return self._details
# parameter provided by user
if parameter_name in kwargs:
arg_object = kwargs[parameter_name]
reserved_attrs = ['dynamic','kinematic','condition','obj']
is_reserved_attr = False
for reserved_attr in reserved_attrs:
if not parameter_name.startswith(reserved_attr):
continue
is_reserved_attr = True
break
if is_reserved_attr:
if arg_object not in self._object_dict:
                    assert False, f'in _convert_parameter(...) {arg_object} does not exist'
return self._object_dict[arg_object]
return kwargs[parameter_name]
return None
def _process_result(self, result, object_handle=None):
# convert the result object
if isinstance(result, (Dynamic, Kinematic, Condition, Force)):
# the object is already stored
for k, v in self._object_dict.items():
if v == result:
return k
# add the new object
return self._add_object(result, object_handle)
        if isinstance(result, (tuple, list)):
            # list() makes a shallow copy so the original container is not
            # overridden, and also handles tuples, which have no .copy()
            result = list(result)
            for index in range(len(result)):
                result[index] = self._process_result(result[index])
            return result
def _set_context(self, time : float, frame_dt : float, num_substep : int, num_frames : int):
self._context = system.SolverContext(time, frame_dt, num_substep, num_frames)
def _get_context(self):
return self._context
def _get_dynamics(self):
return self._scene.dynamics
def _get_conditions(self):
return self._scene.conditions
def _get_kinematics(self):
return self._scene.kinematics
def _get_metadata(self, obj):
if obj:
return obj.metadata()
return None
def _get_commands(self):
return list(self._commands.keys())
def _reset(self):
self._scene = system.Scene()
system_types = [Node, Area, Bending, Spring, AnchorSpring]
system_types += [Point, Edge, Triangle]
group_types = {'dynamics' : [Node],
'constraints' : [Area, Bending, Spring, AnchorSpring],
'geometries': [Point, Edge, Triangle],
'bundle': system_types}
self._details = Details(system_types, group_types)
| 2.109375 | 2 |
database/models.py | Alweezy/ride-my-way-python | 0 | 12785948 | <filename>database/models.py
from datetime import datetime, timedelta
import jwt
from flask_bcrypt import Bcrypt
from flask import current_app
from api.app import db
class User(db.Model):
"""Creates a user model
"""
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), nullable=False, unique=True)
email = db.Column(db.String(255), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False)
questions = db.relationship('Question', order_by="Question.id",
cascade="all,delete-orphan")
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = Bcrypt().generate_password_hash(password).decode()
def verify_password(self, password):
"""Compares stored password to password at login
"""
return Bcrypt().check_password_hash(self.password, password)
@staticmethod
def create_token(user_id):
"""Creates the access token
"""
try:
payload = {
'exp': datetime.utcnow() + timedelta(hours=1),
'iat': datetime.utcnow(),
'sub': user_id
}
jwt_string = jwt.encode(
payload,
current_app.config.get("SECRET_KEY"),
algorithm='HS256'
)
return jwt_string
except Exception as e:
return str(e)
@staticmethod
def decode_token(token):
"""Decodes the token passed in the header
"""
try:
key = current_app.config.get("SECRET_KEY")
payload = jwt.decode(token, key)
return payload["sub"]
except jwt.ExpiredSignatureError:
return "Token has expired, Login to generate a new one."
except jwt.InvalidTokenError:
return "Token is invalid, Sign up or Login"
def save(self):
"""Save a user into the database
"""
db.session.add(self)
db.session.commit()
class Question(db.Model):
"""Creates a model for the Question
"""
__tablename__ = "questions"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp())
asked_by = db.Column(db.Integer, db.ForeignKey(User.id))
answers = db.relationship('Answer', order_by="Answer.id", cascade="all,delete-orphan")
def __init__(self, title, asked_by):
self.title = title
self.asked_by = asked_by
def save(self):
"""Save a question to the database
"""
db.session.add(self)
db.session.commit()
class Answer(db.Model):
__tablename__ = "answers"
id = db.Column(db.Integer, primary_key=True)
answer_body = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp())
question_id = db.Column(db.Integer, db.ForeignKey(Question.id))
def __init__(self, answer_body, question_id):
self.answer_body = answer_body
self.question_id = question_id
def save(self):
"""Add an answer to a question
"""
        db.session.add(self)
db.session.commit()
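
# Illustrative sketch (not part of the original module): the JWT round trip
# implemented above; the user id is hypothetical.
#
#     token = User.create_token(user_id=1)
#     subject = User.decode_token(token)  # -> 1, or an error string if the
#                                         # token has expired or is invalid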
| 2.9375 | 3 |
actions/utils.py | anubhav231989/bookmarks | 0 | 12785949 | <gh_stars>0
from .models import Action
from django.utils import timezone
from datetime import timedelta
from django.contrib.contenttypes.models import ContentType
def register_action(user, verb, target=None):
last_hour_ago = timezone.now() - timedelta(hours=1)
similar_actions = Action.objects.filter(user=user, verb=verb, created__gte=last_hour_ago)
if target:
target_ct = ContentType.objects.get_for_model(target)
similar_actions = similar_actions.filter(target_id=target.id, target_content_type=target_ct)
if similar_actions.count() == 0:
Action.objects.create(user=user, verb=verb, target=target)
return True
    return False
| 2.359375 | 2 |
python/jobbole/jobbole/pipelines.py | LeonMioc/CodeUnres | 0 | 12785950 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import *
from scrapy.conf import settings
class MongoDBPipeline(object):
_mongo_conn = dict()
_mongod = ''
def __init__(self):
self._mongo_conn['server'] = settings['MONGODB_SERVER']
self._mongo_conn['port'] = settings['MONGODB_PORT']
self._mongo_conn['db'] = settings['MONGODB_DB']
self._mongo_conn['coll'] = settings['MONGODB_COLLECTION']
self._mongod = self.connection()
    # Connect to the database
def connection(self):
connection = MongoClient(
self._mongo_conn['server'],
self._mongo_conn['port'],
)
db = connection[self._mongo_conn['db']]
coll = db[self._mongo_conn['coll']]
return coll
    def process_item(self, item, spider):
        if item['content']:
            # insert_one replaces the long-deprecated Collection.insert
            self._mongod.insert_one(dict(item))
        return item
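
# Illustrative sketch (not part of the original module): the settings this
# pipeline reads from settings.py (values are hypothetical).
#
#     MONGODB_SERVER = "localhost"
#     MONGODB_PORT = 27017
#     MONGODB_DB = "jobbole"
#     MONGODB_COLLECTION = "articles"
#     ITEM_PIPELINES = {"jobbole.pipelines.MongoDBPipeline": 300}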
| 2.359375 | 2 |